[
  {
    "path": ".clang-format",
    "content": "---\nLanguage:        Cpp\n# BasedOnStyle:  Google\nAccessModifierOffset: -1\nAlignAfterOpenBracket: Align\nAlignConsecutiveAssignments: false\nAlignConsecutiveDeclarations: false\nAlignEscapedNewlines: Left\nAlignOperands:   true\nAlignTrailingComments: true\nAllowAllParametersOfDeclarationOnNextLine: true\nAllowShortBlocksOnASingleLine: false\nAllowShortCaseLabelsOnASingleLine: false\nAllowShortFunctionsOnASingleLine: All\nAllowShortIfStatementsOnASingleLine: true\nAllowShortLoopsOnASingleLine: true\nAlwaysBreakAfterDefinitionReturnType: None\nAlwaysBreakAfterReturnType: None\nAlwaysBreakBeforeMultilineStrings: true\nAlwaysBreakTemplateDeclarations: Yes\nBinPackArguments: true\nBinPackParameters: true\nBraceWrapping:\n  AfterClass:      false\n  AfterControlStatement: false\n  AfterEnum:       false\n  AfterFunction:   false\n  AfterNamespace:  false\n  AfterObjCDeclaration: false\n  AfterStruct:     false\n  AfterUnion:      false\n  AfterExternBlock: false\n  BeforeCatch:     false\n  BeforeElse:      false\n  IndentBraces:    false\n  SplitEmptyFunction: true\n  SplitEmptyRecord: true\n  SplitEmptyNamespace: true\nBreakBeforeBinaryOperators: None\nBreakBeforeBraces: Attach\nBreakBeforeInheritanceComma: false\nBreakInheritanceList: BeforeColon\nBreakBeforeTernaryOperators: true\nBreakConstructorInitializersBeforeComma: false\nBreakConstructorInitializers: BeforeColon\nBreakAfterJavaFieldAnnotations: false\nBreakStringLiterals: true\nColumnLimit:     120\nCommentPragmas:  '^ IWYU pragma:'\nCompactNamespaces: false\nConstructorInitializerAllOnOneLineOrOnePerLine: true\nConstructorInitializerIndentWidth: 4\nContinuationIndentWidth: 2\nCpp11BracedListStyle: true\nDerivePointerAlignment: false\nDisableFormat:   false\nExperimentalAutoDetectBinPacking: false\nFixNamespaceComments: true\nForEachMacros:\n#  - foreach\n  - Q_FOREACH\n  - BOOST_FOREACH\nIncludeBlocks:   Preserve\nIncludeCategories:\n  - Regex:           '^<ext/.*\\.h>'\n    Priority:      
  2\n  - Regex:           '^<.*\\.h>'\n    Priority:        1\n  - Regex:           '^<.*'\n    Priority:        2\n  - Regex:           '.*'\n    Priority:        3\nIncludeIsMainRegex: '([-_](test|unittest))?$'\nIndentCaseLabels: true\nIndentPPDirectives: None\nIndentWidth:     2\nIndentWrappedFunctionNames: false\nJavaScriptQuotes: Leave\nJavaScriptWrapImports: true\nKeepEmptyLinesAtTheStartOfBlocks: false\nMacroBlockBegin: ''\nMacroBlockEnd:   ''\nMaxEmptyLinesToKeep: 1\nNamespaceIndentation: None\nObjCBinPackProtocolList: Never\nObjCBlockIndentWidth: 2\nObjCSpaceAfterProperty: false\nObjCSpaceBeforeProtocolList: true\nPenaltyBreakAssignment: 2\nPenaltyBreakBeforeFirstCallParameter: 1\nPenaltyBreakComment: 300\nPenaltyBreakFirstLessLess: 120\nPenaltyBreakString: 1000\nPenaltyBreakTemplateDeclaration: 10\nPenaltyExcessCharacter: 1000000\nPenaltyReturnTypeOnItsOwnLine: 200\nPointerAlignment: Right\nRawStringFormats:\n  - Language:        Cpp\n    Delimiters:\n      - cc\n      - CC\n      - cpp\n      - Cpp\n      - CPP\n      - 'c++'\n      - 'C++'\n    CanonicalDelimiter: ''\n    BasedOnStyle:    google\n  - Language:        TextProto\n    Delimiters:\n      - pb\n      - PB\n      - proto\n      - PROTO\n    EnclosingFunctions:\n      - EqualsProto\n      - EquivToProto\n      - PARSE_PARTIAL_TEXT_PROTO\n      - PARSE_TEST_PROTO\n      - PARSE_TEXT_PROTO\n      - ParseTextOrDie\n      - ParseTextProtoOrDie\n    CanonicalDelimiter: ''\n    BasedOnStyle:    google\nReflowComments:  true\nSortUsingDeclarations: true\nSpaceAfterCStyleCast: false\nSpaceAfterTemplateKeyword: true\nSpaceBeforeAssignmentOperators: true\nSpaceBeforeCpp11BracedList: false\nSpaceBeforeCtorInitializerColon: true\nSpaceBeforeInheritanceColon: true\nSpaceBeforeParens: ControlStatements\nSpaceBeforeRangeBasedForLoopColon: true\nSpaceInEmptyParentheses: false\nSpacesBeforeTrailingComments: 2\nSpacesInAngles:  false\nSpacesInContainerLiterals: true\nSpacesInCStyleCastParentheses: 
false\nSpacesInParentheses: false\nSpacesInSquareBrackets: false\nStandard:        Auto\nStatementMacros:\n  - Q_UNUSED\n  - QT_REQUIRE_VERSION\nTabWidth:        2\nUseTab:          Never\nSortIncludes:    false\n...\n\n"
  },
  {
    "path": ".gitee/PULL_REQUEST_TEMPLATE.md",
    "content": "<!--  Thanks for sending a pull request!  Here are some tips for you:\r\n\r\n1) If this is your first time, please read our contributor guidelines: https://gitee.com/mindspore/mindspore/blob/master/CONTRIBUTING.md\r\n\r\n2) If you want to contribute your code but don't know who will review and merge, please add label `mindspore-assistant` to the pull request, we will find and do it as soon as possible.\r\n-->\r\n\r\n**What type of PR is this?**\r\n> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line:\r\n>\r\n> /kind bug\r\n> /kind task\r\n> /kind feature\r\n\r\n\r\n**What does this PR do / why do we need it**:\r\n\r\n\r\n**Which issue(s) this PR fixes**:\r\n<!--\r\n*Automatically closes linked issue when PR is merged.\r\nUsage: `Fixes #<issue number>`, or `Fixes (paste link of issue)`.\r\n-->\r\nFixes #\r\n\r\n**Special notes for your reviewers**:\r\n\r\n\r\n"
  },
  {
    "path": ".gitignore",
    "content": "# MindSpore Serving\nbuild/\nmindspore_serving/lib\noutput\n*.ir\n.coverage*\nhtmlcov/\ncov_output/\n\n# Cmake files\nCMakeFiles/\ncmake_install.cmake\nCMakeCache.txt\nMakefile\ncmake-build-debug\n\n# Prerequisites\n*.d\n\n# Compiled Object files\n*.slo\n*.lo\n*.o\n*.obj\n\n# Precompiled Headers\n*.gch\n*.pch\n\n# Compiled Dynamic libraries\n*.so\n*.dylib\n*.dll\n*.so.*\n\n# Fortran module files\n*.mod\n*.smod\n\n# Compiled Static libraries\n*.lai\n*.la\n*.a\n*.lib\n\n# Executables\n*.exe\n*.out\n*.app\n\n# Protocol buffers\n*_pb2.py\n*.pb.h\n*.pb.cc\n*.pb\n*_grpc.py\n\n# Editor\n.vscode\n.idea/\n\n# Cquery\n.cquery_cached_index/\ncompile_commands.json\n\n# Ctags and cscope\ntags\nTAGS\nCTAGS\nGTAGS\nGRTAGS\nGSYMS\nGPATH\ncscope.*\n\n# Python files\n*__pycache__*\n.pytest_cache\n\n# Mac files\n*.DS_Store\n\n# Test results\ntest_temp_summary_event_file/\n*.dot\n*.dat\n*.svg\n*.perf\n*.info\n*.ckpt\n*.shp\n*.pkl\n.clangd\nmindspore_serving/version.py\nmindspore_serving/default_config.py\nmindspore_serving/.commit_id\ntests/ut/python/tests/ca.crt\ntests/ut/python/tests/ca.key\ntests/ut/python/tests/ca.srl\ntests/ut/python/tests/server.crt\ntests/ut/python/tests/server.csr\ntests/ut/python/tests/server.key\ntests/ut/python/tests/client.crt\ntests/ut/python/tests/client.csr\ntests/ut/python/tests/client.key\ntests/ut/python/tests/serving_logs/\ntests/ut/python/tests/unix_socket_files/\ntests/ut/python/tests/serving_python_ut_servables/\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"third_party/mindspore\"]\n\tpath = third_party/mindspore\n\turl = https://gitee.com/mindspore/mindspore.git\n"
  },
  {
    "path": ".jenkins/test/config/dependent_packages.yaml",
    "content": "mindspore:\n  'mindspore/mindspore/version/202310/20231010/master_20231010144855_e5008bcfa07e3e6f3fa50f3ba0ac90175504dfd7/'\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.14.1)\nproject(MindSpore_Serving)\n\nif(CMAKE_CXX_COMPILER_ID STREQUAL \"GNU\" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.3.0)\n    message(FATAL_ERROR \"GCC version ${CMAKE_CXX_COMPILER_VERSION} must not be less than 7.3.0\")\nendif()\n\ninclude(${CMAKE_SOURCE_DIR}/cmake/options.cmake)  # set compile options\ninclude(${CMAKE_SOURCE_DIR}/cmake/check_requirements.cmake) # check require party, like OpenSSL\nset(CMAKE_CXX_FLAGS_RELEASE \"$ENV{CXXFLAGS} -O2 -Wl,--allow-shlib-undefined -DHALF_ENABLE_CPP11_USER_LITERALS=0 \\\n    -D_FORTIFY_SOURCE=2\")\nif(NOT CMAKE_SYSTEM_NAME MATCHES \"Windows\")\n    add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)\nendif()\n\nif(ENABLE_PYTHON)\n    add_compile_definitions(ENABLE_PYTHON)\nendif()\n\nset(CMAKE_CXX_FLAGS_DEBUG \"$ENV{CXXFLAGS} -O0 -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer \\\n    -Wl,--allow-shlib-undefined -D_LIBCPP_INLINE_VISIBILITY='' -D_LIBCPP_DISABLE_EXTERN_TEMPLATE=1 \\\n    -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2 -Wno-cpp\")\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -I/usr/local/include -std=c++17 -Werror -Wall -fPIC\")\nset(CMAKE_EXPORT_COMPILE_COMMANDS ON)\n\nset(PYBIND11_CPP_STANDARD -std=c++17)\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${OPTION_CXX_FLAGS}\")\n\n# compile third party: grpc, libevent, gtest, onnx\ninclude(${CMAKE_SOURCE_DIR}/cmake/mind_expression.cmake)\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/securec/include)\n\n# find python3 packages\ninclude(${CMAKE_SOURCE_DIR}/cmake/dependency_utils.cmake)\nfind_package(Python3 3.7 COMPONENTS Interpreter Development)\nif(Python3_FOUND)\n    set(PYTHON_INCLUDE_DIRS \"${Python3_INCLUDE_DIRS}\")\n    set(PYTHON_LIBRARIES \"${Python3_LIBRARIES}\")\n    if(WIN32)\n        if(Python3_DIR)\n            message(\"Python3_DIR set already: \" ${Python3_DIR})\n        else()\n            string(LENGTH 
${PYTHON_LIBRARIES} PYTHON_LIBRARIES_LEN)\n            string(LENGTH \"libpythonxx.a\" Python3_NAME_LEN)\n            math(EXPR Python3_DIR_LEN ${PYTHON_LIBRARIES_LEN}-${Python3_NAME_LEN})\n            string(SUBSTRING ${Python3_LIBRARIES} 0 ${Python3_DIR_LEN} Python3_DIR)\n            message(\"Python3_DIR: \" ${Python3_DIR})\n        endif()\n        link_directories(${Python3_DIR})\n    endif()\nelse()\n    find_python_package(py_inc py_lib)\n    set(PYTHON_INCLUDE_DIRS \"${py_inc}\")\n    set(PYTHON_LIBRARIES \"${py_lib}\")\nendif()\nmessage(\"PYTHON_INCLUDE_DIRS = ${PYTHON_INCLUDE_DIRS}\")\nmessage(\"PYTHON_LIBRARIES = ${PYTHON_LIBRARIES}\")\ninclude_directories(${PYTHON_INCLUDE_DIRS})\n\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fvisibility=hidden\")\nfind_package(Threads REQUIRED)\n\n\nif(ENABLE_TESTCASES)\n    add_subdirectory(tests)\nendif()\n\nadd_subdirectory(mindspore_serving)\ninclude(cmake/package.cmake)\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\r\n                           Version 2.0, January 2004\r\n                        http://www.apache.org/licenses/\r\n\r\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\r\n\r\n   1. Definitions.\r\n\r\n      \"License\" shall mean the terms and conditions for use, reproduction,\r\n      and distribution as defined by Sections 1 through 9 of this document.\r\n\r\n      \"Licensor\" shall mean the copyright owner or entity authorized by\r\n      the copyright owner that is granting the License.\r\n\r\n      \"Legal Entity\" shall mean the union of the acting entity and all\r\n      other entities that control, are controlled by, or are under common\r\n      control with that entity. For the purposes of this definition,\r\n      \"control\" means (i) the power, direct or indirect, to cause the\r\n      direction or management of such entity, whether by contract or\r\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\r\n      outstanding shares, or (iii) beneficial ownership of such entity.\r\n\r\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\r\n      exercising permissions granted by this License.\r\n\r\n      \"Source\" form shall mean the preferred form for making modifications,\r\n      including but not limited to software source code, documentation\r\n      source, and configuration files.\r\n\r\n      \"Object\" form shall mean any form resulting from mechanical\r\n      transformation or translation of a Source form, including but\r\n      not limited to compiled object code, generated documentation,\r\n      and conversions to other media types.\r\n\r\n      \"Work\" shall mean the work of authorship, whether in Source or\r\n      Object form, made available under the License, as indicated by a\r\n      copyright notice that is included in or attached to the work\r\n      (an example is provided in the Appendix below).\r\n\r\n      
\"Derivative Works\" shall mean any work, whether in Source or Object\r\n      form, that is based on (or derived from) the Work and for which the\r\n      editorial revisions, annotations, elaborations, or other modifications\r\n      represent, as a whole, an original work of authorship. For the purposes\r\n      of this License, Derivative Works shall not include works that remain\r\n      separable from, or merely link (or bind by name) to the interfaces of,\r\n      the Work and Derivative Works thereof.\r\n\r\n      \"Contribution\" shall mean any work of authorship, including\r\n      the original version of the Work and any modifications or additions\r\n      to that Work or Derivative Works thereof, that is intentionally\r\n      submitted to Licensor for inclusion in the Work by the copyright owner\r\n      or by an individual or Legal Entity authorized to submit on behalf of\r\n      the copyright owner. For the purposes of this definition, \"submitted\"\r\n      means any form of electronic, verbal, or written communication sent\r\n      to the Licensor or its representatives, including but not limited to\r\n      communication on electronic mailing lists, source code control systems,\r\n      and issue tracking systems that are managed by, or on behalf of, the\r\n      Licensor for the purpose of discussing and improving the Work, but\r\n      excluding communication that is conspicuously marked or otherwise\r\n      designated in writing by the copyright owner as \"Not a Contribution.\"\r\n\r\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\r\n      on behalf of whom a Contribution has been received by Licensor and\r\n      subsequently incorporated within the Work.\r\n\r\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\r\n      this License, each Contributor hereby grants to You a perpetual,\r\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\r\n      copyright license to reproduce, prepare Derivative Works of,\r\n      publicly display, publicly perform, sublicense, and distribute the\r\n      Work and such Derivative Works in Source or Object form.\r\n\r\n   3. Grant of Patent License. Subject to the terms and conditions of\r\n      this License, each Contributor hereby grants to You a perpetual,\r\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\r\n      (except as stated in this section) patent license to make, have made,\r\n      use, offer to sell, sell, import, and otherwise transfer the Work,\r\n      where such license applies only to those patent claims licensable\r\n      by such Contributor that are necessarily infringed by their\r\n      Contribution(s) alone or by combination of their Contribution(s)\r\n      with the Work to which such Contribution(s) was submitted. If You\r\n      institute patent litigation against any entity (including a\r\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\r\n      or a Contribution incorporated within the Work constitutes direct\r\n      or contributory patent infringement, then any patent licenses\r\n      granted to You under this License for that Work shall terminate\r\n      as of the date such litigation is filed.\r\n\r\n   4. Redistribution. 
You may reproduce and distribute copies of the\r\n      Work or Derivative Works thereof in any medium, with or without\r\n      modifications, and in Source or Object form, provided that You\r\n      meet the following conditions:\r\n\r\n      (a) You must give any other recipients of the Work or\r\n          Derivative Works a copy of this License; and\r\n\r\n      (b) You must cause any modified files to carry prominent notices\r\n          stating that You changed the files; and\r\n\r\n      (c) You must retain, in the Source form of any Derivative Works\r\n          that You distribute, all copyright, patent, trademark, and\r\n          attribution notices from the Source form of the Work,\r\n          excluding those notices that do not pertain to any part of\r\n          the Derivative Works; and\r\n\r\n      (d) If the Work includes a \"NOTICE\" text file as part of its\r\n          distribution, then any Derivative Works that You distribute must\r\n          include a readable copy of the attribution notices contained\r\n          within such NOTICE file, excluding those notices that do not\r\n          pertain to any part of the Derivative Works, in at least one\r\n          of the following places: within a NOTICE text file distributed\r\n          as part of the Derivative Works; within the Source form or\r\n          documentation, if provided along with the Derivative Works; or,\r\n          within a display generated by the Derivative Works, if and\r\n          wherever such third-party notices normally appear. The contents\r\n          of the NOTICE file are for informational purposes only and\r\n          do not modify the License. 
You may add Your own attribution\r\n          notices within Derivative Works that You distribute, alongside\r\n          or as an addendum to the NOTICE text from the Work, provided\r\n          that such additional attribution notices cannot be construed\r\n          as modifying the License.\r\n\r\n      You may add Your own copyright statement to Your modifications and\r\n      may provide additional or different license terms and conditions\r\n      for use, reproduction, or distribution of Your modifications, or\r\n      for any such Derivative Works as a whole, provided Your use,\r\n      reproduction, and distribution of the Work otherwise complies with\r\n      the conditions stated in this License.\r\n\r\n   5. Submission of Contributions. Unless You explicitly state otherwise,\r\n      any Contribution intentionally submitted for inclusion in the Work\r\n      by You to the Licensor shall be under the terms and conditions of\r\n      this License, without any additional terms or conditions.\r\n      Notwithstanding the above, nothing herein shall supersede or modify\r\n      the terms of any separate license agreement you may have executed\r\n      with Licensor regarding such Contributions.\r\n\r\n   6. Trademarks. This License does not grant permission to use the trade\r\n      names, trademarks, service marks, or product names of the Licensor,\r\n      except as required for reasonable and customary use in describing the\r\n      origin of the Work and reproducing the content of the NOTICE file.\r\n\r\n   7. Disclaimer of Warranty. Unless required by applicable law or\r\n      agreed to in writing, Licensor provides the Work (and each\r\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\r\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\r\n      implied, including, without limitation, any warranties or conditions\r\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\r\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\r\n      appropriateness of using or redistributing the Work and assume any\r\n      risks associated with Your exercise of permissions under this License.\r\n\r\n   8. Limitation of Liability. In no event and under no legal theory,\r\n      whether in tort (including negligence), contract, or otherwise,\r\n      unless required by applicable law (such as deliberate and grossly\r\n      negligent acts) or agreed to in writing, shall any Contributor be\r\n      liable to You for damages, including any direct, indirect, special,\r\n      incidental, or consequential damages of any character arising as a\r\n      result of this License or out of the use or inability to use the\r\n      Work (including but not limited to damages for loss of goodwill,\r\n      work stoppage, computer failure or malfunction, or any and all\r\n      other commercial damages or losses), even if such Contributor\r\n      has been advised of the possibility of such damages.\r\n\r\n   9. Accepting Warranty or Additional Liability. While redistributing\r\n      the Work or Derivative Works thereof, You may choose to offer,\r\n      and charge a fee for, acceptance of support, warranty, indemnity,\r\n      or other liability obligations and/or rights consistent with this\r\n      License. 
However, in accepting such obligations, You may act only\r\n      on Your own behalf and on Your sole responsibility, not on behalf\r\n      of any other Contributor, and only if You agree to indemnify,\r\n      defend, and hold each Contributor harmless for any liability\r\n      incurred by, or claims asserted against, such Contributor by reason\r\n      of your accepting any such warranty or additional liability.\r\n\r\n   END OF TERMS AND CONDITIONS\r\n\r\n   APPENDIX: How to apply the Apache License to your work.\r\n\r\n      To apply the Apache License to your work, attach the following\r\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\r\n      replaced with your own identifying information. (Don't include\r\n      the brackets!)  The text should be enclosed in the appropriate\r\n      comment syntax for the file format. We also recommend that a\r\n      file or class name and description of purpose be included on the\r\n      same \"printed page\" as the copyright notice for easier\r\n      identification within third-party archives.\r\n\r\n   Copyright [yyyy] [name of copyright owner]\r\n\r\n   Licensed under the Apache License, Version 2.0 (the \"License\");\r\n   you may not use this file except in compliance with the License.\r\n   You may obtain a copy of the License at\r\n\r\n       http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n   Unless required by applicable law or agreed to in writing, software\r\n   distributed under the License is distributed on an \"AS IS\" BASIS,\r\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n   See the License for the specific language governing permissions and\r\n   limitations under the License.\r\n"
  },
  {
    "path": "NOTICE",
    "content": "MindSpore Serving\r\nCopyright 2020 Huawei Technologies Co., Ltd\r\n"
  },
  {
    "path": "OWNERS",
    "content": "approvers:\n- zhaizhiqiang\n- zhangxuetong\n- hangangqiang\n"
  },
  {
    "path": "README.md",
    "content": "# MindSpore Serving\n\n[查看中文](./README_CN.md)\n\n<!-- TOC -->\n\n- [MindSpore Serving](#mindspore-serving)\n    - [Overview](#overview)\n    - [Installation](#installation)\n        - [Installing Serving](#installing-serving)\n        - [Configuring Environment Variables](#configuring-environment-variables)\n    - [Quick Start](#quick-start)\n    - [Documents](#documents)\n        - [Developer Guide](#developer-guide)\n    - [Community](#community)\n        - [Governance](#governance)\n        - [Communication](#communication)\n    - [Contributions](#contributions)\n    - [Release Notes](#release-notes)\n    - [License](#license)\n\n<!-- /TOC -->\n\n## Overview\n\nMindSpore Serving is a lightweight and high-performance service module that helps MindSpore developers efficiently\ndeploy online inference services in the production environment. After completing model training on MindSpore, you can\nexport the MindSpore model and use MindSpore Serving to create an inference service for the model.\n\nMindSpore Serving architecture:\n\n<img src=\"docs/architecture.png\" alt=\"MindSpore Architecture\" width=\"600\"/>\n\nMindSpore Serving includes two parts: `Client` and `Server`. On a `Client` node, you can deliver inference service\ncommands through the gRPC or RESTful API. The `Server` consists of a `Main` node and one or more `Worker` nodes.\nThe `Main` node manages all `Worker` nodes and their model information, accepts user requests from `Client`s, and\ndistributes the requests to `Worker` nodes. `Servable` is deployed on a worker node, indicates a single model or a\ncombination of multiple models and can provide different services in various methods. `\n\nOn the server side, when [MindSpore](#https://www.mindspore.cn/) is used as the inference backend,, MindSpore Serving\nsupports the Ascend 910 and Nvidia GPU environments. 
When [MindSpore Lite](https://www.mindspore.cn/lite) is\nused as the inference backend, MindSpore Serving supports Ascend 310/310P, Nvidia GPU and CPU environments. `Client` does\nnot depend on specific hardware platforms.\n\nMindSpore Serving provides the following functions:\n\n- gRPC and RESTful APIs on clients\n- Pre-processing and post-processing of assembled models\n- Batch. Multiple instance requests are split and combined to meet the `batch size` requirement of the model.\n- Simple Python APIs on clients\n- The multi-model combination is supported. The multi-model combination and single-model scenarios use the same set of\n  interfaces.\n- Distributed model inference\n\n## Installation\n\nFor details about how to install and configure MindSpore Serving, see the [MindSpore Serving installation page](https://www.mindspore.cn/serving/docs/en/master/serving_install.html).\n\n## Quick Start\n\n[MindSpore-based Inference Service Deployment](https://www.mindspore.cn/serving/docs/en/master/serving_example.html) is\nused to demonstrate how to use MindSpore Serving.\n\n## Documents\n\n### Developer Guide\n\n- [gRPC-based MindSpore Serving Access](https://www.mindspore.cn/serving/docs/en/master/serving_grpc.html)\n- [RESTful-based MindSpore Serving Access](https://www.mindspore.cn/serving/docs/en/master/serving_restful.html)\n- [Services Provided Through Model Configuration](https://www.mindspore.cn/serving/docs/en/master/serving_model.html)\n- [Services Composed of Multiple Models](https://www.mindspore.cn/serving/docs/en/master/serving_model.html#services-composed-of-multiple-models)\n- [MindSpore Serving-based Distributed Inference Service Deployment](https://www.mindspore.cn/serving/docs/en/master/serving_distributed_example.html)\n\nFor more details about the installation guide, tutorials, and APIs,\nsee [MindSpore Python API](https://www.mindspore.cn/serving/docs/en/master/server.html).\n\n## Community\n\n### Governance\n\n[MindSpore Open 
Governance](https://gitee.com/mindspore/community/blob/master/governance.md)\n\n### Communication\n\n- [MindSpore Slack](https://join.slack.com/t/mindspore/shared_invite/zt-dgk65rli-3ex4xvS4wHX7UDmsQmfu8w) developer\n  communication platform\n\n## Contributions\n\nWelcome to MindSpore contribution.\n\n## Release Notes\n\n[RELEASE](RELEASE.md)\n\n## License\n\n[Apache License 2.0](LICENSE)\n"
  },
  {
    "path": "README_CN.md",
    "content": "# MindSpore Serving\n\n[View English](./README.md)\n\n<!-- TOC -->\n\n- [MindSpore Serving](#mindspore-serving)\n    - [概述](#概述)\n    - [安装](#安装)\n        - [安装Serving](#安装serving)\n        - [配置环境变量](#配置环境变量)\n    - [快速入门](#快速入门)\n    - [文档](#文档)\n        - [开发者教程](#开发者教程)\n    - [社区](#社区)\n        - [治理](#治理)\n        - [交流](#交流)\n    - [贡献](#贡献)\n    - [版本说明](#版本说明)\n    - [许可证](#许可证)\n\n<!-- /TOC -->\n\n## 概述\n\nMindSpore Serving是一个轻量级、高性能的服务模块，旨在帮助MindSpore开发者在生产环境中高效部署在线推理服务。当用户使用MindSpore完成模型训练\n后，导出MindSpore模型，即可使用MindSpore Serving创建该模型的推理服务。\n\nMindSpore Serving架构：\n\n<img src=\"docs/architecture.png\" alt=\"MindSpore Architecture\" width=\"600\"/>\n\nMindSpore Serving分为客户端、服务器两个部分。在客户端中，用户通过gRPC或RESTful接口向服务器下发推理服务命令。服务器包括主（`Main`）节点和\n一个或多个工作（`Worker`）节点，主节点管理所有的工作节点及其部署的模型信息，接受客户端的用户请求，并将请求分发给工作节点。每个工作节点部署了\n一个可服务对象，即`Servable`，这里的`Servable`可以是单个模型，也可以是多个模型的组合，一个`Servable`可以围绕相同的模型通过多种方法来提供\n不同的服务。\n\n对于服务端，当以[MindSpore](#https://www.mindspore.cn/)作为推理后端时，MindSpore Serving当前支持Ascend 910和Nvidia\nGPU环境。当以[MindSpore Lite](#https://www.mindspore.cn/lite)作为推理后端时，MindSpore Serving当前支持Ascend 310/310P、Nvidia\nGPU和CPU。客户端不依赖特定硬件平台。\n\nMindSpore Serving提供以下功能：\n\n- 支持客户端gRPC和RESTful接口。\n- 支持组装模型的前处理和后处理。\n- 支持batch功能，多实例请求会被拆分组合以满足模型`batch size`的需要。\n- 提供客户端Python简易接口。\n- 支持多模型组合，多模型组合和单模型场景使用相同的一套接口。\n- 支持分布式模型推理功能。\n\n## 安装\n\nMindSpore Serving安装和配置可以参考[MindSpore Serving安装页面](https://www.mindspore.cn/serving/docs/zh-CN/master/serving_install.html)。\n\n## 快速入门\n\n以一个简单的[Add网络示例](https://www.mindspore.cn/serving/docs/zh-CN/master/serving_example.html)，演示MindSpore Serving如何使用。\n\n## 文档\n\n### 开发者教程\n\n- [基于gRPC接口访问MindSpore Serving服务](https://www.mindspore.cn/serving/docs/zh-CN/master/serving_grpc.html)\n- [基于RESTful接口访问MindSpore Serving服务](https://www.mindspore.cn/serving/docs/zh-CN/master/serving_restful.html)\n- [配置模型提供服务](https://www.mindspore.cn/serving/docs/zh-CN/master/serving_model.html)\n- 
[配置多模型组合的服务](https://www.mindspore.cn/serving/docs/zh-CN/master/serving_model.html#id9)\n- [基于MindSpore Serving部署分布式推理服务](https://www.mindspore.cn/serving/docs/zh-CN/master/serving_distributed_example.html)\n\n有关安装指南、教程和API的更多详细信息，请参阅[用户文档](https://www.mindspore.cn/serving/docs/zh-CN/master/server.html)。\n\n## 社区\n\n### 治理\n\n查看MindSpore如何进行[开放治理](https://gitee.com/mindspore/community/blob/master/governance.md)。\n\n### 交流\n\n- [MindSpore Slack](https://join.slack.com/t/mindspore/shared_invite/zt-dgk65rli-3ex4xvS4wHX7UDmsQmfu8w) 开发者交流平台。\n\n## 贡献\n\n欢迎参与贡献。\n\n## 版本说明\n\n版本说明请参阅[RELEASE](RELEASE.md)。\n\n## 许可证\n\n[Apache License 2.0](LICENSE)\n"
  },
  {
    "path": "RELEASE.md",
    "content": "# MindSpore Serving Release Notes\n\n[查看中文](./RELEASE_CN.md)\n\n## MindSpore Serving 2.0.2 Release Notes\n\n### Major Features and Improvements\n\n- Released based on MindSpore 2.2.0.\n- Fix third-party OpenSSL vulnerabilities: CVE-2023-3446 and CVE-2023-4807.\n\n### Contributors\n\nThanks goes to these wonderful people:\n\nqinzheng, xuyongfei, zhangyinxia, zhoufeng.\n\nContributions of any kind are welcome!\n\n## MindSpore Serving 2.0.0 Release Notes\n\n### Major Features and Improvements\n\n- Released based on MindSpore 2.0.0rc1.\n- Fix third-party OpenSSL vulnerabilities: CVE-2022-4304, CVE-2022-4450, CVE-2022-4450, CVE-2023-0286, CVE-2023-0464, CVE-2023-0465 and CVE-2023-0466.\n\n### Contributors\n\nThanks goes to these wonderful people:\n\nqinzheng, xuyongfei, zhangyinxia, zhoufeng.\n\nContributions of any kind are welcome!\n\n## MindSpore Serving 1.8.0 Release Notes\n\n### Major Features and Improvements\n\n- [STABLE] When deploying a large-scale model with parallel pipeline, Serving supports parallel pipeline processing of multiple inference instances.\n\n### Contributors\n\nThanks goes to these wonderful people:\n\nqinzheng, xuyongfei, zhangyinxia, zhoufeng.\n\nContributions of any kind are welcome!\n\n## MindSpore Serving 1.7.0 Release Notes\n\n### Major Features and Improvements\n\n- [DEMO] Ascend 310P can be used as the inference device, for more detail see [MindSpore Serving backend](https://www.mindspore.cn/serving/docs/en/master/serving_install.html#installation).\n- [DEMO] Support models of MindIR format when MindSpore Lite is used as the MindSpore Serving inference backend, for more detail see [MindSpore Serving backend](https://www.mindspore.cn/serving/docs/en/master/serving_install.html#installation).\n\n#### Deprecations\n\n##### Python API\n\n- `AclOptions` and `GpuOptions` are removed from version 1.7.0, and use `AscendDeviceInfo` and `GPUDeviceInfo` instead.\n- `register.declare_sevable` and `register.call_servable` are removed 
from version 1.7.0, and use `register.declare_model` and `register.add_stage` instead.\n- `register.call_preprocess`, `register.call_preprocess_pipeline`, `register.call_postprocess` and `register.call_postprocess_pipeline` are removed from version 1.7.0, and use `register.add_stage` instead.\n\n### Contributors\n\nThanks goes to these wonderful people:\n\nqinzheng, xuyongfei, zhangyinxia, zhoufeng.\n\nContributions of any kind are welcome!\n\n## MindSpore Serving 1.6.0 Release Notes\n\n### Major Features and Improvements\n\n- [STABLE] We can use existing interfaces(`declare_model` and `add_stage`) that define single-model services to define\n  multi-model composite services.\n- [STABLE] When the number of occupied devices is fixed, additional worker processes(using parameter\n  `num_parallel_workers`) are supported to accelerate Python functions such as preprocessing and postprocessing,\n  improving device utilization.\n- [STABLE] The interface `Model.call` is a stable feature, and can be used to define complex model invocation processes\n  in the Serving server, such as looping and conditional branching.\n- [STABLE] The new interfaces `Context`, `CPUDeviceInfo`, `GPUDeviceInfo`, `AscendDeviceInfo` are provided to set\n  user-defined device information. The original interfaces `GpuOptions` and `AclOptions` are deprecated.\n- [BETA] We support MindSpore Lite as the MindSpore Serving inference backend, for more detail see\n  [MindSpore Serving backend](https://www.mindspore.cn/serving/docs/en/master/serving_install.html#installation).\n\n### API Change\n\n#### New features\n\n##### Python API\n\n###### Multi-model composite services\n\nWe can use existing interfaces(`declare_model` and `add_stage`) that define single-model services to define\nmulti-model composite services. 
For more detail, see [Services Composed of Multiple Models](https://www.mindspore.cn/serving/docs/en/master/serving_model.html#services-composed-of-multiple-models).\n\n```python\nfrom mindspore_serving.server import register\n\nadd_model = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\nsub_model = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=[\"y\"])\ndef add_sub_only_model(x1, x2, x3):  # x1+x2-x3\n    y = register.add_stage(add_model, x1, x2, outputs_count=1)\n    y = register.add_stage(sub_model, y, x3, outputs_count=1)\n    return y\n```\n\n###### Additional worker processes are supported to accelerate Python functions(preprocessing and postprocessing)\n\nParameter `num_parallel_workers` in class `ServableStartConfig` is a stable feature. It can be used to configure the\ntotal number of workers. The number of workers occupying devices is determined by the length of parameter `device_ids`.\nAdditional worker processes use worker processes that occupy devices for model inference. For more detail, see\n[Multi-process Concurrency](https://www.mindspore.cn/serving/docs/en/master/serving_model.html#multi-process-concurrency).\n\n```python\nclass ServableStartConfig:\n    def __init__(self, servable_directory, servable_name, device_ids, version_number=0, device_type=None,\n                 num_parallel_workers=0, dec_key=None, dec_mode='AES-GCM')\n```\n\nStart the serving server that contains the `resnet50` servable. 
The `resnet50` servable has four worker\nprocesses(`num_parallel_workers`), one of which occupies the device(`device_ids`).\n\n```python\nimport os\nimport sys\nfrom mindspore_serving import server\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    # Total 4 worker, one worker occupy device 0, the model inference tasks of other workers are forwarded to the worker\n    # that occupies the device.\n    config = server.ServableStartConfig(servable_directory=servable_dir,\n                                        servable_name=\"resnet50\", device_ids=0,\n                                        num_parallel_workers=4)\n    server.start_servables(config)\n\n    server.start_grpc_server(\"127.0.0.1:5500\")\n    server.start_restful_server(\"127.0.0.1:1500\")\n\nif __name__ == \"__main__\":\n    start()\n```\n\n###### Model.call interface can be used to define complex model invocation processes\n\nThe interface `Model.call` is a stable feature, and can be used to define complex model invocation processes\nin the Serving server, such as looping and conditional branching.\n\n```python\nfrom mindspore_serving.server import register\n\nimport numpy as np\nfrom .tokenizer import create_tokenizer, padding, END_TOKEN\n\nbert_model = register.declare_model(model_file=\"bert_poetry.mindir\", model_format=\"MindIR\")\n\ndef calc_new_token(probas):\n  ...\n  return new_token_id\n\ntokenizer = create_tokenizer()\n\ndef generate_sentence(input_sentence):\n  input_token_ids = tokenizer.encode(input_sentence)\n  target_ids = []\n  MAX_LEN = 64\n  while len(input_token_ids) + len(target_ids) < MAX_LEN:\n    input_ids = padding(np.array(input_token_ids + target_ids), length=128)\n    pad_mask = (input_ids != 0).astype(np.float32)\n    probas = bert_model.call(input_ids, pad_mask)  # call bert model to generate token id of new word\n    new_token_id = calc_new_token(probas[len(input_token_ids)])\n    target_ids.append(new_token_id)\n    if new_token_id 
== END_TOKEN:\n      break\n  output_sentence = tokenizer.decode(input_token_ids + target_ids)\n  return output_sentence\n\n\n@register.register_method(output_names=[\"output_sentence\"])\ndef predict(input_sentence):\n  output_sentence = register.add_stage(generate_sentence, input_sentence, outputs_count=1)\n  return output_sentence\n```\n\n#### Deprecations\n\n##### Python API\n\n- The parameter `options` in `register.declare_model` is deprecated from version 1.6.0 and will be removed in a future version, use parameter `context` instead.\n- `AclOptions` and `GpuOptions` are deprecated from version 1.6.0 and will be removed in a future version, use `AscendDeviceInfo` and `GPUDeviceInfo` instead.\n\n### Contributors\n\nThanks goes to these wonderful people:\n\nqinzheng, xuyongfei, zhangyinxia, zhoufeng.\n\nContributions of any kind are welcome!\n\n## MindSpore Serving 1.5.0 Release Notes\n\n### Major Features and Improvements\n\n- [STABLE] To support multi-model orchestration (to be released in version 1.6), a set of APIs (`declare_model`\n  and `add_stage`) is added. The new APIs will be used in single-model and multi-model scenarios. The old\n  APIs(`register.declare_servable`,`call_servable`,`call_preprocess`,`call_postprocess`) used in single-model scenarios\n  are deprecated.\n- [BETA] When the number of occupied devices is fixed, additional worker processes are supported to accelerate Python\n  functions such as preprocessing and postprocessing, improving device utilization.\n- [BETA]`Model.call` interface is added to support invoking models in Python functions.\n\n### API Change\n\n#### API Incompatible Change\n\n##### Python API\n\n###### New set of APIs for single-model and multi-model scenarios\n\nTo support multiple models(will be officially released in version 1.6), a set of APIs (`declare_model` and `add_stage`)\nis added. The single-model and multi-model scenarios will use the same set of APIs.\n\nNew APIs are recommended in single-model scenarios. 
Old APIs (`declare_servable`,`call_servable`,`call_preprocess`,\n`call_postprocess`) are deprecated.\n\n<table>\n<tr>\n<td style=\"text-align:center\"> 1.4 </td> <td style=\"text-align:center\"> 1.5 </td>\n</tr>\n<tr>\n<td>\n\n```python\nfrom mindspore_serving.server import register\n\nregister.declare_servable(servable_file=\"resnet.mindir\",\n                          model_format=\"MindIR\")\n\ndef resnet_preprocess(image):\n    ....\n\ndef resnet_postprocess(scores):\n    ....\n\n@register.register_method(output_names=[\"label\"])\ndef predict(image):\n    x = register.call_preprocess(resnet_preprocess, image)\n    x = register.call_servable(x)\n    x = register.call_postprocess(resnet_postprocess, x)\n    return x\n```\n\n</td>\n<td>\n\n```python\nfrom mindspore_serving.server import register\n\nresnet_model = register.declare_model(model_file=\"resnet.mindir\",\n                                      model_format=\"MindIR\")\n\ndef resnet_preprocess(image):\n    ....\n\ndef resnet_postprocess(scores):\n    ....\n\n@register.register_method(output_names=[\"label\"])\ndef predict(image):\n    x = register.add_stage(resnet_preprocess, image, outputs_count=1)\n    x = register.add_stage(resnet_model, x, outputs_count=1)\n    x = register.add_stage(resnet_postprocess, x, outputs_count=1)\n    return x\n```\n\n</td>\n</tr>\n</table>\n\n#### New features\n\n##### Python API\n\n###### Additional worker processes are supported to accelerate Python functions(preprocessing and postprocessing)\n\nParameter `num_parallel_workers` is added to class `ServableStartConfig` to configure the total number of workers. The\nnumber of workers occupying devices is determined by the length of parameter `device_ids`. 
Additional worker processes\nuse worker processes that occupy devices for model inference.\n\n```python\nclass ServableStartConfig:\n    def __init__(self, servable_directory, servable_name, device_ids, version_number=0, device_type=None,\n                 num_parallel_workers=0, dec_key=None, dec_mode='AES-GCM')\n```\n\nStart the serving server that contains the `resnet50` servable. The `resnet50` servable has four worker\nprocesses(`num_parallel_workers`), one of which occupies the device(`device_ids`).\n\n```python\nimport os\nimport sys\nfrom mindspore_serving import server\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    # Total 4 worker, one worker occupy device 0, the model inference tasks of other workers are forwarded to the worker\n    # that occupies the device.\n    config = server.ServableStartConfig(servable_directory=servable_dir,\n                                        servable_name=\"resnet50\", device_ids=0,\n                                        num_parallel_workers=4)\n    server.start_servables(config)\n\n    server.start_grpc_server(\"127.0.0.1:5500\")\n    server.start_restful_server(\"127.0.0.1:1500\")\n\nif __name__ == \"__main__\":\n    start()\n```\n\n###### Model.call interface is added to support invoking models in Python functions\n\n```python\nfrom mindspore_serving.server import register\n\nadd_model = register.declare_model(model_file=\"tensor_add.mindir\",\n                                   model_format=\"MindIR\")\n\ndef add_func(x1, x2, x3, x4):\n    instances = []\n    instances.append((x1, x2))\n    instances.append((x3, x4))\n    output_instances = add_model.call(instances)  # for multi instances\n    y1 = output_instances[0][0]  # instance 0 output 0\n    y2 = output_instances[1][0]  # instance 1 output 0\n    y = add_model.call(y1, y2)  # for single instance\n    return y\n\n@register.register_method(output_names=[\"y\"])\ndef predict(x1, x2, x3, x4):\n    y = 
register.add_stage(add_func, x1, x2, x3, x4, outputs_count=1)\n    return y\n```\n\n#### Deprecations\n\n##### Python API\n\n- `register.declare_servable`,`call_servable`,`call_preprocess`,`call_postprocess`,`call_preprocess_pipeline`\n  and `call_postprocess_pipeline` are now deprecated in favor of `register.declare_model` and `add_stage`, as shown above.\n  Deprecated interfaces will be deleted in the future.\n- Beta interfaces `PipelineServable` and `register_pipeline` introduced in version 1.3 will be deleted and replaced\n  with `Model.call`.\n\n### Contributors\n\nThanks goes to these wonderful people:\n\nchenweifeng, qinzheng, xuyongfei, zhangyinxia, zhoufeng.\n\nContributions of any kind are welcome!\n\n## MindSpore Serving 1.3.0 Release Notes\n\n### Major Features and Improvements\n\n- [STABLE] Enhances and simplifies the deployment and startup of single-chip models. Multiple models can be loaded by a\n  single script. Each model can have multiple copies on multiple chips. Requests can be split and distributed to these\n  copies for concurrent execution.\n- [STABLE] The `master`+`worker` interface of the Serving server is changed to the `server` interface.\n- [STABLE] The client and server support Unix Domain Socket-based gRPC communication.\n- [STABLE] gRPC and RESTful interfaces support TLS/SSL security authentication.\n- [STABLE] The MindIR encryption model is supported.\n- [BETA] Incremental inference models consisting of multiple static graphs are supported, including single-card models\n  and distributed models.\n\n### API Change\n\n#### API Incompatible Change\n\n##### Python API\n\n###### Enhances and simplifies the deployment and startup of single-chip models\n\nMultiple models can be loaded by a single script. Each model can have multiple copies on multiple chips. 
Requests can be\nsplit and distributed to these copies for concurrent execution.\n\nInterface `worker.start_servable_in_master` that can start only a single servable is changed to\ninterface `server.start_servables` that can start multiple servables, and each servable can correspond to multiple\ncopies. In addition, related interface `server.ServableStartConfig` is added.\n\n<table>\n<tr>\n<td style=\"text-align:center\"> 1.2.x </td> <td style=\"text-align:center\"> 1.3.0 </td>\n</tr>\n<tr>\n<td>\n\n```python\nimport os\nimport sys\nfrom mindspore_serving import master\nfrom mindspore_serving import worker\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    # deploy model add on device 0\n    worker.start_servable_in_master(servable_dir, \"add\", device_id=0)\n\n    master.start_grpc_server(\"127.0.0.1\", 5500)\n    master.start_restful_server(\"127.0.0.1\", 1500)\n\nif __name__ == \"__main__\":\n    start()\n```\n\n</td>\n<td>\n\n```python\nimport os\nimport sys\nfrom mindspore_serving import server\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    # deploy model add on devices 0 and 1\n    add_config = server.ServableStartConfig(servable_directory=servable_dir,\n                                            servable_name=\"add\",\n                                            device_ids=(0, 1))\n    # deploy model resnet50 on devices 2 and 3\n    resnet50_config = server.ServableStartConfig(servable_directory=servable_dir,\n                                                 servable_name=\"resnet50\",\n                                                 device_ids=(2, 3))\n    server.start_servables(servable_configs=(add_config, resnet50_config))\n\n    server.start_grpc_server(address=\"127.0.0.1:5500\")\n    server.start_restful_server(address=\"127.0.0.1:1500\")\n\nif __name__ == \"__main__\":\n    start()\n```\n\n</td>\n</tr>\n</table>\n\n###### `mindspore_serving.worker.register` is updated 
to `mindspore_serving.server.register`\n\n<table>\n<tr>\n<td style=\"text-align:center\"> 1.2.x </td> <td style=\"text-align:center\"> 1.3.0 </td>\n</tr>\n<tr>\n<td>\n\n```python\nfrom mindspore_serving.worker import register\n```\n\n</td>\n<td>\n\n```python\nfrom mindspore_serving.server import register\n```\n\n</td>\n</tr>\n</table>\n\n###### The gRPC and RESTful startup interfaces are updated. The namespace is changed from master to server, and the input parameters `ip` and `port` are changed to `address` only\n\n<table>\n<tr>\n<td style=\"text-align:center\"> 1.2.x </td> <td style=\"text-align:center\"> 1.3.0 </td>\n</tr>\n<tr>\n<td>\n\n```python\nfrom mindspore_serving import master\n\nmaster.start_grpc_server(\"127.0.0.1\", 5500)\nmaster.start_restful_server(\"127.0.0.1\", 1500)\nmaster.stop()\n```\n\n</td>\n<td>\n\n```python\nfrom mindspore_serving import server\n\nserver.start_grpc_server(\"127.0.0.1:5500\")\nserver.start_restful_server(\"127.0.0.1:1500\")\nserver.stop()\n```\n\n</td>\n</tr>\n</table>\n\n###### The name of the distributed interface function is simplified, and the namespace is changed from `worker` to `server`\n\nIn `servable_config.py` of distributed model:\n\n<table>\n<tr>\n<td style=\"text-align:center\"> 1.2.x </td> <td style=\"text-align:center\"> 1.3.0 </td>\n</tr>\n<tr>\n<td>\n\n```python\nfrom mindspore_serving.worker import distributed\n\ndistributed.declare_distributed_servable(\n    rank_size=8, stage_size=1, with_batch_dim=False)\n```\n\n</td>\n<td>\n\n```python\nfrom mindspore_serving.server import distributed\n\ndistributed.declare_servable(\n    rank_size=8, stage_size=1, with_batch_dim=False)\n```\n\n</td>\n</tr>\n</table>\n\nIn startup script of distributed model:\n\n<table>\n<tr>\n<td style=\"text-align:center\"> 1.2.x </td> <td style=\"text-align:center\"> 1.3.0 </td>\n</tr>\n<tr>\n<td>\n\n```python\nimport os\nimport sys\nfrom mindspore_serving import master\nfrom mindspore_serving.worker import distributed\n\ndef 
start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    distributed.start_distributed_servable_in_master(\n        servable_dir, \"matmul\",\n        rank_table_json_file=\"rank_table_8pcs.json\",\n        version_number=1,\n        worker_ip=\"127.0.0.1\", worker_port=6200)\n\n    master.start_grpc_server(\"127.0.0.1\", 5500)\n    master.start_restful_server(\"127.0.0.1\", 1500)\n\nif __name__ == \"__main__\":\n    start()\n```\n\n</td>\n<td>\n\n```python\nimport os\nimport sys\nfrom mindspore_serving import server\nfrom mindspore_serving.server import distributed\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    distributed.start_servable(\n        servable_dir, \"matmul\",\n        rank_table_json_file=\"rank_table_8pcs.json\",\n        version_number=1,\n        distributed_address=\"127.0.0.1:6200\")\n\n    server.start_grpc_server(\"127.0.0.1:5500\")\n    server.start_restful_server(\"127.0.0.1:1500\")\n\nif __name__ == \"__main__\":\n    start()\n```\n\n</td>\n</tr>\n</table>\n\nIn agent startup script of distributed model:\n\n<table>\n<tr>\n<td style=\"text-align:center\"> 1.2.x </td> <td style=\"text-align:center\"> 1.3.0 </td>\n</tr>\n<tr>\n<td>\n\n```python\nfrom mindspore_serving.worker import distributed\n\ndef start_agents():\n    \"\"\"Start all the worker agents in current machine\"\"\"\n    model_files = []\n    group_configs = []\n    for i in range(8):\n        model_files.append(f\"model/device{i}/matmul.mindir\")\n        group_configs.append(f\"model/device{i}/group_config.pb\")\n\n    distributed.startup_worker_agents(\n        worker_ip=\"127.0.0.1\", worker_port=6200,\n        model_files=model_files,\n        group_config_files=group_configs)\n\nif __name__ == '__main__':\n    start_agents()\n```\n\n</td>\n<td>\n\n```python\nfrom mindspore_serving.server import distributed\n\ndef start_agents():\n    \"\"\"Start all the agents in current machine\"\"\"\n    model_files = []\n    
group_configs = []\n    for i in range(8):\n        model_files.append(f\"model/device{i}/matmul.mindir\")\n        group_configs.append(f\"model/device{i}/group_config.pb\")\n\n    distributed.startup_agents(\n        distributed_address=\"127.0.0.1:6200\",\n        model_files=model_files,\n        group_config_files=group_configs)\n\nif __name__ == '__main__':\n    start_agents()\n```\n\n</td>\n</tr>\n</table>\n\n###### The input parameters `ip`+`port` of the gRPC client are changed to `address`\n\nIn addition to the {ip}:{port} address format, the Unix Domain Socket in the unix:{unix_domain_file_path} format is\nsupported.\n\n<table>\n<tr>\n<td style=\"text-align:center\"> 1.2.x </td> <td style=\"text-align:center\"> 1.3.0 </td>\n</tr>\n<tr>\n<td>\n\n```python\nimport numpy as np\nfrom mindspore_serving.client import Client\n\ndef run_add_cast():\n    \"\"\"invoke servable add method add_cast\"\"\"\n    client = Client(\"localhost\", 5500, \"add\", \"add_cast\")\n    instances = []\n    x1 = np.ones((2, 2), np.int32)\n    x2 = np.ones((2, 2), np.int32)\n    instances.append({\"x1\": x1, \"x2\": x2})\n    result = client.infer(instances)\n    print(result)\n\nif __name__ == '__main__':\n    run_add_cast()\n```\n\n</td>\n<td>\n\n```python\nimport numpy as np\nfrom mindspore_serving.client import Client\n\ndef run_add_cast():\n    \"\"\"invoke servable add method add_cast\"\"\"\n    client = Client(\"127.0.0.1:5500\", \"add\", \"add_cast\")\n    instances = []\n    x1 = np.ones((2, 2), np.int32)\n    x2 = np.ones((2, 2), np.int32)\n    instances.append({\"x1\": x1, \"x2\": x2})\n    result = client.infer(instances)\n    print(result)\n\nif __name__ == '__main__':\n    run_add_cast()\n```\n\n</td>\n</tr>\n</table>\n\n#### New features\n\n##### Python API\n\n###### Support Unix Domain Socket\n\nThe Serving server:\n\n```python\nimport os\nimport sys\nfrom mindspore_serving import server\n\n\ndef start():\n    servable_dir = 
os.path.dirname(os.path.realpath(sys.argv[0]))\n    servable_config = server.ServableStartConfig(servable_directory=servable_dir, servable_name=\"resnet50\",\n                                                 device_ids=(0, 1))\n    server.start_servables(servable_configs=servable_config)\n    server.start_grpc_server(address=\"unix:/tmp/serving_resnet50_test_temp_file\")\n\n\nif __name__ == \"__main__\":\n    start()\n```\n\nThe Serving client:\n\n```python\nimport os\nfrom mindspore_serving.client import Client\n\n\ndef run_classify_top1():\n    client = Client(\"unix:/tmp/serving_resnet50_test_temp_file\", \"resnet50\", \"classify_top1\")\n    instances = []\n    for path, _, file_list in os.walk(\"./test_image/\"):\n        for file_name in file_list:\n            image_file = os.path.join(path, file_name)\n            print(image_file)\n            with open(image_file, \"rb\") as fp:\n                instances.append({\"image\": fp.read()})\n    result = client.infer(instances)\n    print(result)\n\n\nif __name__ == '__main__':\n    run_classify_top1()\n```\n\n###### Support SSL/TLS\n\nThe Serving server:\n\n```python\nimport os\nimport sys\nfrom mindspore_serving import server\n\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    servable_config = server.ServableStartConfig(servable_directory=servable_dir, servable_name=\"add\",\n                                                 device_ids=(0, 1))\n    server.start_servables(servable_configs=servable_config)\n    ssl_config = server.SSLConfig(certificate=\"server.crt\", private_key=\"server.key\", custom_ca=None,\n                                  verify_client=False)\n    server.start_grpc_server(address=\"127.0.0.1:5500\", ssl_config=ssl_config)\n    server.start_restful_server(address=\"127.0.0.1:1500\", ssl_config=ssl_config)\n\n\nif __name__ == \"__main__\":\n    start()\n```\n\nThe gRPC Serving client:\n\n```python\nfrom mindspore_serving.client import Client\nfrom 
mindspore_serving.client import SSLConfig\nimport numpy as np\n\n\ndef run_add_common():\n    \"\"\"invoke Servable add method add_common\"\"\"\n    ssl_config = SSLConfig(custom_ca=\"ca.crt\")\n    client = Client(\"localhost:5500\", \"add\", \"add_common\", ssl_config=ssl_config)\n    instances = []\n\n    # instance 1\n    x1 = np.asarray([[1, 1], [1, 1]]).astype(np.float32)\n    x2 = np.asarray([[1, 1], [1, 1]]).astype(np.float32)\n    instances.append({\"x1\": x1, \"x2\": x2})\n\n    result = client.infer(instances)\n    print(result)\n\n\nif __name__ == '__main__':\n    run_add_common()\n```\n\nThe RESTful client\n\n```shell\n>>> curl -X POST -d '{\"instances\":{\"x1\":[[1.0, 1.0], [1.0, 1.0]], \"x2\":[[1.0, 1.0], [1.0, 1.0]]}}' --insecure https://127.0.0.1:1500/model/add:add_common\n{\"instances\":[{\"y\":[[2.0,2.0],[2.0,2.0]]}]}\n```\n\n###### Support encryption MindIR model\n\n```python\n# export model\nimport mindspore as ms\n\n# define add network\n# export encryption model\nms.export(add, ms.Tensor(x), ms.Tensor(y), file_name='tensor_add_enc', file_format='MINDIR',\n          enc_key=\"asdfasdfasdfasgwegw12310\".encode(), enc_mode='AES-GCM')\n```\n\n```python\n# start Serving server\nimport os\nimport sys\nfrom mindspore_serving import server\n\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    servable_config = server.ServableStartConfig(servable_directory=servable_dir, servable_name=\"add\",\n                                                 device_ids=(0, 1),\n                                                 dec_key='asdfasdfasdfasgwegw12310'.encode(), dec_mode='AES-CBC')\n    server.start_servables(servable_configs=servable_config)\n\n    server.start_grpc_server(address=\"127.0.0.1:5500\")\n    server.start_restful_server(address=\"127.0.0.1:1500\")\n\n\nif __name__ == \"__main__\":\n    start()\n```\n\n###### [BETA] Support incremental inference models consisting of multiple static graphs\n\nA Incremental 
inference models can include a full input graph and an incremental input graph, and the Serving\norchestrates the two static graphs using a user-defined Python script. For more details, please refer\nto [Serving pangu alpha](https://gitee.com/mindspore/models/tree/master/official/nlp/Pangu_alpha/serving_increment)\n.\n\n#### Deprecations\n\n##### Python API\n\n- `mindspore_serving.master` and `mindspore_serving.worker` are now deprecated in favor of `mindspore_serving.server`,\n  as shown above. Deprecated interfaces will be deleted in the next iteration.\n\n- The following interfaces are directly deleted. That is, workers of one serving server can no longer be deployed on\n  other machines. Users are no longer aware of workers at the interface layer.\n\n```python\nmindspore_serving.worker.start_servable\nmindspore_serving.worker.distributed.start_distributed_servable\nmindspore_serving.master.start_master_server\n```\n\n### Contributors\n\nThanks goes to these wonderful people:\n\nchenweifeng, qinzheng, xuyongfei, zhangyinxia, zhoufeng.\n\nContributions of any kind are welcome!\n\n## MindSpore Serving 1.2.0 Release Notes\n\n### Major Features and Improvements\n\n- [STABLE] Support distributed inference, it needs to cooperate with distributed training to export distributed models\n  for super-large-scale neural network parameters(Ascend 910).\n- [STABLE] Support GPU platform, Serving worker nodes can be deployed on Nvidia GPU, Ascend 310 and Ascend 910.\n- This release is based on MindSpore version 1.2.0\n- Support Python 3.8 and 3.9.\n\n### API Change\n\n#### API Incompatible Change\n\n##### Python API\n\nSupport deployment of distributed model, refer\nto [distributed inference tutorial](https://www.mindspore.cn/serving/docs/en/master/serving_distributed_example.html)\nfor related API.\n\n#### Deprecations\n\n##### Python API\n\n### Bug Fixes\n\n### Contributors\n\nThanks goes to these wonderful people:\n\nchenweifeng, qinzheng, xujincai, xuyongfei, zhangyinxia, 
zhoufeng.\n\nContributions of any kind are welcome!\n\n## MindSpore Serving 1.1.1 Release Notes\n\n### Major Features and Improvements\n\n- Adapts new C++ inference interface for MindSpore version 1.1.1.\n\n### Bug fixes\n\n- [BUGFIX] Fix bug in transforming result of type int16 in python Client.\n- [BUGFIX] Fix bytes type misidentified as str type after python preprocess and postprocess.\n- [BUGFIX] Fix bug releasing C++ tensor data when it's wrapped as numpy object sometimes.\n- [BUGFIX] Update RuntimeError to warning log when check Ascend environment failed.\n\n## MindSpore Serving 1.1.0 Release Notes\n\n### Major Features and Improvements\n\n- [STABLE] Support gRPC and RESTful API.\n- [STABLE] Support simple Python API for Client and Server.\n- [STABLE] Support Model configuration, users can customize preprocessing & postprocessing for model.\n- [STABLE] Support multiple models, multiple models can run simultaneously.\n- [STABLE] Support Model batching, multiple instances will be split and combined to meet the batch size requirements of\n  the model.\n- This release is based on MindSpore version 1.1.0\n\n### Bug Fixes\n\n### Contributors\n"
  },
  {
    "path": "RELEASE_CN.md",
    "content": "# MindSpore Serving Release Notes\n\n[View English](./RELEASE.md)\n\n## MindSpore Serving 2.0.2 Release Notes\n\n### 主要特性和增强\n\n- 配套MindSpore 2.2.0版本接口。\n- 修复第三方库OpenSSL漏洞CVE-2023-3446、CVE-2023-4807。\n\n### 贡献者\n\n感谢以下人员做出的贡献:\n\nqinzheng, xuyongfei, zhangyinxia, zhoufeng.\n\n欢迎以任何形式对项目提供贡献！\n\n## MindSpore Serving 2.0.0 Release Notes\n\n### 主要特性和增强\n\n- 配套MindSpore 2.0.0rc1版本接口。\n- 修复第三方库OpenSSL漏洞CVE-2022-4304、CVE-2022-4450、CVE-2023-0286、CVE-2023-0464、CVE-2023-0465、CVE-2023-0466。\n\n### 贡献者\n\n感谢以下人员做出的贡献:\n\nqinzheng, xuyongfei, zhangyinxia, zhoufeng.\n\n欢迎以任何形式对项目提供贡献！\n\n## MindSpore Serving 1.8.0 Release Notes\n\n### 主要特性和增强\n\n- [STABLE] Serving部署流水线并行的大模型时，支持流水线并行处理多个推理实例。\n\n### 贡献者\n\n感谢以下人员做出的贡献：\n\nqinzheng, xuyongfei, zhangyinxia, zhoufeng.\n\n欢迎以任何形式对项目提供贡献！\n\n## MindSpore Serving 1.7.0 Release Notes\n\n### 主要特性和增强\n\n- [DEMO] Ascend 310P可以作为MindSpore Serving的硬件后端，详情可参考[MindSpore Serving后端](https://www.mindspore.cn/serving/docs/zh-CN/master/serving_install.html#installation)。\n- [DEMO] MindSpore Lite作为MindSpore Serving推理后端时，支持MindIR模型格式，详情可参考[MindSpore Serving后端](https://www.mindspore.cn/serving/docs/zh-CN/master/serving_install.html#installation)。\n\n#### 不建议使用\n\n##### Python API\n\n- `AclOptions`和 `GpuOptions`从1.7.0版本开始被移除，使用 `AscendDeviceInfo`和 `GPUDeviceInfo`替代。\n- `register.declare_servable`和 `register.call_servable`从1.7.0版本开始被移除，使用 `register.declare_model`和 `register.add_stage`替代。\n- `register.call_preprocess`，`register.call_preprocess_pipeline`，`register.call_postprocess`和 `register.call_postprocess_pipeline`从1.7.0版本开始被移除，使用 `register.add_stage`替代。\n\n### 贡献者\n\n感谢以下人员做出的贡献:\n\nqinzheng, xuyongfei, zhangyinxia, zhoufeng.\n\n欢迎以任何形式对项目提供贡献！\n"
  },
  {
    "path": "Third_Party_Open_Source_Software_Notice",
    "content": "OPEN SOURCE SOFTWARE NOTICE\n\nPlease note we provide an open source software notice along with this product and/or this product firmware (in the following just “this product”). The open source software licenses are granted by the respective right holders. And the open source licenses prevail all other license information with regard to the respective open source software contained in the product, including but not limited to End User Software Licensing Agreement. This notice is provided on behalf of Huawei Technologies Co. Ltd. and any of its local subsidiaries which may have provided this product to you in your local country. \n\nWarranty Disclaimer    \nTHE OPEN SOURCE SOFTWARE IN THIS PRODUCT IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, BUT WITHOUT ANY WARRANTY, WITHOUT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE APPLICABLE LICENSES FOR MORE DETAILS.\n\nCopyright Notice and License Texts \n\nSoftware: Eigen 3.3.7\nCopyright notice: \nCopyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>\nCopyright (C) 2013 Christian Seiler <christian@iwakd.de>\nCopyright (C) 2015 Eugene Brevdo <ebrevdo@gmail.com>\nCopyright (C) 2014-2015 Benoit Steiner <benoit.steiner.goog@gmail.com>\nCopyright (C) 2015 Navdeep Jaitly <ndjaitly@google.com>\nCopyright (C) 2014 Eric Martin <eric@ericmart.in>\nCopyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>\nCopyright (C) 2016 Rasmus Munk Larsen <rmlarsen@google.com>\nCopyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>\nCopyright (C) 2015 Jianwei Cui <thucjw@gmail.com>\nCopyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>\nCopyright (C) 2015 Ke Yang <yangke@gmail.com>\nCopyright (C) 2016 Mehdi Goli, Codeplay Software Ltd <eigen@codeplay.com>\nCopyright (C) 2014 Navdeep Jaitly <ndjaitly@google.com>\nCopyright (C) 2016 Igor Babuschkin <igor@babuschk.in>\nCopyright (C) 2016 Dmitry Vyukov <dvyukov@google.com>\nCopyright (C) EDF R&D,  lun sep 
30 14:23:30 CEST 2002\nCopyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) EDF R&D,  lun sep 30 14:23:31 CEST 2002\nCopyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2009 Mark Borgerding mark a borgerding net\nCopyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2013 Desire Nuentsa <desire.nuentsawakam@inria.fr>\nCopyright (C) 2013 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsawakam@inria.fr>\nCopyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (C) EDF R&D,  lun sep 30 14:23:28 CEST 2002\nCopyright (C) 2010 Manuel Yguel <manuel.yguel@gmail.com>\nCopyright (C) 2009 Claire Maurice\nCopyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>\nCopyright (c) 2011, Intel Corporation. 
All rights reserved.\nCopyright (C) 2012-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2016 Tobias Wood <tobias@spinicist.org.uk>\nCopyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>\nCopyright (C) 2012 Alexey Korepanov <kaikaikai@yandex.ru>\nCopyright (C) 2010 Vincent Lejeune\nCopyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (C) 2009 Mathieu Gautier <mathieu.gautier@cea.fr>\nCopyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>\nCopyright (C) 2009 Hauke Heibel <hauke.heibel@gmail.com>\nCopyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) EDF R&D,  mar déc 3 18:59:36 CET 2002\nCopyright (C) EDF R&D,  lun sep 30 14:23:17 CEST 2002\nCopyright (C) EDF R&D,  mar déc 3 18:59:35 CET 2002\nCopyright (C) 2016 Konstantinos Margaritis <markos@freevec.org>\nCopyright (C) 2007 Julien Pommier\nCopyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2009 Keir Mierle <mierle@gmail.com>\nCopyright (C) 2011 Timothy E. Holy <tim.holy@gmail.com >\nCopyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>\nCopyright (C) 2012 Desire Nuentsa <desire.nuentsawakam@inria.fr>\nCopyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2015 Tal Hadad <talhd@hotmail.com>\n@copyright (c) 2009-2014 The University of Tennessee and The University of Tennessee Research Foundation.\n@copyright (c) 2012-2016 Inria. All rights reserved.\n@copyright (c) 2012-2014 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria, Univ. Bordeaux. 
All rights reserved.\nCopyright 2007-2009 Kitware, Inc.\nCopyright 2012-2013 Inria\nCopyright 2012-2013 Emmanuel Agullo\nCopyright 2012-2013 Mathieu Faverge\nCopyright 2012      Cedric Castagnede\nCopyright 2013-2016 Florent Pruvost\nCopyright 2016 Codeplay Software Ltd.\nCopyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>\nCopyright (c) 2008, 2009 Gael Guennebaud, <g.gael@free.fr>\nCopyright (c) 2009 Boudewijn Rempt <boud@valdyas.org>\n@copyright (c) 2012-2014 Inria. All rights reserved.\nCopyright 2013      Florent Pruvost\nCopyright (c) 2010 Jitse Niesen, <jitse@maths.leeds.ac.uk>\nCopyright (C) 2009 Benjamin Schindler <bschindler@inf.ethz.ch>\nCopyright (C) 2016 Pedro Gonnet (pedro.gonnet@gmail.com)\nCopyright (C) 2016 Benoit Steiner (benoit.steiner.goog@gmail.com)\nCopyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>\nCopyright (C) 2012-2013 Desire Nuentsa <desire.nuentsawakam@inria.fr>\nCopyright (C) 2012-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright Jorge More - Argonne National Laboratory\nCopyright Burt Garbow - Argonne National Laboratory\nCopyright Ken Hillstrom - Argonne National Laboratory\nCopyright (C) 2009 Ilya Baran <ibaran@mit.edu>\nCopyright (c) 2010, Intel Corp.\nCopyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (C) 2013-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>\nCopyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>\nCopyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>\nCopyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>\nCopyright (C) 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>\nCopyright (C) 2014-2017 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsawakam@inria.fr>\nCopyright (C) 2015 Gael Guennebaud 
<gael.guennebaud@inria.fr>\nCopyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (c) 1994 by Xerox Corporation.  All rights reserved.\nCopyright (C) 2001 Intel Corporation\nCopyright (c) 2001 Intel Corporation.\nCopyright (C) 2009 Gael Guennebaud <g.gael@free.fr>\nCopyright (C) 2013 Christoph Hertzberg <chtz@informatik.uni-bremen.de>\nCopyright (C) 2015 Eugene Brevdo <ebrevdo@google.com>\nCopyright (C) 2016\nMehdi Goli    Codeplay Software Ltd.\nRalph Potter  Codeplay Software Ltd.\nLuke Iwanski  Codeplay Software Ltd.\nCopyright (C) 2014 Jianwei Cui <thucjw@gmail.com>\nCopyright (C) 2015 Vijay Vasudevan <vrv@google.com>\nCopyright (C) 2015\nMehdi Goli    Codeplay Software Ltd.\nRalph Potter  Codeplay Software Ltd.\nLuke Iwanski  Codeplay Software Ltd.\nCopyright (C) 2014 Navdeep Jaitly <ndjaitly@google.com and Benoit Steiner <benoit.steiner.goog@gmail.com>\nCopyright (C) 2011 Gael Guennebaud <g.gael@free.fr>\nCopyright (C) 2012 desire Nuentsa <desire.nuentsawakam@inria.fr\nCopyright (C) 2008 Gael Guennebaud <g.gael@free.fr>\nCopyright (C) 2012 Kolja Brix <brix@igpm.rwth-aaachen.de>\nCopyright (C) 2011 Kolja Brix <brix@igpm.rwth-aachen.de>\nCopyright (C) 2011 Andreas Platen <andiplaten@gmx.de>\nCopyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>\nCopyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>\nCopyright (C) 2009-2011 Jitse Niesen <jitse@maths.leeds.ac.uk>\nCopyright (C) 2012, 2013 Chen-Pang He <jdh8@ms63.hinet.net>\nCopyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>\nCopyright (C) 2012 Giacomo Po <gpo@ucla.edu>\nCopyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>\nCopyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2010-2011 Hauke Heibel <heibel@gmail.com>\nCopyright (C) 2012 David Harmon <dharmon@gmail.com>\nCopyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (C) 2008-2009 Benoit Jacob 
<jacob.benoit.1@gmail.com>\nCopyright (C) 2009 Kenneth Riddile <kfriddile@yahoo.com>\nCopyright (C) 2010 Thomas Capricelli <orzel@freehackers.org>\nCopyright (C) 2013 Pavel Holoborodko <pavel@holoborodko.com>\nCopyright (C) EDF R&D,  lun sep 30 14:23:16 CEST 2002\nCopyright (C) EDF R&D,  mar déc 3 18:59:37 CET 2002\nCopyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (C) 2008-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (c) 2008-2015 Pavel Holoborodko\nCopyright (C) 20010-2011 Hauke Heibel <hauke.heibel@gmail.com>\nCopyright (c) 2006, Montel Laurent, <montel@kde.org>\nCopyright (c) 2007, Allen Winter, <winter@kde.org>\nCopyright (c) 2007, Alexander Neundorf, <neundorf@kde.org>\nCopyright (C) 2008 Guillaume Saupin <guillaume.saupin@cea.fr>\nCopyright (C) 2008-2009 Guillaume Saupin <guillaume.saupin@cea.fr>\nCopyright (C) 2009 Guillaume Saupin <guillaume.saupin@cea.fr>\nCopyright (C) 2010-2016 Konstantinos Margaritis <markos@freevec.org>\nCopyright (C) 2008-2016 Konstantinos Margaritis <markos@freevec.org>\nCopyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com)\nCopyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)\nCopyright (c) Fabian Giesen, 2016\nCopyright (C) 2010 Konstantinos Margaritis <markos@freevec.org>\nCopyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net>\nCopyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>\nCopyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com)\nCopyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2010-2013 Hauke Heibel <hauke.heibel@gmail.com>\nCopyright (C) 2006-2008, 2010 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (C) 2010-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2009 Ricard Marxer <email@ricardmarxer.com>\nCopyright (C) 2009-2014 Gael Guennebaud 
<gael.guennebaud@inria.fr>\nCopyright (C) 2010-2011 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2009 Rohit Garg <rpg.314@gmail.com>\nCopyright (c) 2006, Timothy A. Davis.\nCopyright (c) 1998-2003 by the University of Florida.\nCopyright (C) 2012  Désiré Nuentsa-Wakam <desire.nuentsawakam@inria.fr>\nCopyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>\nLDL Copyright (c) 2005 by Timothy A. Davis.  All Rights Reserved.\nCopyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>\nCopyright (C) EDF R&D,  lun sep 30 14:23:20 CEST 2002\nCopyright (C) EDF R&D,  lun sep 30 14:23:19 CEST 2002\nCopyright (C) 2009, 2010, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>\nCopyright (C) 2011, 2013 Chen-Pang He <jdh8@ms63.hinet.net>\nCopyright (C) 2009-2011, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>\nCopyright (C) 2011, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>\nCopyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>\nCopyright (C) 2010, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>\nCopyright (C) 2010-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2012 The Android Open Source Project\n(C) Desire NUENTSA WAKAM, INRIA\nCopyright (C) EDF R&D,  lun sep 30 14:23:18 CEST 2002\nCopyright (C) 2012 Keir Mierle <mierle@gmail.com>\nCopyright (C) 1989, 1991 Free Software Foundation, Inc.\nCopyright (C) EDF R&D,  lun sep 30 14:23:23 CEST 2002\nCopyright (C) EDF R&D,  lun sep 30 14:23:24 CEST 2002\nCopyright (C) EDF R&D,  lun sep 30 14:23:27 CEST 2002\nCopyright (C) 2007 Free Software Foundation, Inc. 
<http:fsf.org/>\nCopyright (C) 1991, 1999 Free Software Foundation, Inc.\nCopyright (C) 2015 Benoit Jacob <benoitjacob@google.com>\nGeometric Tools, LLC Copyright (c) 1998-2010\nCopyright (C) EDF R&D,  lun sep 30 14:23:15 CEST 2002\nCopyright (C) 2002-2007 Yves Renard\nCopyright (C) 2012, 2014 Kolja Brix <brix@igpm.rwth-aaachen.de>\nCopyright (C) 1997-2001 Andrew Lumsdaine <lums@osl.iu.edu>  Lie-Quan Lee     <llee@osl.iu.edu>\nCopyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsawakam@inria.fr\nCopyright (C) 2015-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2013 Hauke Heibel <hauke.heibel@gmail.com>\nCopyright (C) 2010-2011 Jitse Niesen <jitse@maths.leeds.ac.uk>\nIntel Copyright (C) ....\nCopyright (C) 2010-2017 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 20013 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2008 Daniel Gomez Ferro <dgomezferro@gmail.com>\nCopyright (C) 2013 Désiré Nuentsa-Wakam <desire.nuentsawakam@inria.fr>\nCopyright (C) 2011-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 20015 Gael Guennebaud <gael.guennebaud@inria.fr>\nCopyright (C) 2014-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n\n\nLicense: Mozilla Public License (MPL) V2.0\n\nMozilla Public License\nVersion 2.0\n1. Definitions\n1.1. “Contributor”\nmeans each individual or legal entity that creates, contributes to the creation of, or owns Covered Software.\n1.2. “Contributor Version”\nmeans the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor’s Contribution.\n1.3. “Contribution”\nmeans Covered Software of a particular Contributor.\n1.4. “Covered Software”\nmeans Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof.\n1.5. 
“Incompatible With Secondary Licenses”\nmeans\nthat the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or\nthat the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License.\n1.6. “Executable Form”\nmeans any form of the work other than Source Code Form.\n1.7. “Larger Work”\nmeans a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software.\n1.8. “License”\nmeans this document.\n1.9. “Licensable”\nmeans having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License.\n1.10. “Modifications”\nmeans any of the following:\nany file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or\nany new file in Source Code Form that contains any Covered Software.\n1.11. “Patent Claims” of a Contributor\nmeans any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version.\n1.12. “Secondary License”\nmeans either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses.\n1.13. “Source Code Form”\nmeans the form of the work preferred for making modifications.\n1.14. “You” (or “Your”)\nmeans an individual or a legal entity exercising rights under this License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. 
For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.\n2. License Grants and Conditions\n2.1. Grants\nEach Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:\nunder intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and\nunder Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version.\n2.2. Effective Date\nThe licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution.\n2.3. Limitations on Grant Scope\nThe licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor:\nfor any code that a Contributor has removed from Covered Software; or\nfor infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or\nunder Patent Claims infringed by Covered Software in the absence of its Contributions.\nThis License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4).\n2.4. 
Subsequent Licenses\nNo Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3).\n2.5. Representation\nEach Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License.\n2.6. Fair Use\nThis License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents.\n2.7. Conditions\nSections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1.\n3. Responsibilities\n3.1. Distribution of Source Form\nAll distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients’ rights in the Source Code Form.\n3.2. Distribution of Executable Form\nIf You distribute Covered Software in Executable Form then:\nsuch Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and\nYou may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients’ rights in the Source Code Form under this License.\n3.3. 
Distribution of a Larger Work\nYou may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s).\n3.4. Notices\nYou may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies.\n3.5. Application of Additional Terms\nYou may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction.\n4. 
Inability to Comply Due to Statute or Regulation\nIf it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it.\n5. Termination\n5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice.\n5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate.\n5.3. 
In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination.\n6. Disclaimer of Warranty\nCovered Software is provided under this License on an “as is” basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer.\n7. Limitation of Liability\nUnder no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party’s negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.\n8. 
Litigation\nAny litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party’s ability to bring cross-claims or counter-claims.\n9. Miscellaneous\nThis License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor.\n10. Versions of the License\n10.1. New Versions\nMozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number.\n10.2. Effect of New Versions\nYou may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward.\n10.3. Modified Versions\nIf you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License).\n10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses\nIf You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached.\nExhibit A - Source Code Form License Notice\nThis Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.\nIf it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.\nYou may add additional accurate notices of copyright ownership.\nExhibit B - “Incompatible With Secondary Licenses” Notice\nThis Source Code Form is “Incompatible With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0.\n\n\nSoftware: JSON for Modern C++ 3.6.1\nCopyright notice: \nCopyright 2015 Google Inc. All rights reserved.\nCopyright 2018 Google Inc. All rights reserved.\nCopyright 2016 Ismael Jimenez Martinez. All rights reserved.\nCopyright 2017 Roman Lebedev. All rights reserved.\nCopyright (c) 2012 Two Blue Cubes Ltd. All rights reserved.\nCopyright (c) 2015 Max Woolf\nCopyright 2014 The Authors\nCopyright (c) 2016 Nicolas Seriot\nCopyright (c) 2015-2017 Niels Lohmann.\nCopyright (c) 2015-2017 Niels Lohmann\nCopyright (c) 2013-2019 Niels Lohmann <http:nlohmann.me>.\nCopyright (c) 2018 Vitaliy Manushkin <agri@akamo.info>.\nCopyright (c) 2012, Erik Edlund <erik.edlund@32767.se>\nCopyright (c) 2013-2019 Niels Lohmann\nCopyright 2013-2019 [Niels Lohmann](http:nlohmann.me)\nCopyright (c) 2009 Google Inc. 
All rights reserved.\nCopyright (C) 2009 Google Inc.\n\nLicense: MIT License\nThe MIT License\nCopyright (c) <year> <copyright holders>\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\nSoftware: re2 20191201\nCopyright notice:\nCopyright (C) 2005 Free Software Foundation, Inc.\nCopyright (C) 2007 Free Software Foundation, Inc.\nCopyright (C) 2009 Free Software Foundation, Inc.\nCopyright (C) 2009 The Android Open Source Project\nCopyright (c) 2002 by Lucent Technologies.\nCopyright (c) 2009 The RE2 Authors.\nCopyright 1999-2005 The RE2 Authors.  All Rights Reserved.\nCopyright 2001-2010 The RE2 Authors.  All Rights Reserved.\nCopyright 2002-2009 The RE2 Authors.  All Rights Reserved.\nCopyright 2003-2009 Google Inc.\nCopyright 2003-2009 The RE2 Authors.  All Rights Reserved.\nCopyright 2003-2010 Google Inc.  All Rights Reserved.\nCopyright 2004 The RE2 Authors.  All Rights Reserved.\nCopyright 2005 The RE2 Authors.  All Rights Reserved.\nCopyright 2006 The RE2 Authors.  
All Rights Reserved.\nCopyright 2006-2007 The RE2 Authors.  All Rights Reserved.\nCopyright 2006-2008 The RE2 Authors.  All Rights Reserved.\nCopyright 2007 The RE2 Authors.  All Rights Reserved.\nCopyright 2008 The RE2 Authors.  All Rights Reserved.\nCopyright 2009 The RE2 Authors.  All Rights Reserved.\nCopyright 2010 The RE2 Authors.  All Rights Reserved.\nCopyright 2012 The Go Authors.\nCopyright 2015 The RE2 Authors.  All Rights Reserved.\nCopyright 2016 The RE2 Authors.  All Rights Reserved.\nCopyright 2018 The RE2 Authors.  All Rights Reserved.\n\nLicense: BSD-3 with additional clause \nMost files in this release are marked with the copyrights of the\norganizations who have edited them.  The copyrights below are in no\nparticular order and generally reflect members of the Open MPI core\nteam who have contributed code to this release.  The copyrights for\ncode used under license from other parties are included in the\ncorresponding files.\n\nCopyright (c) 2004-2010 The Trustees of Indiana University and Indiana\n                        University Research and Technology\n                        Corporation.  All rights reserved.\nCopyright (c) 2004-2017 The University of Tennessee and The University\n                        of Tennessee Research Foundation.  All rights\n                        reserved.\nCopyright (c) 2004-2010 High Performance Computing Center Stuttgart,\n                        University of Stuttgart.  All rights reserved.\nCopyright (c) 2004-2008 The Regents of the University of California.\n                        All rights reserved.\nCopyright (c) 2006-2017 Los Alamos National Security, LLC.  All rights\n                        reserved.\nCopyright (c) 2006-2017 Cisco Systems, Inc.  All rights reserved.\nCopyright (c) 2006-2010 Voltaire, Inc. All rights reserved.\nCopyright (c) 2006-2017 Sandia National Laboratories. All rights reserved.\nCopyright (c) 2006-2010 Sun Microsystems, Inc.  
All rights reserved.\n                        Use is subject to license terms.\nCopyright (c) 2006-2017 The University of Houston. All rights reserved.\nCopyright (c) 2006-2009 Myricom, Inc.  All rights reserved.\nCopyright (c) 2007-2017 UT-Battelle, LLC. All rights reserved.\nCopyright (c) 2007-2017 IBM Corporation.  All rights reserved.\nCopyright (c) 1998-2005 Forschungszentrum Juelich, Juelich Supercomputing\n                        Centre, Federal Republic of Germany\nCopyright (c) 2005-2008 ZIH, TU Dresden, Federal Republic of Germany\nCopyright (c) 2007      Evergrid, Inc. All rights reserved.\nCopyright (c) 2008      Chelsio, Inc.  All rights reserved.\nCopyright (c) 2008-2009 Institut National de Recherche en\n                        Informatique.  All rights reserved.\nCopyright (c) 2007      Lawrence Livermore National Security, LLC.\n                        All rights reserved.\nCopyright (c) 2007-2017 Mellanox Technologies.  All rights reserved.\nCopyright (c) 2006-2010 QLogic Corporation.  All rights reserved.\nCopyright (c) 2008-2017 Oak Ridge National Labs.  All rights reserved.\nCopyright (c) 2006-2012 Oracle and/or its affiliates.  All rights reserved.\nCopyright (c) 2009-2015 Bull SAS.  All rights reserved.\nCopyright (c) 2010      ARM ltd.  All rights reserved.\nCopyright (c) 2016      ARM, Inc.  All rights reserved.\nCopyright (c) 2010-2011 Alex Brick <bricka@ccs.neu.edu>.  All rights reserved.\nCopyright (c) 2012      The University of Wisconsin-La Crosse. All rights\n                        reserved.\nCopyright (c) 2013-2016 Intel, Inc. All rights reserved.\nCopyright (c) 2011-2017 NVIDIA Corporation.  All rights reserved.\nCopyright (c) 2016      Broadcom Limited.  All rights reserved.\nCopyright (c) 2011-2017 Fujitsu Limited.  All rights reserved.\nCopyright (c) 2014-2015 Hewlett-Packard Development Company, LP.  
All\n                        rights reserved.\nCopyright (c) 2013-2017 Research Organization for Information Science (RIST).\n                        All rights reserved.\nCopyright (c) 2017      Amazon.com, Inc. or its affiliates.  All Rights\n                        reserved.\n\n$COPYRIGHT$\n\nAdditional copyrights may follow\n\n$HEADER$\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n- Redistributions of source code must retain the above copyright\n  notice, this list of conditions and the following disclaimer.\n\n- Redistributions in binary form must reproduce the above copyright\n  notice, this list of conditions and the following disclaimer listed\n  in this license in the documentation and/or other materials\n  provided with the distribution.\n\n- Neither the name of the copyright holders nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nThe copyright holders provide no reassurances that the source code\nprovided does not infringe any patent, copyright, or any other\nintellectual property rights of third parties.  The copyright holders\ndisclaim any liability to any recipient for claims brought against\nrecipient by any third party for infringement of that parties\nintellectual property rights.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nSoftware: pip 20.0.2\nCopyright notice: \nCopyright (c) 2010 ActiveState Software Inc.\nCopyright (c) 1991-2014 Unicode, Inc. All rights reserved.\nCopyright (C) 2013 Vinay Sajip.\nCopyright (C) 2013-2015 Vinay Sajip.\nCopyright 2012 Facebook\nCopyright (c) 2017 Thomas Kluyver\nCopyright (c) 2008-2019 Andrey Petrov and contributors (see CONTRIBUTORS.txt)\nCopyright (c) 2001-2014 Python Software Foundation; All Rights Reserved\nCopyright (C) 2012-2015 Vinay Sajip.\nCopyright (C) 1991, 1999 Free Software Foundation, Inc.\nCopyright (c) 2010-2020 Benjamin Peterson\nCopyright (c) 2010-2019 Benjamin Peterson\nCopyright (C) 2016 Jason R Coombs <jaraco@jaraco.com>\nCopyright (C) 2012 The Python Software Foundation.\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved\" are retained in Python alone or in any derivative version prepared by Licensee.\nCopyright (c) 2005-2010 ActiveState Software Inc.\nCopyright (c) 2006-2013 James Graham and other contributors\nCopyright (c) 2013 Eddy Petrișor\nCopyright 2015 Eric Larson\nCopyright (c) 2013-2018, Kim Davies. 
All rights reserved.\nCopyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>\nCopyright (C) 2008-2011 INADA Naoki <songofacandy@gmail.com>\nCopyright (c) 2008-2016 The pip developers (see AUTHORS.txt file)\nCopyright 2018 Kenneth Reitz\ncopyright = '2008-2017, PyPA'\nCopyright (c) Donald Stufft and individual contributors.\nCopyright (C) 2012-2019 Vinay Sajip.\nCopyright 2015,2016,2017 Nir Cohen\nCopyright (c) 2008-2019 The pip developers (see AUTHORS.txt file)\nCopyright (C) 2013-2017 Vinay Sajip.\nCopyright 2007 Google Inc.\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c) 1995-2001 Corporation for National Research Initiatives; All Rights Reserved\" are retained in Python 1.6.1 alone or in any derivative version prepared by Licensee.  Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): \"Python 1.6.1 is made available subject to the terms and conditions in CNRI's License Agreement.  This Agreement together with Python 1.6.1 may be located on the Internet using the following unique, persistent identifier (known as a handle): 1895.22/1013.  This Agreement may also be obtained from a proxy server on the Internet\ncopyright = \"Copyright 2014-2019 %s\" % author\nCopyright (c) 2015-2016 Will Bond <will@wbond.net>\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands.  
All rights reserved.\nCopyright (c) 2010 Jonathan Hartley All rights reserved.\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Python Software Foundation; All Rights Reserved\" are retained in Python alone or in any derivative version prepared by Licensee.\nCopyright (C) 2012-2013 Python Software Foundation.\nCopyright 2013-2014 Ray Holder\nCopyright (C) 2012-2017 Vinay Sajip.\ncopyright = 'Copyright 2019 Kenneth Reitz'\nCopyright (c) 2012 by Simon Sapin.\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Python Software Foundation; All Rights Reserved\" are retained in Python alone or in any derivative version prepared by Licensee.\nCopyright (c) 2003-2019  Paul T. McGuire\nCopyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>\nCopyright (C) 2012-2017 The Python Software Foundation.\n\nLicense: \nCopyright (c) 2008-2019 The pip developers (see AUTHORS.txt file)\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nSoftware: pytest 1.6.0\nCopyright notice: \ncopyright = \"2015–2020, holger krekel and pytest-dev team\"\nIf true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\nCopyright (c) 2010 by Armin Ronacher.\nCopyright (c) 2004-2020 Holger Krekel and others\nCopyright Holger Krekel and others, 2004-2020.\nepubcopyright = \"2013-2020, holger krekel et alii\"\n\nLicense: The MIT License (MIT)\n\nCopyright (c) 2004-2020 Holger Krekel and others\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\nSoftware: googletest 1.8.1\nCopyright notice: \nCopyright 2009, Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2007 Google Inc.\nCopyright 2007, Google Inc.\nCopyright 2013, Google Inc.\nCopyright 2015, Google Inc.\nCopyright 2005, Google Inc.\nCopyright 2008 Google Inc.\nCopyright 2006, Google Inc.\nCopyright 2009 Google Inc. All Rights Reserved.\nCopyright 2013 Google Inc. All Rights Reserved.\nCopyright 2017 Google Inc.\nCopyright 2007 Neal Norwitz\nCopyright 2008 Google Inc.  All Rights Reserved.\nCopyright 2009 Neal Norwitz All Rights Reserved.\nCopyright 2003 Google Inc.\nCopyright 2009 Google Inc.\nCopyright 2008 Google Inc. All Rights Reserved.\nCopyright [2007] Neal Norwitz\nPortions Copyright [2007] Google Inc.\nCopyright 2010 Google Inc.  All Rights Reserved.\nCopyright 2010, Google Inc.\nCopyright 2005 Google Inc. All Rights Reserved.\nCopyright 2018, Google Inc.\nCopyright 2003, Google Inc.\nCopyright 2009 Google Inc. All rights reserved.\nCopyright 2015 Google Inc. All rights reserved.\nCopyright 2009 Google Inc.  All rights reserved.\nCopyright 2018 Google LLC. 
All rights reserved.\nCopyright 2018, Google LLC.\n\n\nLicense: BSD 3-Clause License\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n    * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nSoftware: glog 0.4.0\nCopyright notice:\nCopyright (c) 1999, Google Inc.\nCopyright (c) 2007, Google Inc.\nCopyright (c) 2006, Google Inc.\nCopyright (c) 2003, Google Inc.\nCopyright (c) 1999, 2007, Google Inc.\nCopyright (c) 2008, Google Inc.\nCopyright (c) 2009, Google Inc.\nCopyright (c) 2002, Google Inc.\nCopyright (c) 2000 - 2007, Google Inc.\nCopyright (c) 2005 - 2007, Google Inc.\nCopyright (c) 2004, Google Inc.\nCopyright (c) 2003-2008, Jouni Malinen <j@w1.fi> and contributors\n\n\nLicense: BSD 3-Clause License\nCopyright (c) 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n    * Neither the name of Google Inc. 
nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nA function gettimeofday in utilities.cc is based on\n\nhttp://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd\n\nThe license of this code is:\n\nCopyright (c) 2003-2008, Jouni Malinen <j@w1.fi> and contributors\nAll Rights Reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n   notice, this list of conditions and the following disclaimer in the\n   documentation and/or other materials provided with the distribution.\n\n3. 
Neither the name(s) of the above-listed copyright holder(s) nor the\n   names of its contributors may be used to endorse or promote products\n   derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nSoftware: pybind11 2.4.3\nCopyright notice:\nCopyright (c) 2015 Wenzel Jakob <wenzel@inf.ethz.ch>\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2016 Trent Houliston <trent@houliston.me> and Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2017 Jason Rhinelander <jason@imaginary.ca>\nCopyright (c) 2016 Klemens Morgenstern <klemens.morgenstern@ed-chemnitz.de> and\nCopyright (c) 2017 Henry F. Schreiner\nCopyright (c) 2016 Sergey Lyskov and Wenzel Jakob\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.\nCopyright (c) 2016 Jason Rhinelander <jason@imaginary.ca>\nCopyright (c) 2019 Google LLC\nCopyright (c) 2019 Google Inc.\nCopyright (c) 2016 Ben North <ben@redfrontdoor.org>\nCopyright (c) 2016 Klemens D. 
Morgenstern\nCopyright (c) 2016 Pim Schellart <P.Schellart@princeton.edu>\nCopyright (c) 2017 Borja Zarco (Google LLC) <bzarco@google.com>\nCopyright (c) 2016 Ivan Smirnov <i.s.smirnov@gmail.com>\nCopyright (c) 2016 Ivan Smirnov\nCopyright (c) 2016 Sergey Lyskov\nCopyright (c) 2018 Hudson River Trading LLC <opensource@hudson-trading.com>\nCopyright (c) 2019 Roland Dreier <roland.dreier@gmail.com>\nCopyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>\nCopyright (c) 2008, 2009 Gael Guennebaud, <g.gael@free.fr>\nCopyright (c) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright 2001-2009 Kitware, Inc.\nCopyright 2012 Continuum Analytics, Inc.\nCopyright (c) 2007-2012 University of Illinois at Urbana-Champaign.\n\nLicense:BSD 3-Clause License\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n   may be used to endorse or promote products derived from this software\n   without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nPlease also refer to the file CONTRIBUTING.md, which clarifies licensing of\nexternal contributions to this project including patches, pull requests, etc.\n\nSoftware: pybind11 2.6.1\nCopyright notice:\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.\nCopyright (c) 2016 Ben North <ben@redfrontdoor.org>\nCopyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright 2012 Continuum Analytics, Inc.\nCopyright 2001-2009 Kitware, Inc.\nCopyright (c) 2016 Ivan Smirnov <i.s.smirnov@gmail.com>\nCopyright (c) 2017 Borja Zarco (Google LLC) <bzarco@google.com>\ncopyright = \"2017, Wenzel Jakob\"\nCopyright (c) 2016 Jason Rhinelander <jason@imaginary.ca>\nCopyright (c) 2016 Trent Houliston <trent@houliston.me> and Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2017 Jason Rhinelander <jason@imaginary.ca>\nCopyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>\nCopyright (c) 2008, 2009 Gael Guennebaud, <g.gael@free.fr>\nCopyright (c) 2016 Klemens Morgenstern <klemens.morgenstern@ed-chemnitz.de> and Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2020 Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2019 Google Inc.\nCopyright (c) 2019 Roland Dreier <roland.dreier@gmail.com>\nCopyright (c) 2018 Hudson River Trading LLC <opensource@hudson-trading.com>\nCopyright (c) 2019 Google LLC\nCopyright (c) 2015 Wenzel Jakob <wenzel@inf.ethz.ch>\nCopyright (c) 2016 Sergey Lyskov 
and Wenzel Jakob\nCopyright (c) 2016 Ivan Smirnov\nCopyright (c) 2016 Klemens D. Morgenstern\nCopyright (c) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (c) 2016 Pim Schellart <P.Schellart@princeton.edu>\nCopyright (c) 2020 Wenzel Jakob <wenzel@inf.ethz.ch> and Henry Schreiner\nCopyright (c) 2016 Sergey Lyskov\nCopyright (c) 2017 Henry F. Schreiner\n\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n   may be used to endorse or promote products derived from this software\n   without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nPlease also refer to the file .github/CONTRIBUTING.md, which clarifies licensing of\nexternal contributions to this project including patches, pull requests, etc.\n\nSoftware: google/protobuf 3.13.0\nCopyright 2008 Google Inc. All rights reserved.\nCopyright 2008 Google Inc. All rights reserved.\nCopyright 2007-2010 Baptiste Lepilleur Distributed under MIT license, or public domain if desired and recognized in your jurisdiction.\nCopyright 2007 Google Inc. All Rights Reserved.\nCopyright 2012 Google Inc. All rights reserved.\nCopyright 2014 Google Inc. All rights reserved.\nCopyright 2019 Google Inc. All rights reserved.\nCopyright 2008 Google Inc. All Rights Reserved.\ncopyright = u\"2008, Google LLC\"\nCopyright 2017 Google Inc. All rights reserved.\nCopyright 2008 Google Inc.\nCopyright 2015 Google Inc. All rights reserved.\nCopyright 2019 Google Inc. All rights reserved.\nCopyright (c) 2006, Google Inc.\nCopyright (c) 2007-2010 Baptiste Lepilleur\nCopyright 2017 Google Inc. All rights reserved.\nCopyright 2015 Google Inc. All rights reserved.\nCopyright 2018 Google Inc. All rights reserved.\nCopyright 2009 Google Inc. 
All rights reserved.\nCopyright 2007-2011 Baptiste Lepilleur Distributed under MIT license, or public domain if desired and recognized in your jurisdiction.\nCopyright 2011 Baptiste Lepilleur Distributed under MIT license, or public domain if desired and recognized in your jurisdiction.\nCopyright 2015, Google Inc.\nCopyright 2019 Google LLC. All rights reserved.\nCopyright 2016 Google Inc. All rights reserved.\nCopyright 2005 Google Inc.\nCopyright 2016 Google Inc. All rights reserved.\n\n\nLicense: BSD 3-Clause License\nCopyright 2008 Google Inc.  All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n    * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nCode generated by the Protocol Buffer compiler is owned by the owner\nof the input file used when generating it.  This code is not\nstandalone and requires a support library to be linked with it.  This\nsupport library is itself covered by the above license.\n\nSoftware: libevent 2.1.12\nCopyright notice:\nCopyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.\nCOPYRIGHT AND PERMISSION NOTICE\nCopyright (c) 1996 - 2013, Daniel Stenberg, <daniel@haxx.se>.\nCopyright (C) 2012, iSEC Partners.\nCopyright (c) 1987, 1993, 1994, 1995\nCopyright (c) 1987, 1993, 1994, 1996\nCopyright 2002 Niels Provos <provos@citi.umich.edu>\nCopyright (c) 2007-2012 Niels Provos and Nick Mathewson\nCopyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>\nCopyright (c) 2007-2012 Niels Provos, Nick Mathewson\nCopyright (c) 2009-2012 Niels Provos and Nick Mathewson\nCopyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>\nCopyright (c) 2008-2012 Niels Provos and Nick Mathewson\nCopyright (c) 1991, 1993\nCopyright (c) 2009, Michihiro NAKAJIMA\nCopyright 2000-2013 Kitware, Inc.\nCopyright 2000-2011 Insight Software Consortium\nnotices of original copyright by their contributors; see each source\nCopyright (C) 1996-2018 Free Software Foundation, Inc.\nCopyright (c) 2010 Chris Davis, Niels Provos, and Nick Mathewson\nCopyright (c) 2010-2012 Niels Provos and Nick Mathewson\nCopyright (c) 1996, David Mazieres <dm@uun.org>\nCopyright (c) 2008, Damien Miller 
<djm@openbsd.org>\nCopyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>\nCopyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>\nCopyright (c) 2009-2012 Niels Provos, Nick Mathewson\nCopyright 2000-2009 Niels Provos <provos@citi.umich.edu>\nCopyright 2009-2012 Niels Provos and Nick Mathewson\nCopyright 2000-2007 Niels Provos <provos@citi.umich.edu>\nCopyright 2007-2012 Niels Provos, Nick Mathewson\nCopyright 2003-2009 Niels Provos <provos@citi.umich.edu>\nCopyright 2006-2007 Niels Provos\nCopyright 2007-2012 Nick Mathewson and Niels Provos\nCopyright (c) 2005-2007 Niels Provos <provos@citi.umich.edu>\nCopyright (c) 2003-2009 Niels Provos <provos@citi.umich.edu>\nCopyright 2007-2012 Niels Provos and Nick Mathewson\nCopyright (c) 2007 Sun Microsystems. All rights reserved.\nCopyright (c) 2008-2012 Niels Provos, Nick Mathewson\nCopyright 2002 Christopher Clark\nCopyright 2005-2012 Nick Mathewson\nCopyright 2001-2007 Niels Provos <provos@citi.umich.edu>\nCopyright (c) 2012 Niels Provos and Nick Mathewson\nCopyright (c) 2000 Dug Song <dugsong@monkey.org>\nCopyright (c) 1993 The Regents of the University of California.\nCopyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>\nCopyright (c) 2003 Michael A. Davis <mike@datanerds.net>\nCopyright (c) 2007 Sun Microsystems\nCopyright (c) 2002 Christopher Clark\nCopyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>\nCopyright (c) 2010 BitTorrent, Inc.\nCopyright (c) 2005-2012 Niels Provos and Nick Mathewson\nCopyright (c) 1993\nCopyright 2003 Michael A. 
Davis <mike@datanerds.net>\nCopyright 2003-2007 Niels Provos <provos@citi.umich.edu>\nCopyright 2008-2012 Niels Provos and Nick Mathewson\nCopyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>\nCopyright (c) 2013 Niels Provos and Nick Mathewson\nCopyright (c) 2009-2012 Nick Mathewson and Niels Provos\nCopyright (c) 2007-2013 Niels Provos and Nick Mathewson\nCopyright (c) 2012 Ross Lagerwall <rosslagerwall@gmail.com>\ntinytest.c -- Copyright 2009-2012 Nick Mathewson\ntinytest.h -- Copyright 2009-2012 Nick Mathewson\ntinytestmacros.h -- Copyright 2009-2012 Nick Mathewson\n\nLibevent is available for use under the following license, commonly known\nas the 3-clause (or \"modified\") BSD license:\n\n==============================\nCopyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>\nCopyright (c) 2007-2012 Niels Provos and Nick Mathewson\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n1. Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n2. Redistributions in binary form must reproduce the above copyright\n   notice, this list of conditions and the following disclaimer in the\n   documentation and/or other materials provided with the distribution.\n3. 
The name of the author may not be used to endorse or promote products\n   derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\nIMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\nOF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\nIN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\nINCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\nNOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\nTHIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n==============================\n\nPortions of Libevent are based on works by others, also made available by\nthem under the three-clause BSD license above.  The copyright notices are\navailable in the corresponding source files; the license is as above.  Here's\na list:\n\nlog.c:\n   Copyright (c) 2000 Dug Song <dugsong@monkey.org>\n   Copyright (c) 1993 The Regents of the University of California.\n\nstrlcpy.c:\n   Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>\n\nwin32select.c:\n   Copyright (c) 2003 Michael A. 
Davis <mike@datanerds.net>\n\nevport.c:\n   Copyright (c) 2007 Sun Microsystems\n\nht-internal.h:\n   Copyright (c) 2002 Christopher Clark\n\nminheap-internal.h:\n   Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>\n\n==============================\n\nThe arc4module is available under the following, sometimes called the\n\"OpenBSD\" license:\n\n   Copyright (c) 1996, David Mazieres <dm@uun.org>\n   Copyright (c) 2008, Damien Miller <djm@openbsd.org>\n\n   Permission to use, copy, modify, and distribute this software for any\n   purpose with or without fee is hereby granted, provided that the above\n   copyright notice and this permission notice appear in all copies.\n\n   THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n   WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n   MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n   ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n==============================\n\nThe Windows timer code is based on code from libutp, which is\ndistributed under this license, sometimes called the \"MIT\" license.\n\n\nCopyright (c) 2010 BitTorrent, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the 
Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n==============================\n\nThe wepoll module is available under the following, sometimes called the\n\"FreeBSD\" license:\n\nCopyright 2012-2020, Bert Belder <bertbelder@gmail.com>\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n  * Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright\n    notice, this list of conditions and the following disclaimer in the\n    documentation and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n==============================\n\nThe ssl-client-mbedtls.c is available under the following license:\n\nCopyright (C) 2006-2015, ARM Limited, All Rights Reserved\nSPDX-License-Identifier: Apache-2.0\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may\nnot use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\nWARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nThis file is part of mbed TLS (https://tls.mbed.org)\n\nSoftware: grpc 1.36.1\nCopyright notice: \nCopyright 2015 The gRPC Authors\nCopyright 2016 The gRPC Authors\nCopyright 2018 The gRPC Authors\nCopyright 2019 The gRPC Authors\nCopyright 2018 The gRPC Authors\nCopyright © 2018 gRPC.\nCopyright 2016 gRPC authors.\nCopyright 2017 gRPC authors.\nCopyright 2019 gRPC authors.\nCopyright (C) 1995, 1996, 1997, and 1998 WIDE Project.\nCopyright (C) 2009 - 2013 by Daniel Stenberg et al\nCopyright (c) 2004, 2006-2010 Michael Roth <mroth@nessie.de>\nCopyright (c) 2004-2009 Michael Roth <mroth@nessie.de>\nCopyright (c) 2004-2010 Michael Roth <mroth@nessie.de>\nCopyright (c) 2006-2008 Michael Roth <mroth@nessie.de>\nCopyright (c) 
2009-2011, Google Inc.\nCopyright (c) 2018, Google Inc.\nCopyright 2007 Google Inc. All Rights Reserved.\nCopyright 2008 Google Inc.\nCopyright 2013 Google Inc.\nCopyright 2014 Google Inc.\nCopyright 2014 gRPC authors.\nCopyright 2014, Google Inc.\nCopyright 2015 The gRPC Authors\nCopyright 2015 gRPC authors.\nCopyright 2015, Google Inc.\nCopyright 2015-2016 gRPC authors.\nCopyright 2015-2017 gRPC authors.\nCopyright 2016 Google Inc.\nCopyright 2016 The Chromium Authors.\nCopyright 2016 gRPC authors.\nCopyright 2016, Google Inc.\nCopyright 2017 The gRPC Authors\nCopyright 2017 gRPC authors.\nCopyright 2018 The Bazel Authors.\nCopyright 2018 The gRPC Authors\nCopyright 2018 The gRPC Authors.\nCopyright 2018 gRPC Authors.\nCopyright 2018 gRPC authors.\nCopyright 2018, gRPC Authors\nCopyright 2019 Istio Authors. All Rights Reserved.\nCopyright 2019 The Bazel Authors.\nCopyright 2019 The gRPC Authors\nCopyright 2019 The gRPC Authors.\nCopyright 2019 The gRPC authors.\nCopyright 2019 gRPC authors.\nCopyright 2019 the gRPC authors.\nCopyright 2019, Google Inc.\nCopyright 2020 The gRPC Authors\nCopyright 2020 The gRPC Authors.\nCopyright 2020 The gRPC authors.\nCopyright 2020 gRPC authors.\nCopyright 2020 the gRPC authors.\nCopyright 2020 王一 Wang Yi <godspeedchina@yeah.net>\nCopyright 2021 The gRPC Authors\nCopyright 2021 The gRPC authors.\nCopyright 2021 gRPC authors.\nCopyright 2021 the gRPC authors.\nCopyright 2015 The gRPC Authors\nCopyright 2017 The gRPC Authors\nCopyright 2015 gRPC authors.\nCopyright 2016 gRPC authors.\nCopyright 2020 The gRPC Authors\n\nSoftware: cmake-modules cf2e087039f81d13e687cf6c2b1b382b9c1e756f\nCopyright notice:\nCopyright 2009 Kitware, Inc.\nCopyright 2009 Will Dicharry <wdicharry@stellarscience.com>\nCopyright 2005-2009 Kitware, Inc.\nCopyright Iowa State University 2009-2010.\nCopyright 2006-2009 Kitware, Inc.\nCopyright 2006-2008 Andreas Schneider <mail@cynapses.org>\nCopyright 2007      Wengo\nCopyright 2007      Mike 
Jackson\nCopyright 2008      Andreas Pakulat <apaku@gmx.de>\nCopyright 2008-2010 Philip Lowman <philip@yhbt.com>\nCopyright 2009 Alexander Neundorf <neundorf@kde.org>\nCopyright (c) 2012 - 2017, Lars Bilke\nCopyright (c) 2012-2016 Sascha Kratky\nCopyright 2012-2018 Sascha Kratky\nCopyright (c) 2012-2018, OpenGeoSys Community (http://www.opengeosys.org)\nCopyright (c) 2012 - 2015, Lars Bilke\nCopyright 2008-2009 Philip Lowman <philip@yhbt.com>\nCopyright 2010      Iowa State University (Ryan Pavlik <abiryan@ryand.net>)\nCopyright 2000-2009 Kitware, Inc., Insight Software Consortium\nCopyright 2010-2011 Kitware, Inc.\nCopyright Iowa State University 2009-2011\n\nBoost Software License - Version 1.0 - August 17th, 2003\n\nPermission is hereby granted, free of charge, to any person or organization\nobtaining a copy of the software and accompanying documentation covered by\nthis license (the \"Software\") to use, reproduce, display, distribute,\nexecute, and transmit the Software, and to prepare derivative works of the\nSoftware, and to permit third-parties to whom the Software is furnished to\ndo so, all subject to the following:\n\nThe copyright notices in the Software and this entire statement, including\nthe above license grant, this restriction and the following disclaimer,\nmust be included in all copies of the Software, in whole or in part, and\nall derivative works of the Software, unless such copies or derivative\nworks are solely in the form of machine-executable object code generated by\na source language processor.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT\nSHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE\nFOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\nSoftware: abseil-cpp 20200923.3\nCopyright notice:\nCopyright 2016 Google Inc. All Rights Reserved.\nCopyright 2017 Google Inc. All Rights Reserved.\nCopyright 2017 The Abseil Authors.\nCopyright 2018 The Abseil Authors.\nCopyright 2019 The Abseil Authors.\nCopyright 2020 The Abseil Authors.\n\nApache License\n                           Version 2.0, January 2004\n                        https://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. 
For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. 
For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       https://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n\nSoftware: c-ares 1.15.0\nCopyright notice:\nCopyright (c) 2012 Dan Winship\nCopyright (C) 2005 by Dominick Meglio\nCopyright (C) 2009-2013 by Daniel Stenberg\nCopyright (C) 2003-2018 Free Software Foundation, Inc.\nCopyright (c) 2011 Daniel Stenberg <daniel@haxx.se>\nCopyright (c) 1996-1999 by Internet Software Consortium.\nCopyright (C) 2005, 2013 by Dominick Meglio\nCopyright (C) 2017 by John Schember <john@nachtimwald.com>\nCopyright (C) 2008-2013 by Daniel Stenberg\nCopyright 2004 by Daniel Stenberg\ndefine ARESCOPYRIGHT \"2004 - 2017 Daniel Stenberg, <daniel@haxx.se>.\"\nCopyright (c) 1996,1999 by Internet Software Consortium.\nCopyright (c) 2013 Roy Stogner <roystgnr@ices.utexas.edu>\nCopyright (C) 2004-2005, 2007-2008, 2011-2015 Free Software Foundation, Inc.\nCopyright (C) 1996-2018 Free Software Foundation, Inc.\nCopyright (C) 2005 - 2010, Daniel Stenberg\nCopyright (c) 2012 Philip Withnall\nCopyright (C) 2004 - 2012 by Daniel Stenberg et al\nCopyright (C) 2004-2010 by Daniel Stenberg.\nCopyright (C) 2004-2009 by 
Daniel Stenberg.\nCopyright (C) 2004-2009 by Daniel Stenberg\nCopyright 2003 Google Inc.\nCopyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>\nCopyright 2005 by Dominick Meglio.\nCopyright (C) 2017 by John Schember\nCopyright (C) 2010 Jeremy Lal <kapouer@melix.org>\nCopyright (C) 2009 by Jakub Hrozek <jhrozek@redhat.com>\nCopyright (c) 2012 Christian Persch\nCopyright (C) 2013 by Daniel Stenberg\nCopyright 2005, Google Inc.\nCopyright 2013, Google Inc.\nCopyright (C) 2011 Free Software Foundation, Inc.\nCopyright (C) 2018 by John Schember <john@nachtimwald.com>\nCopyright (c) 2008 Benjamin Kosnik <bkoz@redhat.com>\nCopyright 2005 by Dominick Meglio\nCopyright (C) 2004-2005, 2007, 2009, 2011-2015 Free Software Foundation, Inc.\nCopyright (c) 2012 Paolo Borelli\nCopyright (C) 2009 by Daniel Stenberg et al\nCopyright (C) 1996-2001, 2003-2015 Free Software Foundation, Inc.\nCopyright 1998, 2000 by the Massachusetts Institute of Technology.\nCopyright (C) 2004-2005, 2007-2009, 2011-2015 Free Software Foundation, Inc.\nCopyright (C) 2009-2018 Free Software Foundation, Inc.\nCopyright 2005 by Dominick Meglio.\nCopyright (c) 2013 Daniel Stenberg <daniel@haxx.se>\nCopyright (C) 1994 X Consortium\nCopyright 2008 Google Inc.\nCopyright (C) 1999-2018 Free Software Foundation, Inc.\nCopyright 1998, 2011, 2013 by the Massachusetts Institute of Technology.\nCopyright 1998 by the Massachusetts Institute of Technology.\nCopyright (C) 2004-2010 by Daniel Stenberg\nCopyright (c) 2004 by Internet Systems Consortium, Inc. 
(\"ISC\")\nCopyright 1992-2018 Free Software Foundation, Inc.\nCopyright 2015, Google Inc.\nCopyright (C) 2004-2018 Free Software Foundation, Inc.\nCopyright (C) 2012 Free Software Foundation, Inc.\nCopyright (C) 2010-2012 by Daniel Stenberg\nCopyright (c) 2012 Xan Lopez\n- aresversion.h: copyright end range year is now 2013\nCopyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>\nCopyright 2000 by the Massachusetts Institute of Technology.\nCopyright (C) 2009 - 2013 by Daniel Stenberg et al\nCopyright (C) 2004 by Daniel Stenberg et al\nCopyright (C) 2006-2018 Free Software Foundation, Inc.\nCopyright (C) 2008 - 2013 by Daniel Stenberg et al\nCopyright 2006, Google Inc.\nCopyright (C) 2009-2016 by Daniel Stenberg\nCopyright (C) 2004-2009 by Daniel Stenberg\nCopyright (C) 2004-2010 by Daniel Stenberg\nCopyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS file.\nCopyright 2010 by Ben Greear <greearb@candelatech.com>\nCopyright 2007, Google Inc.\nCopyright (C) 1997-2018 Free Software Foundation, Inc.\nCopyright 1998, 2011 by the Massachusetts Institute of Technology.\nCopyright 1998, 2000 by the Massachusetts Institute of Technology.\nCopyright (C) 2004-2011 by Daniel Stenberg\nCopyright (C) 2008 - 2009 by Daniel Stenberg et al\nCopyright (C) 2005-2013 by Daniel Stenberg et al\nCopyright 2008, Google Inc.\nCopyright (C) 2004-2017 by Daniel Stenberg\nCopyright (C) 2016 by Daniel Stenberg\nCopyright (C) 2010-2013 by Daniel Stenberg\nCopyright (c) 1987-2001 The Regents of the University of California.\nCopyright (C) 2008 - 2012 by Daniel Stenberg et al\nCopyright (C) 2009-2013 by Daniel Stenberg et al\nCopyright (c) 2015 Bastien ROUCARIES\nCopyright 2000 by the Massachusetts Institute of Technology.\nCopyright 2005 Dominick Meglio\nCopyright 1998 by Daniel Stenberg\nCopyright (c) 2011 Daniel Richard G. 
<skunk@iSKUNK.ORG>\nCopyright (C) 2001-2018 Free Software Foundation, Inc.\nCopyright (C) 2004 - 2011 by Daniel Stenberg et al\nCopyright (C) 2004 - 2013 by Daniel Stenberg et al\nCopyright (C) 2012 Marko Kreen <markokr@gmail.com>\nCopyright (C) 2008-2010 by Daniel Stenberg\nCopyright (C) 2008 by Daniel Stenberg et al\nCopyright 1998 by the Massachusetts Institute of Technology.\nCopyright (C) 2008-2010 by Daniel Stenberg\nCopyright (C) 2017 by John Schember\nCopyright (C) 2004, 2011-2015 Free Software Foundation, Inc.\nCopyright (C) 2002-2018 Free Software Foundation, Inc.\nCopyright (C) 2007-2013 by Daniel Stenberg\nCopyright 2009 Google Inc.\nCopyright (C) 1994-2018 Free Software Foundation, Inc.\nCopyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.\nCopyright (C) 2014 Free Software Foundation, Inc.\nCopyright (c) 2012 Zack Weinberg <zackw@panix.com>\n\nCopyright 1998 by the Massachusetts Institute of Technology.\n\nPermission to use, copy, modify, and distribute this software and its\ndocumentation for any purpose and without fee is hereby granted, provided that\nthe above copyright notice appear in all copies and that both that copyright\nnotice and this permission notice appear in supporting documentation, and that\nthe name of M.I.T. not be used in advertising or publicity pertaining to\ndistribution of the software without specific, written prior permission.\nM.I.T. makes no representations about the suitability of this software for any\npurpose.  It is provided \"as is\" without express or implied warranty.\n\nSoftware: numpy 1.17.0\nCopyright notice:\nCopyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu\nCopyright 2014 Melissa O'Neill <oneill@pcg-random.org>\nCopyright (c) 2006, University of Georgia and Pierre G.F. Gerard-Marchant All rights reserved.\nCopyright 1999-2004 Pearu Peterson all rights reserved, Pearu Peterson <pearu@ioc.ee>\nCopyright (c) 2009-2019: Jeff Bezanson, Stefan Karpinski, Viral B. 
Shah, and other contributors:\nCopyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org)\ncopyright = '2008-2019, The SciPy community'\nCopyright 2002 Pearu Peterson all rights reserved, Pearu Peterson <pearu@cens.ioc.ee>\nCopyright (c) 2019 NumPy Developers\nCopyright (c) 2005-2017, NumPy Developers.\nCopyright (c) 2018 Melissa E. O'Neill\nCopyright (c) 2008 Ian Bicking and Contributors\nCopyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation.  All rights reserved.\nCopyright 1999, 2000, 2001 Regents of the University of California.\nCopyright (c) 2005, NumPy Developers\nCopyright (c) 2019 Kevin Sheppard. All rights reserved.\nCopyright 2010-2012, D. E. Shaw Research.\nCopyright (c) 2011 by Mark Wiebe (mwwiebe@gmail.com)\nCopyright 2006, Dean Edwards\nCopyright (c) 2014 Ryan Juckett\nCopyright 2001-2005 Pearu Peterson all rights reserved, Pearu Peterson <pearu@cens.ioc.ee>\ncopyright = u'2017-2018, NumPy Developers'\nCopyright (C) 2004-2018 Max-Planck-Society \\author Martin Reinecke\nCopyright (c) 2010-2011 by Mark Wiebe (mwwiebe@gmail.com)\nCopyright (c) 2012 Stephen Montgomery-Smith <stephen@FreeBSD.ORG>\nCopyright (c) 2005-2019, NumPy Developers.\nCopyright (c) 2011 Enthought, Inc\nCopyright (c) 2007, 2011 David Schultz <das@FreeBSD.ORG>\nCopyright (c) 2000-2013 The University of California Berkeley. All rights reserved.\nCopyright (c) 2015 Pauli Virtanen All rights reserved.\nCopyright 1999,2000 Pearu Peterson all rights reserved, Pearu Peterson <pearu@ioc.ee>\nCopyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, All rights reserved.\nCopyright (c) 2005-2015, NumPy Developers.\nCopyright (c) 2011 by Enthought, Inc.\nCopyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com)\nCopyright 2000 Pearu Peterson all rights reserved, Pearu Peterson <pearu@ioc.ee>\nf90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 
1994-1996 CF90 (2.x.x.x  f36t87) Version 2.3 Wed Apr 19, 2006  13:05:16\nCopyright (c) 2006-2013 The University of Colorado Denver.  All rights reserved.\nf90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0\nCopyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.\nCopyright 1999--2011 Pearu Peterson all rights reserved, Pearu Peterson <pearu@cens.ioc.ee>\nCopyright 1999 - 2011 Pearu Peterson all rights reserved.\nCopyright 2015 Robert Kern <robert.kern@gmail.com>\nCopyright (c) 2015 Melissa E. O'Neill\n\nCopyright (c) 2005-2019, NumPy Developers.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\n       notice, this list of conditions and the following disclaimer.\n\n    * Redistributions in binary form must reproduce the above\n       copyright notice, this list of conditions and the following\n       disclaimer in the documentation and/or other materials provided\n       with the distribution.\n\n    * Neither the name of the NumPy Developers nor the names of any\n       contributors may be used to endorse or promote products derived\n       from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nSoftware: Python 3.7.5\nCopyright notice:\nCopyright (c) 1999-2000 by Secret Labs AB\nCopyright (C) 2005-2007   Gregory P. Smith (greg@krypto.org)\nCopyright (c) 2003.\n.  Copyright (C) 2005-2010   Gregory P. Smith (greg@krypto.org)\nCopyright 1996,1997 by Oliver Andrich, Koblenz, Germany.\nCopyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.\nCopyright (c) 1995-2001 Corporation for National Research Initiatives.  All rights reserved.\nCopyright 1994 by Lance Ellinghouse Cathedral City, California Republic, United States of America.\nCopyright (C) 2001 Python Software Foundation Barry Warsaw <barry@python.org>, 2000.\nlibffi 2.00-beta - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (c) 2008-2012 Stefan Krah. All rights reserved.\n2001-07-01 fl   added BIGCHARSET support (from Martin von Loewis)\n``'Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam'``\nCopyright (C) 2003 Python Software Foundation\nCopyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.\nCopyright 1995-1997, Automatrix, Inc., all rights reserved.\nCopyright (c) 2002 MyCompanyName. All rights reserved.\n<string>%version%, (c) 2001-2019 Python Software Foundation.</string>\nCopyright (c) 2004 by Peter Astrand <astrand@lysator.liu.se>\nCopyright (c) 1999-2002 by Fredrik Lundh.\nCopyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.\\n\\\nAIX ABI support (c) 2002 Free Software Foundation, Inc.\n( Copyright (c) 2011 Stefan Krah. 
All rights reserved. )\n2013-02-04 mrab added fullmatch primitive\n2003-10-17 gn   implemented non recursive scheme\n2003-04-18 mvl  fully support 4-byte codes\nCopyright (C) 1996-2018 Free Software Foundation, Inc.\nportions copyright 2001, Autonomous Zones Industries, Inc., all rights...\nCopyright (c) 1999-2002 by Secret Labs AB.\nCopyright (C) 1986 Gary S. Brown.  You may use this program, or code or tables extracted from it, as desired without restriction.\n-- Copyright (c) IBM Corporation, 2003, 2008.  All rights reserved.   --\n; Copyright (c) 2008-2016 Stefan Krah. All rights reserved.\nCopyright (c) 2001-2019 Python Software Foundation.\\n\\\nCopyright 2008 Armin Ronacher.\nCopyright © 2000 BeOpen.com. All rights reserved.\n(c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)\nCopyright (c) 2005-2006 ActiveState Software Inc.\nCopyright (C) 1994 Steen Lumholt.\nCopyright (c) 1999 by Fredrik Lundh.\nlibffi - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (c) 2006-2008 Alexander Chemeris\nCopyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>\nCopyright (c) 1999-2003 Steve Purcell\nDarwin ABI support (c) 2001 John Hornkvist\nCopyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)\nCopyright (c) 1999-2008 by Fredrik Lundh\ni.e., \"Copyright © 1995-2001 Corporation for National Research Initiatives; All Rights Reserved\" are retained in Python 1.6.1 alone or in any derivative version prepared by Licensee.  Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): \"Python 1.6.1\n-- Copyright (c) IBM Corporation, 2005, 2009.  All rights reserved.   --\nCopyright (c) 2001-2017 Expat maintainers\nCopyright (c) 2001-2012 Python Software Foundation. All Rights Reserved.\nCopyright (C) 2002-2006 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\n(c) 2002 Gregory P. Ward.  
All Rights Reserved.\nCopyright (c) 2000 BeOpen.com.\\n\\\nCopyright (C) 2001-2007 Python Software Foundation Author: Ben Gertzfield, Barry Warsaw Contact: email-sig@python.org\nCopyright (C) 2003-2004 Federico Di Gregorio <fog@debian.org>\n2001-05-14 fl   fixes for 1.5.2 compatibility\nCopyright © 1995-2000 Corporation for National Research Initiatives. All rights reserved.\nCopyright (C) 1995, 1996, 1997, 1998, and 1999 WIDE Project.\ncopyright, i.e., \"Copyright © 2001-2019 Python Software Foundation; All Rights Reserved\" are retained in Python |release| alone or in any derivative version prepared by Licensee.\nCopyright (C) 2001-2007 Python Software Foundation Author: Barry Warsaw, Thomas Wouters, Anthony Baxter Contact: email-sig@python.org\nCopyright (C) 2005-2010   Gregory P. Smith (greg@krypto.org)\nCopyright 2001-2017 by Vinay Sajip. All Rights Reserved.\nCopyright (c) 2000 Doug White, 2006 James Knight, 2007 Christian Heimes All rights reserved.\nCopyright (C) 1999-2001 Gregory P. Ward.\nCopyright (c) 1999-2002 by Fredrik Lundh\n+   Copyright 2007 Python Software Foundation.\nelse if (config == (void )2000 && (c) == 0x9B1D) {                 \\\nCopyright (c) 1999-2002 by Secret Labs AB\n2002-11-09 fl   fixed empty sub/subn return type\nCopyright 2009 Gabriel A. Genellina\nCopyright (c) 2003-2009 by Fredrik Lundh.  All rights reserved.\nCopyright 2004-2005 Elemental Security, Inc. All Rights Reserved.\n(c) Copyright Guido van Rossum, 2000.\nCopyright (C) 1995, 1996, 1997, and 1998 WIDE Project.\nCopyright (C) 2011-2012 Vinay Sajip.\nCopyright 2006 Google, Inc. 
All Rights Reserved.\n(c) Copyright Marc-Andre Lemburg, 2005.\nCopyright (C) YEAR ORGANIZATION FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\nCopyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com\nCopyright (c) 1995-2000, Corporation for National Research Initiatives.\nCopyright (c) 1999 Toby Dickenson\nCopyright (C) 2001,2002 Python Software Foundation csv package unit tests\nCopyright (C) 2005, 2006 Martin von Löwis Licensed to PSF under a Contributor Agreement.\nCopyright (c) 1997 by Fredrik Lundh\nCopyright (c) 2002-2006 Python Software Foundation.  All rights reserved.\nCopyright (c) 2002  Roger Sayle\nCopyright 1995-1996 by Fred L. Drake, Jr. and Virginia Polytechnic Institute and State University, Blacksburg, Virginia, USA.\ntypes.c - Copyright (c) 1996, 1998  Red Hat, Inc.\nCopyright 2000, Mojam Media, Inc., all rights reserved.\nCopyright (C) 1994 X Consortium\nCopyright (C) 2002-2004 Python Software Foundation\nCopyright (C) 2004-2006 Python Software Foundation Authors: Baxter, Wouters and Warsaw Contact: email-sig@python.org\nCopyright (c) 2002 Jorge Acereda  <jacereda@users.sourceforge.net> &\ndarwin.S - Copyright (c) 1996, 1998, 2001, 2002, 2003  Red Hat, Inc.\nCopyright © 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands.  All rights reserved.\n\"Copyright 1995-1996 by Virginia Polytechnic Institute & State\\n\\\nCopyright 2012, Samuel Neves <sneves@dei.uc.pt>.  You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option.  
The terms of these licenses can be found at:\nCopyright (C) 1995-2011 Jean-loup Gailly and Mark Adler\nx86-ffitarget.h - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.\ndarwin64.S - Copyright (c) 2006 Free Software Foundation, Inc.\nCopyright (C) 2001-2006 Python Software Foundation Author: Ben Gertzfield Contact: email-sig@python.org\nCopyright (c) 2005 Don Owens All rights reserved.\nwin32.S - Copyright (c) 1996, 1998, 2001, 2002  Red Hat, Inc.\n(c) Copyright 2005, Marc-Andre Lemburg (mal@lemburg.com).\nlibrary/xml.etree.elementtree,,:include,  Copyright (c) <xi:include href=\"year.txt\" parse=\"text\" />.\nffi.c - Copyright (c) 1998 Geoffrey Keating\nCopyright 2006 Georg Brandl.\nCopyright (C) 2005-2010 Gerhard Häring <gh@ghaering.de>\n(c) 2013-2017 Christian Heimes <christian@python.org>\nCopyright 1992-2018 Free Software Foundation, Inc.\nCopyright (c) 1990-1995, Stichting Mathematisch Centrum.\nCopyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019 Python Software Foundation;\n2001-10-24 fl   added finditer primitive (for 2.2 only)\nCopyright (C) 2001-2006 Python Software Foundation Author: Keith Dart Contact: email-sig@python.org\nCopyright (c) 1999-2009 by Fredrik Lundh.\n(c) 2000 Peter Bosch.  All Rights Reserved.\nffitarget.h - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (C) 2012 Free Software Foundation, Inc.\nCopyright (C) 2002-2007 Python Software Foundation Author: Ben Gertzfield Contact: email-sig@python.org\nCopyright 1994 by Lance Ellinghouse, Cathedral City, California Republic, United States of America.\nCopyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.\nCopyright (C) 2006 - 2010  Gregor Lingl email: glingl@aon.at\nCopyright © 2001-2019 Python Software Foundation. All rights reserved.\nCopyright (c) 2009,2010 Zmanda Inc. 
<http://www.zmanda.com/>\nCopyright (c) 1998-2008 The OpenSSL Project.  All rights reserved.\nCopyright 1996 by Sam Rushing\nCopyright (c) 1998-2000 Thai Open Source Software Center Ltd and Clark Cooper\n(c) Craig Reese, Joe Campbell and Jeff Poskanzer 1989 /\nCopyright (c) 1999, 2000, 2001 Steve Purcell This module is free software, and you may redistribute it and/or modify it under the same terms as Python itself, so long as this copyright message and disclaimer are retained in their original form.\n-- Copyright (c) IBM Corporation, 2005, 2008.  All rights reserved.   --\nCopyright (C) 2001-2012 Python Software Foundation. All Rights Reserved.\ndnl Copyright © 2004 Scott James Remnant <scott@netsplit.com>.\nCopyright (C) 2002-2007 Python Software Foundation Contact: email-sig@python.org\nCopyright (c) 1998-2001 by Secret Labs AB.  All rights reserved.\nCopyright (c) 2009,2010 Dustin J. Mitchell <dustin@zmanda.com>\nCopyright (c) 2002  Bo Thorsen\n2001-10-21 fl   added sub/subn primitive\ncopyright, i.e., \"Copyright © 2001-2018 Python Software Foundation; All Rights Reserved\" are retained in Python 3.7 alone or in any derivative version prepared by Licensee.\nCopyright (C) 2011-2013 Vinay Sajip.\nCopyright (c) 1991, 2000, 2001 by Lucent Technologies.\nCopyright (c) 2010 Python Software Foundation. All Rights Reserved.\nCopyright 1992-1994, David Gottner\n\" SRE 2.2.2 Copyright (c) 1997-2002 by Secret Labs AB \";\n;   Copyright (c) 2004, Outercurve Foundation.\nCopyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>\nCopyright (c) 1999 by Secret Labs AB\nlibffi PyOBJC - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (c) 1999-2009 by Fredrik Lundh\nCopyright 2007 Google, Inc. All Rights Reserved.\n-- Copyright (c) IBM Corporation, 2004, 2008.  All rights reserved.   --\nCopyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019 Python Software Foundation.  
All rights reserved.\nCopyright (C) 2001-2007 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\nCopyright (c) 2000 BeOpen.com.  All rights reserved.\nCopyright © 1991-1995 Stichting Mathematisch Centrum. All rights reserved.\nCopyright (C) 2000  Luke Kenneth Casson Leighton <lkcl@samba.org>\n2001-12-07 fl   fixed memory leak in sub/subn (Guido van Rossum)\n-- Copyright (c) IBM Corporation, 2001, 2008.  All rights reserved.   --\nVirginia, USA.  Portions copyright 1991-1995 by Stichting Mathematisch\\n\\\nCopyright (c) 2004 Free Software Foundation, Inc.\nso portions are Copyright (C) 2001,2002 Python Software Foundation, and were written by Barry Warsaw.\nCopyright (C) 2004-2010 Gerhard Häring <gh@ghaering.de>\nCopyright (c) 2004 Python Software Foundation.\n(c) Copyright 2000 Guido van Rossum.\nCopyright 2007 Georg Brandl.\nCopyright (c) 1999 by Secret Labs AB.\nCopyright (c) 2002 Unicode, Inc.  All Rights reserved.\nCopyright 2009 Brian Quinlan. All Rights Reserved.\nCopyright (c) 2008-2009, Google Inc.\nCopyright (c) 2001-2006 Twisted Matrix Laboratories.\n(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c) 1995-2001 Corporation for National Research Initiatives; All Rights Reserved\" are retained in Python 1.6.1 alone or in any derivative version prepared by Licensee.  Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): \"Python 1.6.1 is made available subject to the terms and conditions in CNRI's License Agreement.  This Agreement together with Python 1.6.1 may be located on the Internet using the following unique, persistent identifier (known as a handle): 1895.22/1013.  This Agreement may also be obtained from a proxy server on the Internet\nCopyright (c) Corporation for National Research Initiatives.\nCopyright (c) 2008-2016 Stefan Krah. 
All rights reserved.\nCopyright 2001-2016 by Vinay Sajip. All Rights Reserved.\nif (config == (void )2000 && (c) == 0x20B9F) {                     \\\nppc-ffitarget.h - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (c) 2001-2006 Gregory P. Ward.  All rights reserved.\nCopyright (c) 1997-2001 by Secret Labs AB.  All rights reserved.\nCopyright 2000 by Timothy O'Malley <timo@alum.mit.edu>\nCopyright (C) 2007-2012 Michael Foord & the mock team E-mail: fuzzyman AT voidspace DOT org DOT uk\nCopyright (C) 2011-2014 Vinay Sajip.\nCopyright (c) 2002  Ranjit Mathew\nppc-darwin.h - Copyright (c) 2002, 2003, 2004, Free Software Foundation, Inc.\nppc64-darwinclosure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation, Inc. based on ppcclosure.S\nx86-ffi64.c - Copyright (c) 2002  Bo Thorsen <bo@suse.de>\nCopyright (C) 2001-2006 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\n﻿Copyright (c) 2004, Outercurve Foundation.\nCopyright (c) 1995-2001 Corporation for National Research Initiatives.\\n\\\nCopyright 1999, Bioreason, Inc., all rights reserved.\n2001-10-20 fl   added split primitive; re-enable unicode for 1.6/2.0/2.1\nCopyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.\nCopyright (c) 2008 by Christian Heimes <christian@cheimes.de>\nCopyright 2001-2019 by Vinay Sajip. All Rights Reserved.\nCopyright (C) 2005 Martin v. Löwis Licensed to PSF under a contributor agreement.\nppc-darwin.S - Copyright (c) 2000 John Hornkvist\nCopyright (c) 2000, BeOpen.com.\nCopyright (C) 2001-2010 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\nCopyright (C) 2001-2007 Python Software Foundation Author: Anthony Baxter Contact: email-sig@python.org\nCopyright (c) 2004 by Fredrik Lundh <fredrik@pythonware.com>\nCopyright Disney Enterprises, Inc.  
All Rights Reserved.\nffi.c - Copyright (c) 1996, 1998, 1999, 2001  Red Hat, Inc.\nCopyright (C) 2003-2013 Python Software Foundation\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands.  All rights reserved.\nCopyright (c) <xi:include href=\"year.txt\" parse=\"text\" />.\nprepcif.c - Copyright (c) 1996, 1998  Red Hat, Inc.\nself.assertEqual(list(c), list(range(2,2000)))\nCopyright (C) 2002-2007 Python Software Foundation Author: Ben Gertzfield, Barry Warsaw Contact: email-sig@python.org\n-- Copyright (c) IBM Corporation, 2000, 2008.  All rights reserved.   --\nCopyright (C) 2012   Christian Heimes (christian@python.org)\nfficommon.h - Copyright (c) 1996  Red Hat, Inc.\nCopyright (C) 2012-2016  Christian Heimes (christian@python.org)\nCopyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>\n(c) 2002 Python Software Foundation.  All Rights Reserved.\nCopyright (c) 2004 by Secret Labs AB, http://www.pythonware.com\nCopyright (c) 2004, Outercurve Foundation.\nCopyright (c) 2006-2008, R Oudkerk Licensed to PSF under a Contributor Agreement.\n.. Copyright 1995 Virginia Polytechnic Institute and State University and Fred L. Drake, Jr.  This copyright notice must be distributed on all copies, but this document otherwise may be distributed as part of the Python distribution.  No fee may be charged for this document in any representation, either on paper or electronically.  This restriction does not affect other elements in a distributed package in any way.\nCopyright 2012-2013 by Larry Hastings.\nCopyright (C) 2002-2006 Python Software Foundation Contact: email-sig@python.org email package unit tests for (optional) Asian codecs\nCopyright (c) 2002 Peter O'Gorman <ogorman@users.sourceforge.net>\nCopyright 2007 Google Inc.\nCopyright (c) 1999 by Fredrik Lundh\nCopyright (C) 2001-2010 Python Software Foundation Contact: email-sig@python.org email package unit tests\nCopyright (c) 2003-2004 by Fredrik Lundh.  
All rights reserved.\nCopyright (c) 1991-1999 Unicode, Inc.  All Rights reserved.\nCopyright (c) 2000-2017 Expat development team Licensed under the MIT license:\nCopyright (c) 1997-2000 Thai Open Source Software Center Ltd\nCopyright (c) 1998 The Open Group\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands. All rights reserved.\\\n<string>%version%, (c) 2001-2016 Python Software Foundation.</string>\nCopyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com\nCopyright (C) 2005 Gerhard Häring <gh@ghaering.de>\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\nCopyright (c) 1996-2008  Red Hat, Inc and others.\nCopyright (C) 2005 Martin v. Löwis Licensed to PSF under a Contributor Agreement.\n<string>(c) 2001-2016 Python Software Foundation.</string>\n<string>%VERSION%, (c) 2001-2019 Python Software Foundation.</string>\nCopyright (C) 1997, 2002, 2003, 2007, 2008 Martin von Loewis\nCopyright (c) 2013  Marek Majkowski <marek@popcount.org>\nCopyright (c) 1999-2008 by Fredrik Lundh.  All rights reserved.\nCopyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, All rights reserved.\nCopyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd and Clark Cooper\n2001-04-15 fl   export copyright as Python attribute, not global 2001-04-28 fl   added copy methods (work in progress)\nCopyright (C) 2002, 2003 Python Software Foundation.\nCopyright (c) 2004, 2005, 2006 Python Software Foundation.\ndnl Copyright © 2012-2015 Dan Nicholson <dbn.lists@gmail.com>\nCopyright (c) 1999-2009 by Secret Labs AB.  All rights reserved.\nCopyright (c) 2003-2010 Python Software Foundation This module is free software, and you may redistribute it and/or modify it under the same terms as Python itself, so long as this copyright message and disclaimer are retained in their original form.\nCopyright (c) 1991-1995 Stichting Mathematisch Centrum.  
All rights reserved.\n2001-10-18 fl   fixed group reset issue (from Matthew Mueller)\nCopyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.\n-- Copyright (c) IBM Corporation, 1981, 2008.  All rights reserved.   --\nCopyright (C) 2000  Bastian Kleineidam\nppc-darwinclosure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation, Inc. based on ppcclosure.S\nCopyright (c) 2001  John Beniton\nPortions copyright 1991-1995 by Stichting Mathematisch Centrum, Amsterdam, The Netherlands.  Copying is permitted under the terms associated with the main Python distribution, with the additional restriction that this additional notice be included and maintained on all distributed copies.\n\nA. HISTORY OF THE SOFTWARE\n==========================\n\nPython was created in the early 1990s by Guido van Rossum at Stichting\nMathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands\nas a successor of a language called ABC.  Guido remains Python's\nprincipal author, although it includes many contributions from others.\n\nIn 1995, Guido continued his work on Python at the Corporation for\nNational Research Initiatives (CNRI, see http://www.cnri.reston.va.us)\nin Reston, Virginia where he released several versions of the\nsoftware.\n\nIn May 2000, Guido and the Python core development team moved to\nBeOpen.com to form the BeOpen PythonLabs team.  In October of the same\nyear, the PythonLabs team moved to Digital Creations, which became\nZope Corporation.  In 2001, the Python Software Foundation (PSF, see\nhttps://www.python.org/psf/) was formed, a non-profit organization\ncreated specifically to own Python-related Intellectual Property.\nZope Corporation was a sponsoring member of the PSF.\n\nAll Python releases are Open Source (see http://www.opensource.org for\nthe Open Source Definition).  
Historically, most, but not all, Python\nreleases have also been GPL-compatible; the table below summarizes\nthe various releases.\n\n    Release         Derived     Year        Owner       GPL-\n                    from                                compatible? (1)\n\n    0.9.0 thru 1.2              1991-1995   CWI         yes\n    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes\n    1.6             1.5.2       2000        CNRI        no\n    2.0             1.6         2000        BeOpen.com  no\n    1.6.1           1.6         2001        CNRI        yes (2)\n    2.1             2.0+1.6.1   2001        PSF         no\n    2.0.1           2.0+1.6.1   2001        PSF         yes\n    2.1.1           2.1+2.0.1   2001        PSF         yes\n    2.1.2           2.1.1       2002        PSF         yes\n    2.1.3           2.1.2       2002        PSF         yes\n    2.2 and above   2.1.1       2001-now    PSF         yes\n\nFootnotes:\n\n(1) GPL-compatible doesn't mean that we're distributing Python under\n    the GPL.  All Python licenses, unlike the GPL, let you distribute\n    a modified version without making your changes open source.  The\n    GPL-compatible licenses make it possible to combine Python with\n    other software that is released under the GPL; the others don't.\n\n(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,\n    because its license has a choice of law clause.  According to\n    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1\n    is \"not incompatible\" with the GPL.\n\nThanks to the many outside volunteers who have worked under Guido's\ndirection to make these releases possible.\n\n\nB. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON\n===============================================================\n\nPYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\n--------------------------------------------\n\n1. 
This LICENSE AGREEMENT is between the Python Software Foundation\n(\"PSF\"), and the Individual or Organization (\"Licensee\") accessing and\notherwise using this software (\"Python\") in source or binary form and\nits associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, PSF hereby\ngrants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,\nanalyze, test, perform and/or display publicly, prepare derivative works,\ndistribute, and otherwise use Python alone or in any derivative version,\nprovided, however, that PSF's License Agreement and PSF's notice of copyright,\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,\n2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019 Python Software Foundation;\nAll Rights Reserved\" are retained in Python alone or in any derivative version\nprepared by Licensee.\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python.\n\n4. PSF is making Python available to Licensee on an \"AS IS\"\nbasis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\nFOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. 
Nothing in this License Agreement shall be deemed to create any\nrelationship of agency, partnership, or joint venture between PSF and\nLicensee.  This License Agreement does not grant permission to use PSF\ntrademarks or trade name in a trademark sense to endorse or promote\nproducts or services of Licensee, or any third party.\n\n8. By copying, installing or otherwise using Python, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nBEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0\n-------------------------------------------\n\nBEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1\n\n1. This LICENSE AGREEMENT is between BeOpen.com (\"BeOpen\"), having an\noffice at 160 Saratoga Avenue, Santa Clara, CA 95051, and the\nIndividual or Organization (\"Licensee\") accessing and otherwise using\nthis software in source or binary form and its associated\ndocumentation (\"the Software\").\n\n2. Subject to the terms and conditions of this BeOpen Python License\nAgreement, BeOpen hereby grants Licensee a non-exclusive,\nroyalty-free, world-wide license to reproduce, analyze, test, perform\nand/or display publicly, prepare derivative works, distribute, and\notherwise use the Software alone or in any derivative version,\nprovided, however, that the BeOpen Python License is retained in the\nSoftware, alone or in any derivative version prepared by Licensee.\n\n3. BeOpen is making the Software available to Licensee on an \"AS IS\"\nbasis.  BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n4. 
BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE\nSOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS\nAS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY\nDERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n5. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n6. This License Agreement shall be governed by and interpreted in all\nrespects by the law of the State of California, excluding conflict of\nlaw provisions.  Nothing in this License Agreement shall be deemed to\ncreate any relationship of agency, partnership, or joint venture\nbetween BeOpen and Licensee.  This License Agreement does not grant\npermission to use BeOpen trademarks or trade names in a trademark\nsense to endorse or promote products or services of Licensee, or any\nthird party.  As an exception, the \"BeOpen Python\" logos available at\nhttp://www.pythonlabs.com/logos.html may be used according to the\npermissions granted on that web page.\n\n7. By copying, installing or otherwise using the software, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nCNRI LICENSE AGREEMENT FOR PYTHON 1.6.1\n---------------------------------------\n\n1. This LICENSE AGREEMENT is between the Corporation for National\nResearch Initiatives, having an office at 1895 Preston White Drive,\nReston, VA 20191 (\"CNRI\"), and the Individual or Organization\n(\"Licensee\") accessing and otherwise using Python 1.6.1 software in\nsource or binary form and its associated documentation.\n\n2. 
Subject to the terms and conditions of this License Agreement, CNRI\nhereby grants Licensee a nonexclusive, royalty-free, world-wide\nlicense to reproduce, analyze, test, perform and/or display publicly,\nprepare derivative works, distribute, and otherwise use Python 1.6.1\nalone or in any derivative version, provided, however, that CNRI's\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c)\n1995-2001 Corporation for National Research Initiatives; All Rights\nReserved\" are retained in Python 1.6.1 alone or in any derivative\nversion prepared by Licensee.  Alternately, in lieu of CNRI's License\nAgreement, Licensee may substitute the following text (omitting the\nquotes): \"Python 1.6.1 is made available subject to the terms and\nconditions in CNRI's License Agreement.  This Agreement together with\nPython 1.6.1 may be located on the Internet using the following\nunique, persistent identifier (known as a handle): 1895.22/1013.  This\nAgreement may also be obtained from a proxy server on the Internet\nusing the following URL: http://hdl.handle.net/1895.22/1013\".\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python 1.6.1 or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python 1.6.1.\n\n4. CNRI is making Python 1.6.1 available to Licensee on an \"AS IS\"\nbasis.  CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. 
CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\n1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. This License Agreement shall be governed by the federal\nintellectual property law of the United States, including without\nlimitation the federal copyright law, and, to the extent such\nU.S. federal law does not apply, by the law of the Commonwealth of\nVirginia, excluding Virginia's conflict of law provisions.\nNotwithstanding the foregoing, with regard to derivative works based\non Python 1.6.1 that incorporate non-separable material that was\npreviously distributed under the GNU General Public License (GPL), the\nlaw of the Commonwealth of Virginia shall govern this License\nAgreement only as to issues arising under or with respect to\nParagraphs 4, 5, and 7 of this License Agreement.  Nothing in this\nLicense Agreement shall be deemed to create any relationship of\nagency, partnership, or joint venture between CNRI and Licensee.  This\nLicense Agreement does not grant permission to use CNRI trademarks or\ntrade name in a trademark sense to endorse or promote products or\nservices of Licensee, or any third party.\n\n8. By clicking on the \"ACCEPT\" button where indicated, or by copying,\ninstalling or otherwise using Python 1.6.1, Licensee agrees to be\nbound by the terms and conditions of this License Agreement.\n\n        ACCEPT\n\n\nCWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2\n--------------------------------------------------\n\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,\nThe Netherlands.  
All rights reserved.\n\nPermission to use, copy, modify, and distribute this software and its\ndocumentation for any purpose and without fee is hereby granted,\nprovided that the above copyright notice appear in all copies and that\nboth that copyright notice and this permission notice appear in\nsupporting documentation, and that the name of Stichting Mathematisch\nCentrum or CWI not be used in advertising or publicity pertaining to\ndistribution of the software without specific, written prior\npermission.\n\nSTICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO\nTHIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE\nFOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\nOF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nSoftware: Python 3.8.5\nCopyright notice:\ncopyright, i.e., \"Copyright © 2001-2018 Python Software Foundation; All Rights Reserved\" are retained in Python 3.8 alone or in any derivative version prepared by Licensee.\nCopyright (c) 1999-2000 by Secret Labs AB\nCopyright (C) 2005-2007   Gregory P. Smith (greg@krypto.org)\nCopyright (c) 2003.\n.  Copyright (C) 2005-2010   Gregory P. Smith (greg@krypto.org)\nCopyright 1996,1997 by Oliver Andrich, Koblenz, Germany.\nCopyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.\nCopyright (c) 1995-2001 Corporation for National Research Initiatives.  All rights reserved.\nCopyright 1994 by Lance Ellinghouse Cathedral City, California Republic, United States of America.\nCopyright (C) 2001 Python Software Foundation Barry Warsaw <barry@python.org>, 2000.\nCopyright (c) 2008-2012 Stefan Krah. 
All rights reserved.\n2001-07-01 fl   added BIGCHARSET support (from Martin von Loewis)\n``'Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam'``\nCopyright (C) 2003 Python Software Foundation\nCopyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.\nCopyright 1995-1997, Automatrix, Inc., all rights reserved.\nCopyright (c) 2002 MyCompanyName. All rights reserved.\nCopyright (c) 2004 by Peter Astrand <astrand@lysator.liu.se>\nCopyright (c) 1999-2002 by Fredrik Lundh.\nCopyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.\\n\\\nAIX ABI support (c) 2002 Free Software Foundation, Inc.\n( Copyright (c) 2011 Stefan Krah. All rights reserved. )\n2013-02-04 mrab added fullmatch primitive\n2003-10-17 gn   implemented non recursive scheme\n2003-04-18 mvl  fully support 4-byte codes\nCopyright (c) 1999-2002 by Secret Labs AB.\nportions copyright 2001, Autonomous Zones Industries, Inc., all rights...\nCopyright © 2013 W3C® (MIT, ERCIM, Keio, Beihang), All Rights Reserved.\nCopyright (C) 1986 Gary S. Brown.  You may use this program, or code or tables extracted from it, as desired without restriction.\n-- Copyright (c) IBM Corporation, 2003, 2008.  All rights reserved.   --\n; Copyright (c) 2008-2016 Stefan Krah. All rights reserved.\nCopyright (c) 2001-2020 Python Software Foundation.  All rights reserved.\nCopyright 2008 Armin Ronacher.\nCopyright © 2000 BeOpen.com. 
All rights reserved.\n(c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)\nCopyright (c) 2005-2006 ActiveState Software Inc.\nCopyright (C) 1994 Steen Lumholt.\nCopyright (c) 1999 by Fredrik Lundh.\nlibffi - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (c) 2006-2008 Alexander Chemeris\nCopyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>\nCopyright (c) 1999-2003 Steve Purcell\nDarwin ABI support (c) 2001 John Hornkvist\nCopyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)\nCopyright (c) 1999-2008 by Fredrik Lundh\ni.e., \"Copyright © 1995-2001 Corporation for National Research Initiatives; All Rights Reserved\" are retained in Python 1.6.1 alone or in any derivative version prepared by Licensee.  Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): \"Python 1.6.1\n-- Copyright (c) IBM Corporation, 2005, 2009.  All rights reserved.   --\nCopyright (c) 2001-2017 Expat maintainers\nCopyright (c) 2001-2012 Python Software Foundation. All Rights Reserved.\nCopyright (C) 2002-2006 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\n(c) 2002 Gregory P. Ward.  All Rights Reserved.\nCopyright (c) 2000 BeOpen.com.\\n\\\nCopyright (C) 2001-2007 Python Software Foundation Author: Ben Gertzfield, Barry Warsaw Contact: email-sig@python.org\nCopyright (C) 2003-2004 Federico Di Gregorio <fog@debian.org>\n2001-05-14 fl   fixes for 1.5.2 compatibility\nCopyright © 1995-2000 Corporation for National Research Initiatives. All rights reserved.\nCopyright (c) 2013 W3C(R) (MIT, ERCIM, Keio, Beihang), All Rights Reserved.\nCopyright (C) 1995, 1996, 1997, 1998, and 1999 WIDE Project.\nCopyright (C) 2001-2007 Python Software Foundation Author: Barry Warsaw, Thomas Wouters, Anthony Baxter Contact: email-sig@python.org\nCopyright (C) 2005-2010   Gregory P. Smith (greg@krypto.org)\nCopyright 2001-2017 by Vinay Sajip. 
All Rights Reserved.\nCopyright (c) 2000 Doug White, 2006 James Knight, 2007 Christian Heimes All rights reserved.\nCopyright (C) 1999-2001 Gregory P. Ward.\nCopyright (c) 1999-2002 by Fredrik Lundh\n+   Copyright 2007 Python Software Foundation.\nelse if (config == (void )2000 && (c) == 0x9B1D) {                 \\\nCopyright (c) 1999-2002 by Secret Labs AB\n2002-11-09 fl   fixed empty sub/subn return type\nCopyright 2009 Gabriel A. Genellina\nCopyright (c) 2003-2009 by Fredrik Lundh.  All rights reserved.\nCopyright 2004-2005 Elemental Security, Inc. All Rights Reserved.\n(c) Copyright Guido van Rossum, 2000.\nCopyright (C) 1995, 1996, 1997, and 1998 WIDE Project.\nCopyright (C) 2011-2012 Vinay Sajip.\nCopyright 2006 Google, Inc. All Rights Reserved.\n(c) Copyright Marc-Andre Lemburg, 2005.\nCopyright (C) 1996-2014 Free Software Foundation, Inc.\nCopyright (C) YEAR ORGANIZATION FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\nCopyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com\nCopyright (c) 1995-2000, Corporation for National Research Initiatives.\nCopyright (c) 1999 Toby Dickenson\nCopyright (C) 2001,2002 Python Software Foundation csv package unit tests\nCopyright (C) 2005, 2006 Martin von Löwis Licensed to PSF under a Contributor Agreement.\nCopyright (c) 1997 by Fredrik Lundh\nCopyright (c) 2002-2006 Python Software Foundation.  All rights reserved.\nCopyright (c) 2002  Roger Sayle\nCopyright 1995-1996 by Fred L. Drake, Jr. 
and Virginia Polytechnic Institute and State University, Blacksburg, Virginia, USA.\ntypes.c - Copyright (c) 1996, 1998  Red Hat, Inc.\nCopyright 2000, Mojam Media, Inc., all rights reserved.\nCopyright (C) 1994 X Consortium\ncopyright, i.e., \"Copyright © 2001-2020 Python Software Foundation; All Rights Reserved\" are retained in Python |release| alone or in any derivative version prepared by Licensee.\nCopyright (C) 2004-2006 Python Software Foundation Authors: Baxter, Wouters and Warsaw Contact: email-sig@python.org\nCopyright (C) 2002-2004 Python Software Foundation\nCopyright (c) 2002 Jorge Acereda  <jacereda@users.sourceforge.net> &\ndarwin.S - Copyright (c) 1996, 1998, 2001, 2002, 2003  Red Hat, Inc.\nCopyright © 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands.  All rights reserved.\n\"Copyright 1995-1996 by Virginia Polytechnic Institute & State\\n\\\nx86-ffitarget.h - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (C) 1995-2011 Jean-loup Gailly and Mark Adler\nCopyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.\nCopyright © 2001-2020 Python Software Foundation. 
All rights reserved.\n<string>(c) 2001-2020 Python Software Foundation.</string>\nCopyright (C) 2001-2006 Python Software Foundation Author: Ben Gertzfield Contact: email-sig@python.org\nCopyright (c) 2005 Don Owens All rights reserved.\ndarwin64.S - Copyright (c) 2006 Free Software Foundation, Inc.\n(c) Copyright 2005, Marc-Andre Lemburg (mal@lemburg.com).\nlibrary/xml.etree.elementtree,,:include,  Copyright (c) <xi:include href=\"year.txt\" parse=\"text\" />.\nffi.c - Copyright (c) 1998 Geoffrey Keating\nCopyright 2006 Georg Brandl.\nCopyright (C) 2005-2010 Gerhard Häring <gh@ghaering.de>\n(c) 2013-2017 Christian Heimes <christian@python.org>\nCopyright 1992-2018 Free Software Foundation, Inc.\nCopyright (C) 2003-2013 Python Software Foundation import copy import operator import pickle import unittest import plistlib import os import datetime import codecs import binascii import collections from test import support from io import BytesIO\nCopyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.\nCopyright (c) 1990-1995, Stichting Mathematisch Centrum.\n2001-10-24 fl   added finditer primitive (for 2.2 only)\nCopyright (C) 2001-2006 Python Software Foundation Author: Keith Dart Contact: email-sig@python.org\nCopyright (c) 1999-2009 by Fredrik Lundh.\n(c) 2000 Peter Bosch.  All Rights Reserved.\nCopyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.\nCopyright (C) 2012 Free Software Foundation, Inc.\nCopyright (C) 2002-2007 Python Software Foundation Author: Ben Gertzfield Contact: email-sig@python.org\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;\nCopyright 1994 by Lance Ellinghouse, Cathedral City, California Republic, United States of America.\nCopyright (C) 2006 - 2010  Gregor Lingl email: glingl@aon.at\nCopyright (c) 2009,2010 Zmanda Inc. <http://www.zmanda.com/>\nCopyright (c) 1998-2008 The OpenSSL Project.  
All rights reserved.\nCopyright 1996 by Sam Rushing\nCopyright (c) 1998-2000 Thai Open Source Software Center Ltd and Clark Cooper\n(c) Craig Reese, Joe Campbell and Jeff Poskanzer 1989 /\nCopyright (c) 1999, 2000, 2001 Steve Purcell This module is free software, and you may redistribute it and/or modify it under the same terms as Python itself, so long as this copyright message and disclaimer are retained in their original form.\n-- Copyright (c) IBM Corporation, 2005, 2008.  All rights reserved.   --\nCopyright (C) 2001-2012 Python Software Foundation. All Rights Reserved.\ndnl Copyright © 2004 Scott James Remnant <scott@netsplit.com>.\nCopyright (C) 2002-2007 Python Software Foundation Contact: email-sig@python.org\nCopyright (c) 1998-2001 by Secret Labs AB.  All rights reserved.\nCopyright (c) 2009,2010 Dustin J. Mitchell <dustin@zmanda.com>\nCopyright (c) 2002  Bo Thorsen\n2001-10-21 fl   added sub/subn primitive\nCopyright 1992-1994, David Gottner\nCopyright (C) 2011-2013 Vinay Sajip.\nCopyright (c) 1991, 2000, 2001 by Lucent Technologies.\nCopyright (c) 2010 Python Software Foundation. All Rights Reserved.\n\" SRE 2.2.2 Copyright (c) 1997-2002 by Secret Labs AB \";\n;   Copyright (c) 2004, Outercurve Foundation.\nCopyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>\nCopyright (c) 1999 by Secret Labs AB\nlibffi PyOBJC - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (c) 1999-2009 by Fredrik Lundh\nCopyright 2007 Google, Inc. All Rights Reserved.\n-- Copyright (c) IBM Corporation, 2004, 2008.  All rights reserved.   --\nCopyright (C) 2001-2007 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\nCopyright (c) 2000 BeOpen.com.  All rights reserved.\n<string>%version%, (c) 2001-2020 Python Software Foundation.</string>\nCopyright © 1991-1995 Stichting Mathematisch Centrum. 
All rights reserved.\nCopyright (C) 2000  Luke Kenneth Casson Leighton <lkcl@samba.org>\nCopyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>\n2001-12-07 fl   fixed memory leak in sub/subn (Guido van Rossum)\n-- Copyright (c) IBM Corporation, 2001, 2008.  All rights reserved.   --\nVirginia, USA.  Portions copyright 1991-1995 by Stichting Mathematisch\\n\\\nCopyright (c) 2004 Free Software Foundation, Inc.\nso portions are Copyright (C) 2001,2002 Python Software Foundation, and were written by Barry Warsaw.\nCopyright (C) 2004-2010 Gerhard Häring <gh@ghaering.de>\nCopyright (c) 2004 Python Software Foundation.\n(c) Copyright 2000 Guido van Rossum.\nCopyright 2007 Georg Brandl.\nCopyright (c) 1999 by Secret Labs AB.\nCopyright (c) 2002 Unicode, Inc.  All Rights reserved.\nCopyright 2009 Brian Quinlan. All Rights Reserved.\nCopyright (c) 2008-2009, Google Inc.\nCopyright (c) 2001-2006 Twisted Matrix Laboratories.\n(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c) 1995-2001 Corporation for National Research Initiatives; All Rights Reserved\" are retained in Python 1.6.1 alone or in any derivative version prepared by Licensee.  Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): \"Python 1.6.1 is made available subject to the terms and conditions in CNRI's License Agreement.  This Agreement together with Python 1.6.1 may be located on the Internet using the following unique, persistent identifier (known as a handle): 1895.22/1013.  This Agreement may also be obtained from a proxy server on the Internet\nCopyright (c) Corporation for National Research Initiatives.\nCopyright (c) 2008-2016 Stefan Krah. All rights reserved.\nCopyright 2001-2016 by Vinay Sajip. 
All Rights Reserved.\nif (config == (void )2000 && (c) == 0x20B9F) {                     \\\nppc-ffitarget.h - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (c) 2001-2006 Gregory P. Ward.  All rights reserved.\nCopyright (c) 1997-2001 by Secret Labs AB.  All rights reserved.\nCopyright 2000 by Timothy O'Malley <timo@alum.mit.edu>\nCopyright (C) 2007-2012 Michael Foord & the mock team E-mail: fuzzyman AT voidspace DOT org DOT uk\nCopyright (C) 2011-2014 Vinay Sajip.\nppc-darwin.h - Copyright (c) 2002, 2003, 2004, Free Software Foundation, Inc.\nppc64-darwinclosure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation, Inc. based on ppcclosure.S\nx86-ffi64.c - Copyright (c) 2002  Bo Thorsen <bo@suse.de>\nCopyright (c) 2002  Ranjit Mathew\nCopyright (C) 2001-2006 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\n﻿Copyright (c) 2004, Outercurve Foundation.\nCopyright (c) 1995-2001 Corporation for National Research Initiatives.\\n\\\nCopyright 1999, Bioreason, Inc., all rights reserved.\n2001-10-20 fl   added split primitive; re-enable unicode for 1.6/2.0/2.1\nCopyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.\nCopyright (c) 2008 by Christian Heimes <christian@cheimes.de>\nCopyright 2001-2019 by Vinay Sajip. All Rights Reserved.\nCopyright (C) 2005 Martin v. Löwis Licensed to PSF under a contributor agreement.\nppc-darwin.S - Copyright (c) 2000 John Hornkvist\nCopyright (c) 2000, BeOpen.com.\nCopyright (C) 2001-2010 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\nCopyright (C) 2001-2007 Python Software Foundation Author: Anthony Baxter Contact: email-sig@python.org\nCopyright (c) 2004 by Fredrik Lundh <fredrik@pythonware.com>\nCopyright Disney Enterprises, Inc.  All Rights Reserved.\nffi.c - Copyright (c) 1996, 1998, 1999, 2001  Red Hat, Inc.\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands.  
All rights reserved.\nCopyright (c) <xi:include href=\"year.txt\" parse=\"text\" />.\nprepcif.c - Copyright (c) 1996, 1998  Red Hat, Inc.\nself.assertEqual(list(c), list(range(2,2000)))\nCopyright (C) 2002-2007 Python Software Foundation Author: Ben Gertzfield, Barry Warsaw Contact: email-sig@python.org\n-- Copyright (c) IBM Corporation, 2000, 2008.  All rights reserved.   --\nCopyright (C) 2012   Christian Heimes (christian@python.org)\nfficommon.h - Copyright (c) 1996  Red Hat, Inc.\nCopyright (C) 2012-2016  Christian Heimes (christian@python.org)\nCopyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>\n(c) 2002 Python Software Foundation.  All Rights Reserved.\nCopyright (c) 2004 by Secret Labs AB, http://www.pythonware.com\nCopyright (c) 2004, Outercurve Foundation.\nCopyright (c) 2006-2008, R Oudkerk Licensed to PSF under a Contributor Agreement.\n.. Copyright 1995 Virginia Polytechnic Institute and State University and Fred L. Drake, Jr.  This copyright notice must be distributed on all copies, but this document otherwise may be distributed as part of the Python distribution.  No fee may be charged for this document in any representation, either on paper or electronically.  This restriction does not affect other elements in a distributed package in any way.\nCopyright 2012-2013 by Larry Hastings.\nCopyright (C) 2002-2006 Python Software Foundation Contact: email-sig@python.org email package unit tests for (optional) Asian codecs\nCopyright (c) 2002 Peter O'Gorman <ogorman@users.sourceforge.net>\nCopyright 2007 Google Inc.\nCopyright (c) 1999 by Fredrik Lundh\nCopyright (C) 2001-2010 Python Software Foundation Contact: email-sig@python.org email package unit tests\nCopyright (c) 2003-2004 by Fredrik Lundh.  All rights reserved.\nCopyright (c) 1991-1999 Unicode, Inc.  
All Rights reserved.\nCopyright (c) 2000-2017 Expat development team Licensed under the MIT license:\nCopyright (c) 1997-2000 Thai Open Source Software Center Ltd\nCopyright (c) 2001-2020 Python Software Foundation.\\n\\\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands. All rights reserved.\\\nCopyright (c) 1998 The Open Group\nCopyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com\nCopyright (C) 2005 Gerhard Häring <gh@ghaering.de>\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\nCopyright (c) 1996-2008  Red Hat, Inc and others.\nCopyright (C) 2005 Martin v. Löwis Licensed to PSF under a Contributor Agreement.\nCopyright (C) 1997, 2002, 2003, 2007, 2008 Martin von Loewis\n<string>%VERSION%, (c) 2001-2019 Python Software Foundation.</string>\nCopyright (c) 2013  Marek Majkowski <marek@popcount.org>\nCopyright (c) 2008 Daniel Amelang <dan@amelang.net>\nCopyright (c) 1999-2008 by Fredrik Lundh.  All rights reserved.\nCopyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, All rights reserved.\nCopyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd and Clark Cooper\n2001-04-15 fl   export copyright as Python attribute, not global 2001-04-28 fl   added copy methods (work in progress)\nCopyright (C) 2002, 2003 Python Software Foundation.\nCopyright (c) 2004, 2005, 2006 Python Software Foundation.\ndnl Copyright © 2012-2015 Dan Nicholson <dbn.lists@gmail.com>\nCopyright (c) 1999-2009 by Secret Labs AB.  All rights reserved.\nCopyright (c) 2003-2010 Python Software Foundation This module is free software, and you may redistribute it and/or modify it under the same terms as Python itself, so long as this copyright message and disclaimer are retained in their original form.\nCopyright (c) 1991-1995 Stichting Mathematisch Centrum.  
All rights reserved.\n2001-10-18 fl   fixed group reset issue (from Matthew Mueller)\nCopyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.\n-- Copyright (c) IBM Corporation, 1981, 2008.  All rights reserved.   --\nCopyright (C) 2000  Bastian Kleineidam\nppc-darwinclosure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation, Inc. based on ppcclosure.S\nPortions copyright 1991-1995 by Stichting Mathematisch Centrum, Amsterdam, The Netherlands.  Copying is permitted under the terms associated with the main Python distribution, with the additional restriction that this additional notice be included and maintained on all distributed copies.\n\nA. HISTORY OF THE SOFTWARE\n==========================\n\nPython was created in the early 1990s by Guido van Rossum at Stichting\nMathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands\nas a successor of a language called ABC.  Guido remains Python's\nprincipal author, although it includes many contributions from others.\n\nIn 1995, Guido continued his work on Python at the Corporation for\nNational Research Initiatives (CNRI, see http://www.cnri.reston.va.us)\nin Reston, Virginia where he released several versions of the\nsoftware.\n\nIn May 2000, Guido and the Python core development team moved to\nBeOpen.com to form the BeOpen PythonLabs team.  In October of the same\nyear, the PythonLabs team moved to Digital Creations, which became\nZope Corporation.  In 2001, the Python Software Foundation (PSF, see\nhttps://www.python.org/psf/) was formed, a non-profit organization\ncreated specifically to own Python-related Intellectual Property.\nZope Corporation was a sponsoring member of the PSF.\n\nAll Python releases are Open Source (see http://www.opensource.org for\nthe Open Source Definition).  
Historically, most, but not all, Python\nreleases have also been GPL-compatible; the table below summarizes\nthe various releases.\n\n    Release         Derived     Year        Owner       GPL-\n                    from                                compatible? (1)\n\n    0.9.0 thru 1.2              1991-1995   CWI         yes\n    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes\n    1.6             1.5.2       2000        CNRI        no\n    2.0             1.6         2000        BeOpen.com  no\n    1.6.1           1.6         2001        CNRI        yes (2)\n    2.1             2.0+1.6.1   2001        PSF         no\n    2.0.1           2.0+1.6.1   2001        PSF         yes\n    2.1.1           2.1+2.0.1   2001        PSF         yes\n    2.1.2           2.1.1       2002        PSF         yes\n    2.1.3           2.1.2       2002        PSF         yes\n    2.2 and above   2.1.1       2001-now    PSF         yes\n\nFootnotes:\n\n(1) GPL-compatible doesn't mean that we're distributing Python under\n    the GPL.  All Python licenses, unlike the GPL, let you distribute\n    a modified version without making your changes open source.  The\n    GPL-compatible licenses make it possible to combine Python with\n    other software that is released under the GPL; the others don't.\n\n(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,\n    because its license has a choice of law clause.  According to\n    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1\n    is \"not incompatible\" with the GPL.\n\nThanks to the many outside volunteers who have worked under Guido's\ndirection to make these releases possible.\n\n\nB. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON\n===============================================================\n\nPYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\n--------------------------------------------\n\n1. 
This LICENSE AGREEMENT is between the Python Software Foundation\n(\"PSF\"), and the Individual or Organization (\"Licensee\") accessing and\notherwise using this software (\"Python\") in source or binary form and\nits associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, PSF hereby\ngrants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,\nanalyze, test, perform and/or display publicly, prepare derivative works,\ndistribute, and otherwise use Python alone or in any derivative version,\nprovided, however, that PSF's License Agreement and PSF's notice of copyright,\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,\n2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;\nAll Rights Reserved\" are retained in Python alone or in any derivative version\nprepared by Licensee.\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python.\n\n4. PSF is making Python available to Licensee on an \"AS IS\"\nbasis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\nFOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. 
Nothing in this License Agreement shall be deemed to create any\nrelationship of agency, partnership, or joint venture between PSF and\nLicensee.  This License Agreement does not grant permission to use PSF\ntrademarks or trade name in a trademark sense to endorse or promote\nproducts or services of Licensee, or any third party.\n\n8. By copying, installing or otherwise using Python, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nBEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0\n-------------------------------------------\n\nBEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1\n\n1. This LICENSE AGREEMENT is between BeOpen.com (\"BeOpen\"), having an\noffice at 160 Saratoga Avenue, Santa Clara, CA 95051, and the\nIndividual or Organization (\"Licensee\") accessing and otherwise using\nthis software in source or binary form and its associated\ndocumentation (\"the Software\").\n\n2. Subject to the terms and conditions of this BeOpen Python License\nAgreement, BeOpen hereby grants Licensee a non-exclusive,\nroyalty-free, world-wide license to reproduce, analyze, test, perform\nand/or display publicly, prepare derivative works, distribute, and\notherwise use the Software alone or in any derivative version,\nprovided, however, that the BeOpen Python License is retained in the\nSoftware, alone or in any derivative version prepared by Licensee.\n\n3. BeOpen is making the Software available to Licensee on an \"AS IS\"\nbasis.  BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n4. 
BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE\nSOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS\nAS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY\nDERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n5. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n6. This License Agreement shall be governed by and interpreted in all\nrespects by the law of the State of California, excluding conflict of\nlaw provisions.  Nothing in this License Agreement shall be deemed to\ncreate any relationship of agency, partnership, or joint venture\nbetween BeOpen and Licensee.  This License Agreement does not grant\npermission to use BeOpen trademarks or trade names in a trademark\nsense to endorse or promote products or services of Licensee, or any\nthird party.  As an exception, the \"BeOpen Python\" logos available at\nhttp://www.pythonlabs.com/logos.html may be used according to the\npermissions granted on that web page.\n\n7. By copying, installing or otherwise using the software, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nCNRI LICENSE AGREEMENT FOR PYTHON 1.6.1\n---------------------------------------\n\n1. This LICENSE AGREEMENT is between the Corporation for National\nResearch Initiatives, having an office at 1895 Preston White Drive,\nReston, VA 20191 (\"CNRI\"), and the Individual or Organization\n(\"Licensee\") accessing and otherwise using Python 1.6.1 software in\nsource or binary form and its associated documentation.\n\n2. 
Subject to the terms and conditions of this License Agreement, CNRI\nhereby grants Licensee a nonexclusive, royalty-free, world-wide\nlicense to reproduce, analyze, test, perform and/or display publicly,\nprepare derivative works, distribute, and otherwise use Python 1.6.1\nalone or in any derivative version, provided, however, that CNRI's\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c)\n1995-2001 Corporation for National Research Initiatives; All Rights\nReserved\" are retained in Python 1.6.1 alone or in any derivative\nversion prepared by Licensee.  Alternately, in lieu of CNRI's License\nAgreement, Licensee may substitute the following text (omitting the\nquotes): \"Python 1.6.1 is made available subject to the terms and\nconditions in CNRI's License Agreement.  This Agreement together with\nPython 1.6.1 may be located on the Internet using the following\nunique, persistent identifier (known as a handle): 1895.22/1013.  This\nAgreement may also be obtained from a proxy server on the Internet\nusing the following URL: http://hdl.handle.net/1895.22/1013\".\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python 1.6.1 or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python 1.6.1.\n\n4. CNRI is making Python 1.6.1 available to Licensee on an \"AS IS\"\nbasis.  CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. 
CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\n1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. This License Agreement shall be governed by the federal\nintellectual property law of the United States, including without\nlimitation the federal copyright law, and, to the extent such\nU.S. federal law does not apply, by the law of the Commonwealth of\nVirginia, excluding Virginia's conflict of law provisions.\nNotwithstanding the foregoing, with regard to derivative works based\non Python 1.6.1 that incorporate non-separable material that was\npreviously distributed under the GNU General Public License (GPL), the\nlaw of the Commonwealth of Virginia shall govern this License\nAgreement only as to issues arising under or with respect to\nParagraphs 4, 5, and 7 of this License Agreement.  Nothing in this\nLicense Agreement shall be deemed to create any relationship of\nagency, partnership, or joint venture between CNRI and Licensee.  This\nLicense Agreement does not grant permission to use CNRI trademarks or\ntrade name in a trademark sense to endorse or promote products or\nservices of Licensee, or any third party.\n\n8. By clicking on the \"ACCEPT\" button where indicated, or by copying,\ninstalling or otherwise using Python 1.6.1, Licensee agrees to be\nbound by the terms and conditions of this License Agreement.\n\n        ACCEPT\n\n\nCWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2\n--------------------------------------------------\n\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,\nThe Netherlands.  
All rights reserved.\n\nPermission to use, copy, modify, and distribute this software and its\ndocumentation for any purpose and without fee is hereby granted,\nprovided that the above copyright notice appear in all copies and that\nboth that copyright notice and this permission notice appear in\nsupporting documentation, and that the name of Stichting Mathematisch\nCentrum or CWI not be used in advertising or publicity pertaining to\ndistribution of the software without specific, written prior\npermission.\n\nSTICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO\nTHIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE\nFOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\nOF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nSoftware: Python 3.9.2\nCopyright notice:\nCopyright (c) 1999-2000 by Secret Labs AB\nCopyright (C) 2005-2007   Gregory P. Smith (greg@krypto.org)\nCopyright (c) 2003.\n.  Copyright (C) 2005-2010   Gregory P. Smith (greg@krypto.org)\nCopyright 1996,1997 by Oliver Andrich, Koblenz, Germany.\nCopyright (c) 2008-2020 Stefan Krah. All rights reserved.\nCopyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.\nCopyright (c) 1995-2001 Corporation for National Research Initiatives.  All rights reserved.\nCopyright 1994 by Lance Ellinghouse Cathedral City, California Republic, United States of America.\nCopyright (C) 2001 Python Software Foundation Barry Warsaw <barry@python.org>, 2000.\nCopyright (c) 2008-2012 Stefan Krah. All rights reserved.\n2001-07-01 fl   added BIGCHARSET support (from Martin von Loewis)\n``'Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam'``\nCopyright (C) 2003 Python Software Foundation\nCopyright (C) 2001-2016 Vinay Sajip. 
All Rights Reserved.\nCopyright 1995-1997, Automatrix, Inc., all rights reserved.\nCopyright (c) 2002 MyCompanyName. All rights reserved.\nCopyright (c) 2004 by Peter Astrand <astrand@lysator.liu.se>\nCopyright (c) 1999-2002 by Fredrik Lundh.\nCopyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.\\n\\\nAIX ABI support (c) 2002 Free Software Foundation, Inc.\nCopyright (C) 1996-2020 Free Software Foundation, Inc.\n2013-02-04 mrab added fullmatch primitive\n2003-10-17 gn   implemented non recursive scheme\n2003-04-18 mvl  fully support 4-byte codes\nCopyright (c) 1999-2002 by Secret Labs AB.\nportions copyright 2001, Autonomous Zones Industries, Inc., all rights...\nCopyright © 2013 W3C® (MIT, ERCIM, Keio, Beihang), All Rights Reserved.\nCopyright (C) 1986 Gary S. Brown.  You may use this program, or code or tables extracted from it, as desired without restriction.\n-- Copyright (c) IBM Corporation, 2003, 2008.  All rights reserved.   --\n; Copyright (c) 2008-2020 Stefan Krah. All rights reserved.\nCopyright 2008 Armin Ronacher.\nCopyright © 2000 BeOpen.com. All rights reserved.\n(c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)\nCopyright (c) 2005-2006 ActiveState Software Inc.\nCopyright (C) 1994 Steen Lumholt.\nCopyright (c) 1999 by Fredrik Lundh.\nlibffi - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>\nCopyright (c) 1999-2003 Steve Purcell\nDarwin ABI support (c) 2001 John Hornkvist\nCopyright (c) 2001-2021 Python Software Foundation.\\n\\\nCopyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)\nCopyright (c) 1999-2008 by Fredrik Lundh\ni.e., \"Copyright © 1995-2001 Corporation for National Research Initiatives; All Rights Reserved\" are retained in Python 1.6.1 alone or in any derivative version prepared by Licensee.  
Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): \"Python 1.6.1\n-- Copyright (c) IBM Corporation, 2005, 2009.  All rights reserved.   --\nCopyright (c) 2001-2017 Expat maintainers\nCopyright (c) 2001-2012 Python Software Foundation. All Rights Reserved.\nCopyright (C) 2002-2006 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\n(c) 2002 Gregory P. Ward.  All Rights Reserved.\ncopyright, i.e., \"Copyright © 2001-2021 Python Software Foundation; All Rights Reserved\" are retained in Python |release| alone or in any derivative version prepared by Licensee.\nCopyright (c) 2000 BeOpen.com.\\n\\\nCopyright (C) 2001-2007 Python Software Foundation Author: Ben Gertzfield, Barry Warsaw Contact: email-sig@python.org\nCopyright (C) 2003-2004 Federico Di Gregorio <fog@debian.org>\n2001-05-14 fl   fixes for 1.5.2 compatibility\nCopyright (c) 2001-2021 Python Software Foundation.  All rights reserved.\nCopyright © 1995-2000 Corporation for National Research Initiatives. All rights reserved.\nCopyright (c) 2013 W3C(R) (MIT, ERCIM, Keio, Beihang), All Rights Reserved.\nCopyright (C) 1995, 1996, 1997, 1998, and 1999 WIDE Project.\nCopyright (C) 2001-2007 Python Software Foundation Author: Barry Warsaw, Thomas Wouters, Anthony Baxter Contact: email-sig@python.org\nCopyright (C) 2005-2010   Gregory P. Smith (greg@krypto.org)\nCopyright (c) 2000 Doug White, 2006 James Knight, 2007 Christian Heimes All rights reserved.\nCopyright (C) 1999-2001 Gregory P. 
Ward.\nCopyright (c) 1999-2002 by Fredrik Lundh\n+   Copyright 2007 Python Software Foundation.\nelse if (config == (void )2000 && (c) == 0x9B1D) {                 \\\nCopyright (c) 1999-2002 by Secret Labs AB\n2002-11-09 fl   fixed empty sub/subn return type\nCopyright (C) 2003-2013 Python Software Foundation import copy import operator import pickle import struct import unittest import plistlib import os import datetime import codecs import binascii import collections from test import support from io import BytesIO\nCopyright 2009 Gabriel A. Genellina\nCopyright (c) 2003-2009 by Fredrik Lundh.  All rights reserved.\nCopyright 2004-2005 Elemental Security, Inc. All Rights Reserved.\n(c) Copyright Guido van Rossum, 2000.\nCopyright (C) 1995, 1996, 1997, and 1998 WIDE Project.\nCopyright (C) 2011-2012 Vinay Sajip.\nCopyright 2006 Google, Inc. All Rights Reserved.\n(c) Copyright Marc-Andre Lemburg, 2005.\nCopyright (C) YEAR ORGANIZATION FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\nCopyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com\nCopyright (c) 1995-2000, Corporation for National Research Initiatives.\nCopyright (C) 2001 I'O, All Rights Reserved.\nCopyright (c) 1999 Toby Dickenson\nCopyright (C) 2001,2002 Python Software Foundation csv package unit tests\nCopyright (C) 2005, 2006 Martin von Löwis Licensed to PSF under a Contributor Agreement.\nCopyright (c) 1997 by Fredrik Lundh\nCopyright (c) 2002-2006 Python Software Foundation.  All rights reserved.\nCopyright (c) 2002  Roger Sayle\nCopyright 1995-1996 by Fred L. Drake, Jr. 
and Virginia Polytechnic Institute and State University, Blacksburg, Virginia, USA.\ntypes.c - Copyright (c) 1996, 1998  Red Hat, Inc.\nCopyright 2000, Mojam Media, Inc., all rights reserved.\nCopyright (C) 1994 X Consortium\nCopyright (C) 2002-2004 Python Software Foundation\nCopyright (C) 2004-2006 Python Software Foundation Authors: Baxter, Wouters and Warsaw Contact: email-sig@python.org\nCopyright (c) 2002 Jorge Acereda  <jacereda@users.sourceforge.net> &\ndarwin.S - Copyright (c) 1996, 1998, 2001, 2002, 2003  Red Hat, Inc.\nCopyright © 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands.  All rights reserved.\n\"Copyright 1995-1996 by Virginia Polytechnic Institute & State\\n\\\nx86-ffitarget.h - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (C) 1995-2011 Jean-loup Gailly and Mark Adler\nCopyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.\n<string>(c) 2001-2020 Python Software Foundation.</string>\ndarwin64.S - Copyright (c) 2006 Free Software Foundation, Inc.\nCopyright (C) 2001-2006 Python Software Foundation Author: Ben Gertzfield Contact: email-sig@python.org\nCopyright (c) 2005 Don Owens All rights reserved.\n(c) Copyright 2005, Marc-Andre Lemburg (mal@lemburg.com).\nlibrary/xml.etree.elementtree,,:include,  Copyright (c) <xi:include href=\"year.txt\" parse=\"text\" />.\nffi.c - Copyright (c) 1998 Geoffrey Keating\nCopyright 2006 Georg Brandl.\nCopyright (C) 2005-2010 Gerhard Häring <gh@ghaering.de>\n(c) 2013-2017 Christian Heimes <christian@python.org>\nCopyright 1992-2018 Free Software Foundation, Inc.\nCopyright (c) 1990-1995, Stichting Mathematisch Centrum.\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation;\n2001-10-24 fl   added finditer primitive (for 2.2 only)\nCopyright (C) 2001-2006 Python Software Foundation Author: Keith Dart Contact: email-sig@python.org\nCopyright (c) 
1999-2009 by Fredrik Lundh.\n(c) 2000 Peter Bosch.  All Rights Reserved.\n<string>%version%, (c) 2001-2021 Python Software Foundation.</string>\nCopyright (C) 2001 earthian@tama.or.jp, All Rights Reserved.\nCopyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.\nCopyright (C) 2012 Free Software Foundation, Inc.\nCopyright (C) 2002-2007 Python Software Foundation Author: Ben Gertzfield Contact: email-sig@python.org\nCopyright 1994 by Lance Ellinghouse, Cathedral City, California Republic, United States of America.\nCopyright (C) 2006 - 2010  Gregor Lingl email: glingl@aon.at\nCopyright © 2001-2021 Python Software Foundation. All rights reserved.\nCopyright (c) 2009,2010 Zmanda Inc. <http://www.zmanda.com/>\nCopyright (c) 1998-2008 The OpenSSL Project.  All rights reserved.\nCopyright 1996 by Sam Rushing\nCopyright (c) 1998-2000 Thai Open Source Software Center Ltd and Clark Cooper\ncopyright, i.e., \"Copyright © 2001-2018 Python Software Foundation; All Rights Reserved\" are retained in Python 3.9 alone or in any derivative version prepared by Licensee.\n(c) Craig Reese, Joe Campbell and Jeff Poskanzer 1989 /\nCopyright (c) 1999, 2000, 2001 Steve Purcell This module is free software, and you may redistribute it and/or modify it under the same terms as Python itself, so long as this copyright message and disclaimer are retained in their original form.\n-- Copyright (c) IBM Corporation, 2005, 2008.  All rights reserved.   --\nCopyright (C) 2001-2012 Python Software Foundation. All Rights Reserved.\ndnl Copyright © 2004 Scott James Remnant <scott@netsplit.com>.\nCopyright (C) 2002-2007 Python Software Foundation Contact: email-sig@python.org\nCopyright (c) 1998-2001 by Secret Labs AB.  All rights reserved.\nCopyright (c) 2009,2010 Dustin J. 
Mitchell <dustin@zmanda.com>\nCopyright (c) 2002  Bo Thorsen\n2001-10-21 fl   added sub/subn primitive\nCopyright 1992-1994, David Gottner\nCopyright (C) 2011-2013 Vinay Sajip.\nCopyright (c) 1991, 2000, 2001 by Lucent Technologies.\nCopyright (c) 2010 Python Software Foundation. All Rights Reserved.\n\" SRE 2.2.2 Copyright (c) 1997-2002 by Secret Labs AB \";\n;   Copyright (c) 2004, Outercurve Foundation.\nCopyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>\nCopyright (c) 1999 by Secret Labs AB\nlibffi PyOBJC - Copyright (c) 1996-2003  Red Hat, Inc.\nCopyright (c) 1999-2009 by Fredrik Lundh\nCopyright 2007 Google, Inc. All Rights Reserved.\n-- Copyright (c) IBM Corporation, 2004, 2008.  All rights reserved.   --\nCopyright (C) 2001-2007 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\nCopyright (c) 2000 BeOpen.com.  All rights reserved.\n3-2926   U+00A9    COPYRIGHT SIGN    [2000]\n<string>%version%, (c) 2001-2020 Python Software Foundation.</string>\nCopyright © 1991-1995 Stichting Mathematisch Centrum. All rights reserved.\nCopyright (C) 2000  Luke Kenneth Casson Leighton <lkcl@samba.org>\nCopyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>\n2001-12-07 fl   fixed memory leak in sub/subn (Guido van Rossum)\n-- Copyright (c) IBM Corporation, 2001, 2008.  All rights reserved.   --\nVirginia, USA.  Portions copyright 1991-1995 by Stichting Mathematisch\\n\\\nCopyright (c) 2004 Free Software Foundation, Inc.\nso portions are Copyright (C) 2001,2002 Python Software Foundation, and were written by Barry Warsaw.\nCopyright (C) 2004-2010 Gerhard Häring <gh@ghaering.de>\nCopyright (c) 2004 Python Software Foundation.\n(c) Copyright 2000 Guido van Rossum.\nCopyright 2007 Georg Brandl.\nCopyright (c) 1999 by Secret Labs AB.\nCopyright (c) 2002 Unicode, Inc.  All Rights reserved.\nCopyright 2009 Brian Quinlan. 
All Rights Reserved.\nCopyright (c) 2008-2009, Google Inc.\nCopyright (c) 2001-2006 Twisted Matrix Laboratories.\n(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c) 1995-2001 Corporation for National Research Initiatives; All Rights Reserved\" are retained in Python 1.6.1 alone or in any derivative version prepared by Licensee.  Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): \"Python 1.6.1 is made available subject to the terms and conditions in CNRI's License Agreement.  This Agreement together with Python 1.6.1 may be located on the Internet using the following unique, persistent identifier (known as a handle): 1895.22/1013.  This Agreement may also be obtained from a proxy server on the Internet\nCopyright (c) Corporation for National Research Initiatives.\nif (config == (void )2000 && (c) == 0x20B9F) {                     \\\nCopyright 2001-2016 by Vinay Sajip. All Rights Reserved.\nppc-ffitarget.h - Copyright (c) 1996-2003  Red Hat, Inc.\nppc-darwin.h - Copyright (c) 2002, 2003, 2004, Free Software Foundation, Inc.\nCopyright (c) 2001-2006 Gregory P. Ward.  All rights reserved.\nCopyright (c) 1997-2001 by Secret Labs AB.  All rights reserved.\nCopyright 2000 by Timothy O'Malley <timo@alum.mit.edu>\nCopyright (C) 2007-2012 Michael Foord & the mock team E-mail: fuzzyman AT voidspace DOT org DOT uk\nCopyright (C) 2011-2014 Vinay Sajip.\nx86-ffi64.c - Copyright (c) 2002  Bo Thorsen <bo@suse.de>\nppc64-darwinclosure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation, Inc. 
based on ppcclosure.S\nCopyright (c) 2002  Ranjit Mathew\nCopyright (C) 2001-2006 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\n﻿Copyright (c) 2004, Outercurve Foundation.\nCopyright (c) 1995-2001 Corporation for National Research Initiatives.\\n\\\nCopyright 1999, Bioreason, Inc., all rights reserved.\n2001-10-20 fl   added split primitive; re-enable unicode for 1.6/2.0/2.1\nCopyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.\nCopyright (c) 2008 by Christian Heimes <christian@cheimes.de>\nCopyright 2001-2019 by Vinay Sajip. All Rights Reserved.\nCopyright (C) 2005 Martin v. Löwis Licensed to PSF under a contributor agreement.\nppc-darwin.S - Copyright (c) 2000 John Hornkvist\nCopyright (c) 2000, BeOpen.com.\nCopyright (C) 2001-2010 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org\nCopyright (C) 2001-2007 Python Software Foundation Author: Anthony Baxter Contact: email-sig@python.org\nCopyright (c) 2004 by Fredrik Lundh <fredrik@pythonware.com>\nCopyright Disney Enterprises, Inc.  All Rights Reserved.\nffi.c - Copyright (c) 1996, 1998, 1999, 2001  Red Hat, Inc.\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands.  All rights reserved.\nCopyright (c) <xi:include href=\"year.txt\" parse=\"text\" />.\nprepcif.c - Copyright (c) 1996, 1998  Red Hat, Inc.\nself.assertEqual(list(c), list(range(2,2000)))\nCopyright (C) 2002-2007 Python Software Foundation Author: Ben Gertzfield, Barry Warsaw Contact: email-sig@python.org\n-- Copyright (c) IBM Corporation, 2000, 2008.  All rights reserved.   --\nCopyright (C) 2012   Christian Heimes (christian@python.org)\nfficommon.h - Copyright (c) 1996  Red Hat, Inc.\nCopyright (C) 2012-2016  Christian Heimes (christian@python.org)\nCopyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>\n(c) 2002 Python Software Foundation.  
All Rights Reserved.\nCopyright (c) 2004 by Secret Labs AB, http://www.pythonware.com\nCopyright (c) 2004, Outercurve Foundation.\nCopyright (c) 2006-2008, R Oudkerk Licensed to PSF under a Contributor Agreement.\n.. Copyright 1995 Virginia Polytechnic Institute and State University and Fred L. Drake, Jr.  This copyright notice must be distributed on all copies, but this document otherwise may be distributed as part of the Python distribution.  No fee may be charged for this document in any representation, either on paper or electronically.  This restriction does not affect other elements in a distributed package in any way.\nCopyright 2012-2013 by Larry Hastings.\nCopyright (C) 2002-2006 Python Software Foundation Contact: email-sig@python.org email package unit tests for (optional) Asian codecs\nCopyright (c) 2002 Peter O'Gorman <ogorman@users.sourceforge.net>\nCopyright 2007 Google Inc.\nCopyright (c) 1999 by Fredrik Lundh\nCopyright (C) 2001-2010 Python Software Foundation Contact: email-sig@python.org email package unit tests\nCopyright (c) 2003-2004 by Fredrik Lundh.  All rights reserved.\nCopyright (c) 1991-1999 Unicode, Inc.  All Rights reserved.\nCopyright (c) 2000-2017 Expat development team Licensed under the MIT license:\nCopyright (c) 1997-2000 Thai Open Source Software Center Ltd\nCopyright (c) 1998 The Open Group\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands. All rights reserved.\\\nCopyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com\nCopyright (C) 2005 Gerhard Häring <gh@ghaering.de>\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\nCopyright (c) 1996-2008  Red Hat, Inc and others.\nCopyright (C) 2005 Martin v. Löwis Licensed to PSF under a Contributor Agreement.\nCopyright (C) 1997, 2002, 2003, 2007, 2008 Martin von Loewis\n<string>%VERSION%, (c) 2001-2019 Python Software Foundation.</string>\n( Copyright (c) 2011-2020 Stefan Krah. All rights reserved. 
)\nCopyright (c) 2013  Marek Majkowski <marek@popcount.org>\nCopyright (c) 2008 Daniel Amelang <dan@amelang.net>\nCopyright (c) 1999-2008 by Fredrik Lundh.  All rights reserved.\nCopyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, All rights reserved.\nCopyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd and Clark Cooper\n2001-04-15 fl   export copyright as Python attribute, not global 2001-04-28 fl   added copy methods (work in progress)\nCopyright (C) 2002, 2003 Python Software Foundation.\nCopyright (c) 2004, 2005, 2006 Python Software Foundation.\ndnl Copyright © 2012-2015 Dan Nicholson <dbn.lists@gmail.com>\nCopyright (c) 1999-2009 by Secret Labs AB.  All rights reserved.\nCopyright (c) 2003-2010 Python Software Foundation This module is free software, and you may redistribute it and/or modify it under the same terms as Python itself, so long as this copyright message and disclaimer are retained in their original form.\nCopyright (c) 1991-1995 Stichting Mathematisch Centrum.  All rights reserved.\n2001-10-18 fl   fixed group reset issue (from Matthew Mueller)\nCopyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.\n-- Copyright (c) IBM Corporation, 1981, 2008.  All rights reserved.   --\nCopyright (C) 2000  Bastian Kleineidam\nppc-darwinclosure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation, Inc. based on ppcclosure.S\nPortions copyright 1991-1995 by Stichting Mathematisch Centrum, Amsterdam, The Netherlands.  Copying is permitted under the terms associated with the main Python distribution, with the additional restriction that this additional notice be included and maintained on all distributed copies.\n\nA. HISTORY OF THE SOFTWARE\n==========================\n\nPython was created in the early 1990s by Guido van Rossum at Stichting\nMathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands\nas a successor of a language called ABC.  
Guido remains Python's\nprincipal author, although it includes many contributions from others.\n\nIn 1995, Guido continued his work on Python at the Corporation for\nNational Research Initiatives (CNRI, see http://www.cnri.reston.va.us)\nin Reston, Virginia where he released several versions of the\nsoftware.\n\nIn May 2000, Guido and the Python core development team moved to\nBeOpen.com to form the BeOpen PythonLabs team.  In October of the same\nyear, the PythonLabs team moved to Digital Creations, which became\nZope Corporation.  In 2001, the Python Software Foundation (PSF, see\nhttps://www.python.org/psf/) was formed, a non-profit organization\ncreated specifically to own Python-related Intellectual Property.\nZope Corporation was a sponsoring member of the PSF.\n\nAll Python releases are Open Source (see http://www.opensource.org for\nthe Open Source Definition).  Historically, most, but not all, Python\nreleases have also been GPL-compatible; the table below summarizes\nthe various releases.\n\n    Release         Derived     Year        Owner       GPL-\n                    from                                compatible? (1)\n\n    0.9.0 thru 1.2              1991-1995   CWI         yes\n    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes\n    1.6             1.5.2       2000        CNRI        no\n    2.0             1.6         2000        BeOpen.com  no\n    1.6.1           1.6         2001        CNRI        yes (2)\n    2.1             2.0+1.6.1   2001        PSF         no\n    2.0.1           2.0+1.6.1   2001        PSF         yes\n    2.1.1           2.1+2.0.1   2001        PSF         yes\n    2.1.2           2.1.1       2002        PSF         yes\n    2.1.3           2.1.2       2002        PSF         yes\n    2.2 and above   2.1.1       2001-now    PSF         yes\n\nFootnotes:\n\n(1) GPL-compatible doesn't mean that we're distributing Python under\n    the GPL.  
All Python licenses, unlike the GPL, let you distribute\n    a modified version without making your changes open source.  The\n    GPL-compatible licenses make it possible to combine Python with\n    other software that is released under the GPL; the others don't.\n\n(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,\n    because its license has a choice of law clause.  According to\n    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1\n    is \"not incompatible\" with the GPL.\n\nThanks to the many outside volunteers who have worked under Guido's\ndirection to make these releases possible.\n\n\nB. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON\n===============================================================\n\nPython software and documentation are licensed under the\nPython Software Foundation License Version 2.\n\nStarting with Python 3.8.6, examples, recipes, and other code in\nthe documentation are dual licensed under the PSF License Version 2\nand the Zero-Clause BSD license.\n\nSome software incorporated into Python is under different licenses.\nThe licenses are listed with code falling under that license.\n\n\nPYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\n--------------------------------------------\n\n1. This LICENSE AGREEMENT is between the Python Software Foundation\n(\"PSF\"), and the Individual or Organization (\"Licensee\") accessing and\notherwise using this software (\"Python\") in source or binary form and\nits associated documentation.\n\n2. 
Subject to the terms and conditions of this License Agreement, PSF hereby\ngrants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,\nanalyze, test, perform and/or display publicly, prepare derivative works,\ndistribute, and otherwise use Python alone or in any derivative version,\nprovided, however, that PSF's License Agreement and PSF's notice of copyright,\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,\n2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation;\nAll Rights Reserved\" are retained in Python alone or in any derivative version\nprepared by Licensee.\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python.\n\n4. PSF is making Python available to Licensee on an \"AS IS\"\nbasis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\nFOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. Nothing in this License Agreement shall be deemed to create any\nrelationship of agency, partnership, or joint venture between PSF and\nLicensee.  
This License Agreement does not grant permission to use PSF\ntrademarks or trade name in a trademark sense to endorse or promote\nproducts or services of Licensee, or any third party.\n\n8. By copying, installing or otherwise using Python, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nBEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0\n-------------------------------------------\n\nBEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1\n\n1. This LICENSE AGREEMENT is between BeOpen.com (\"BeOpen\"), having an\noffice at 160 Saratoga Avenue, Santa Clara, CA 95051, and the\nIndividual or Organization (\"Licensee\") accessing and otherwise using\nthis software in source or binary form and its associated\ndocumentation (\"the Software\").\n\n2. Subject to the terms and conditions of this BeOpen Python License\nAgreement, BeOpen hereby grants Licensee a non-exclusive,\nroyalty-free, world-wide license to reproduce, analyze, test, perform\nand/or display publicly, prepare derivative works, distribute, and\notherwise use the Software alone or in any derivative version,\nprovided, however, that the BeOpen Python License is retained in the\nSoftware, alone or in any derivative version prepared by Licensee.\n\n3. BeOpen is making the Software available to Licensee on an \"AS IS\"\nbasis.  BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE\nSOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS\nAS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY\nDERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n5. 
This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n6. This License Agreement shall be governed by and interpreted in all\nrespects by the law of the State of California, excluding conflict of\nlaw provisions.  Nothing in this License Agreement shall be deemed to\ncreate any relationship of agency, partnership, or joint venture\nbetween BeOpen and Licensee.  This License Agreement does not grant\npermission to use BeOpen trademarks or trade names in a trademark\nsense to endorse or promote products or services of Licensee, or any\nthird party.  As an exception, the \"BeOpen Python\" logos available at\nhttp://www.pythonlabs.com/logos.html may be used according to the\npermissions granted on that web page.\n\n7. By copying, installing or otherwise using the software, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nCNRI LICENSE AGREEMENT FOR PYTHON 1.6.1\n---------------------------------------\n\n1. This LICENSE AGREEMENT is between the Corporation for National\nResearch Initiatives, having an office at 1895 Preston White Drive,\nReston, VA 20191 (\"CNRI\"), and the Individual or Organization\n(\"Licensee\") accessing and otherwise using Python 1.6.1 software in\nsource or binary form and its associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, CNRI\nhereby grants Licensee a nonexclusive, royalty-free, world-wide\nlicense to reproduce, analyze, test, perform and/or display publicly,\nprepare derivative works, distribute, and otherwise use Python 1.6.1\nalone or in any derivative version, provided, however, that CNRI's\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c)\n1995-2001 Corporation for National Research Initiatives; All Rights\nReserved\" are retained in Python 1.6.1 alone or in any derivative\nversion prepared by Licensee.  
Alternately, in lieu of CNRI's License\nAgreement, Licensee may substitute the following text (omitting the\nquotes): \"Python 1.6.1 is made available subject to the terms and\nconditions in CNRI's License Agreement.  This Agreement together with\nPython 1.6.1 may be located on the Internet using the following\nunique, persistent identifier (known as a handle): 1895.22/1013.  This\nAgreement may also be obtained from a proxy server on the Internet\nusing the following URL: http://hdl.handle.net/1895.22/1013\".\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python 1.6.1 or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python 1.6.1.\n\n4. CNRI is making Python 1.6.1 available to Licensee on an \"AS IS\"\nbasis.  CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\n1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. This License Agreement shall be governed by the federal\nintellectual property law of the United States, including without\nlimitation the federal copyright law, and, to the extent such\nU.S. 
federal law does not apply, by the law of the Commonwealth of\nVirginia, excluding Virginia's conflict of law provisions.\nNotwithstanding the foregoing, with regard to derivative works based\non Python 1.6.1 that incorporate non-separable material that was\npreviously distributed under the GNU General Public License (GPL), the\nlaw of the Commonwealth of Virginia shall govern this License\nAgreement only as to issues arising under or with respect to\nParagraphs 4, 5, and 7 of this License Agreement.  Nothing in this\nLicense Agreement shall be deemed to create any relationship of\nagency, partnership, or joint venture between CNRI and Licensee.  This\nLicense Agreement does not grant permission to use CNRI trademarks or\ntrade name in a trademark sense to endorse or promote products or\nservices of Licensee, or any third party.\n\n8. By clicking on the \"ACCEPT\" button where indicated, or by copying,\ninstalling or otherwise using Python 1.6.1, Licensee agrees to be\nbound by the terms and conditions of this License Agreement.\n\n        ACCEPT\n\n\nCWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2\n--------------------------------------------------\n\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,\nThe Netherlands.  
All rights reserved.\n\nPermission to use, copy, modify, and distribute this software and its\ndocumentation for any purpose and without fee is hereby granted,\nprovided that the above copyright notice appear in all copies and that\nboth that copyright notice and this permission notice appear in\nsupporting documentation, and that the name of Stichting Mathematisch\nCentrum or CWI not be used in advertising or publicity pertaining to\ndistribution of the software without specific, written prior\npermission.\n\nSTICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO\nTHIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE\nFOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\nOF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION\n----------------------------------------------------------------------\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n\nSoftware: zlib 1.2.11\nCopyright notice:\nCopyright (C) 1995-2011, 2016 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 1998,1999,2000 by Jacques Nomssi Nzali.\necho 'pragma comment(copyright, \"Copyright (C) 1995-2017 Jean-Loup Gailly, Mark Adler. OS/400 version by P. Monnerat.\")' >> os400.c makemodule     OS400           os400.c LINK=                            No need to rebuild service program yet.\nCopyright (C) 2007-2008 Even Rouault\nCopyright (C) 2003 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 1995-2003 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 2009-2010 Mathias Svensson ( http:result42.com )\nCopyright (C) 1995-2003 by Jean-loup Gailly.\nCopyright (C) 1998-2005 Gilles Vollant\n© Copyright Henrik Ravn 2004\nCopyright (C) 2003, 2005, 2008, 2010, 2012 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h Version 1.7  12 August 2012  Mark Adler /\nCopyright (C) 1995-1998 Jean-loup Gailly.\nCopyright (C) 1995-2003, 2010 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (c) 2004, 2005 Mark Adler.\nCopyright (C) 1995-2005, 2010 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 1998 - 2010 Gilles Vollant, Even Rouault, Mathias Svensson\n--              Copyright (C) 2002-2004 Dmitriy Anisimkov                   --\n\" inflate9 1.2.11 Copyright 1995-2017 Mark Adler \";\nCopyright (C) 2004, 2008, 2012, 2016 Mark Adler, all rights reserved For conditions of distribution and 
use, see copyright notice in gzlog.h version 2.2, 14 Aug 2012\nVALUE \"LegalCopyright\", \"(C) 1995-2017 Jean-loup Gailly & Mark Adler\\0\"\n;  Copyright (C) 1995-2003 Mark Adler ;  For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 1995-2008 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 2003 by Cosmin Truta.\n[assembly: AssemblyCopyright(\"(c) 2004 by Henrik Ravn\")]\n\" unzip 1.01 Copyright 1998-2004 Gilles Vollant - http:www.winimage.com/zLibDll\";\nCopyright (C) 1995-2006, 2011, 2016 Jean-loup Gailly For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 1995-2017 Jean-loup Gailly and Mark Adler .LP This software is provided 'as-is', without any express or implied warranty.  In no event will the authors be held liable for any damages arising from the use of this software.\nCopyright (c) 1997 Christian Michelsen Research AS Advanced Computing Fantoftvegen 38, 5036 BERGEN, Norway\nCopyright (C) 2004, 2010 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\n\"gzappend 1.2 (11 Oct 2012) Copyright (C) 2003, 2012 Mark Adler\\n\"\nCopyright (C) 1995-2003 Jean-loup Gailly and Mark Adler.\nCopyright (C) 2002-2013 Mark Adler, all rights reserved version 2.3, 21 Jan 2013\nCopyright (C) 1998-2010 Gilles Vollant (minizip) ( http:www.winimage.com/zLibDll/minizip.html )\nCopyright (C) 2004, 2008, 2012 Mark Adler, all rights reserved version 2.2, 14 Aug 2012\nfprintf(stderr, \"Copyright (C) 2003-2010 Mark Adler\\n\");\n\" inflate 1.2.11 Copyright 1995-2017 Mark Adler \";\n; Copyright (C) 1995-2010 Jean-loup Gailly, Brian Raiter and Gilles Vollant.\nCopyright (C) 1995-2006, 2010, 2011, 2016 Jean-loup Gailly For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 1998, 2007 Brian Raiter <breadbox@muppetlabs.com>\nVALUE \"LegalCopyright\", \"(C) 1995-2017 Jean-loup Gailly & Mark Adler\\0\"\nCopyright (C) 
2004, 2005, 2012 Mark Adler, all rights reserved version 1.2, 14 Aug 2012\nCopyright (C) 1995-2017 Jean-loup Gailly, Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nconst char zipcopyright[] =\" zip 1.01 Copyright 1998-2004 Gilles Vollant - http:www.winimage.com/zLibDll\";\nCopyright (C) 1998 by Bob Dellaca.\n;;; Copyright (C) 1998 Brian Raiter <breadbox@muppetlabs.com>\nCopyright (C) 1995-2017 Jean-loup Gailly and Mark Adler\nCopyright (C) 1995-2017 Jean-loup Gailly and Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 1995-2017 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 2005, 2012 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h Version 1.1  29 Sep 2012  Mark Adler /\nCopyright (C) 1995-2003, 2010, 2014, 2016 Jean-loup Gailly, Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 1995-2017 Jean-loup Gailly detectdatatype() function provided freely by Cosmin Truta, 2006 For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 1998 by Jacques Nomssi Nzali.\nMiniZip - Copyright (c) 1998-2010 - by Gilles Vollant - version 1.1 64 bits from Mathias Svensson\nCopyright (c) Henrik Ravn 2004\nCopyright (C) 1995-2005, 2014, 2016 Jean-loup Gailly, Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 2004-2017 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 1995-2003 Jean-loup Gailly.\nCopyright (c) 1996 L. 
Peter Deutsch\nCopyright (C) 2003 Chris Anderson <christop@charm.net>\nCopyright (C) 1995-2017 Jean-loup Gailly For conditions of distribution and use, see copyright notice in zlib.h\n\" deflate 1.2.11 Copyright 1995-2017 Jean-loup Gailly and Mark Adler \";\n; Copyright (C) 1995-1996 Jean-loup Gailly, Brian Raiter and Gilles Vollant.\nCopyright (C) 2003 Cosmin Truta.\nCopyright (C) 2003, 2012 Mark Adler, all rights reserved version 1.2, 11 Oct 2012\nCopyright (c) 1996 L. Peter Deutsch and Jean-Loup Gailly\n--  Copyright (C) 2002-2004 Dmitriy Anisimkov                 --\nCopyright (C) 1998 by Andreas R. Kleinert\nCopyright (C) 1995-2016 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 2011, 2016 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\n--  Copyright (C) 2002-2003 Dmitriy Anisimkov                 --\n(C) 1995-2017 Jean-loup Gailly and Mark Adler\nCopyright (C) 2003, 2012, 2013 Mark Adler version 1.3, 24 Aug 2013\nCopyright (C) 2002-2013 Mark Adler For conditions of distribution and use, see copyright notice in puff.h version 2.3, 21 Jan 2013\nCopyright (C) 1995-2016 Jean-loup Gailly For conditions of distribution and use, see copyright notice in zlib.h\n<i>Copyright (c) 2004, 2005 by Mark Adler<br>Last modified 11 December 2005</i>\n;  Copyright (C) 2003 Chris Anderson <christop@charm.net>\nCopyright (C) 1995-2016 Jean-loup Gailly, Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\n{       Copyright (c) 1997,99 Borland Corporation       }\nCopyright (C) 1995-2006, 2010, 2011, 2012, 2016 Mark Adler For conditions of distribution and use, see copyright notice in zlib.h\nCopyright (C) 2003, 2012, 2013 Mark Adler For conditions of distribution and use, see copyright notice in blast.h version 1.3, 24 Aug 
2013\nCopyright (C) 2007, 2008, 2012 Mark Adler Version 1.4  18 August 2012  Mark Adler\nCopyright (c) 1990-2000 Info-ZIP.  All rights reserved.\n\n Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler\n\n  This software is provided 'as-is', without any express or implied\n  warranty.  In no event will the authors be held liable for any damages\n  arising from the use of this software.\n\n  Permission is granted to anyone to use this software for any purpose,\n  including commercial applications, and to alter it and redistribute it\n  freely, subject to the following restrictions:\n\n  1. The origin of this software must not be misrepresented; you must not\n     claim that you wrote the original software. If you use this software\n     in a product, an acknowledgment in the product documentation would be\n     appreciated but is not required.\n  2. Altered source versions must be plainly marked as such, and must not be\n     misrepresented as being the original software.\n  3. This notice may not be removed or altered from any source distribution.\n\nSoftware: openssl 1.1.0\nCopyright notice:\nCopyright 2000-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright (C) 1989, 1991 Free Software Foundation, Inc.\nCopyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)\nCopyright (C) 2006, Network Resonance, Inc. Copyright (C) 2011, RTFM, Inc.\nCopyright (C) 2017 National Security Research Institute. All Rights Reserved.\nCopyright (c) 1995-1998 Eric A. Young, Tim J. Hudson\nCopyright (c) 1998-2019 The OpenSSL Project.\nCopyright (c) 1998-2021 The OpenSSL Project\nCopyright (c) 2002 The OpenTSA Project.\nCopyright (c) 2002, Oracle and/or its affiliates. 
All rights reserved\nCopyright (c) 2004 Kungliga Tekniska Högskolan (Royal Institute of Technology, Stockholm, Sweden).\nCopyright (c) 2004, 2018, Richard Levitte richard@levitte.org\nCopyright (c) 2004, EdelKey Project. All Rights Reserved.\nCopyright (c) 2004, Richard Levitte richard@levitte.org\nCopyright (c) 2007 KISA(Korea Information Security Agency).\nCopyright (c) 2008 Andy Polyakov appro@openssl.org\nCopyright (c) 2012, Intel Corporation. All Rights Reserved.\nCopyright (c) 2012-2014 Daniel J. Bernstein\nCopyright (c) 2012-2016 Jean-Philippe Aumasson\nCopyright (c) 2013-2014 Timo Teräs timo.teras@gmail.com\nCopyright (c) 2014, Intel Corporation. All Rights Reserved.\nCopyright (c) 2015 CloudFlare, Inc.\nCopyright (c) 2015, CloudFlare, Inc.\nCopyright (c) 2016 Viktor Dukhovni openssl-users@dukhovni.org.\nCopyright (c) 2017 National Security Research Institute.\nCopyright (c) 2017, Oracle and/or its affiliates.\nCopyright (c) 2018, Oracle and/or its affiliates.\nCopyright 1995-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1995-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1995-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1998-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1998-2001 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1998-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1998-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1998-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1998-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1998-2020 The OpenSSL Project Authors. 
All Rights Reserved.\nCopyright 1998-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1999-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1999-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1999-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1999-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1999-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 1999-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2000-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2000-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2000-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2000-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2000-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2000-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2001-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2001-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2001-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2001-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2001-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2002-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2002-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2002-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2002-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2003-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2003-2017 The OpenSSL Project Authors. 
All Rights Reserved.\nCopyright 2003-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2003-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2003-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2004-2014, Akamai Technologies. All Rights Reserved.\nCopyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2004-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2004-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2004-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2004-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2004-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2005 Nokia.\nCopyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2005-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2005-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2005-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2005-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2005-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2006 NTT (Nippon Telegraph and Telephone Corporation) .\nCopyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2006-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2006-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2006-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2006-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2006-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2007-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2007-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2008-2018 The OpenSSL Project Authors. 
All Rights Reserved.\nCopyright 2008-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2008-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2008-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2009-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2009-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2009-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2009-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2010-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2010-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2010-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2011 Google Inc.\nCopyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2011-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2011-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2011-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2011-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2011-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2012, Samuel Neves sneves@dei.uc.pt\nCopyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2012-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2012-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2012-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2012-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2013 M. J. Dominus.\nCopyright 2013 Mark Jason Dominus\nCopyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2013-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2013-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2013-2019 The OpenSSL Project Authors. 
All Rights Reserved.\nCopyright 2013-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2014 Cryptography Research, Inc.\nCopyright 2014-2016 Cryptography Research, Inc.\nCopyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2014-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2014-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2014-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2015 Cryptography Research, Inc.\nCopyright 2015-2016 Cryptography Research, Inc.\nCopyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2015-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2015-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2015-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2016 Cryptography Research, Inc.\nCopyright 2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2016 VMS Software, Inc. All Rights Reserved.\nCopyright 2016-2016 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2016-2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2016-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2016-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2017 BaishanCloud.\nCopyright 2017 Ribose Inc. All Rights Reserved.\nCopyright 2017 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2017 Ribose Inc.. All Rights Reserved.\nCopyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2017-2019 The OpenSSL Project Authors. 
All Rights Reserved.\nCopyright 2017-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2017-2021 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2018 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2018-2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2018-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2019 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2019-2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 2020 The OpenSSL Project Authors. All Rights Reserved.\nCopyright 20xx-20yy The OpenSSL Project Authors. All Rights Reserved.\nCopyright Patrick Powell 1995 This code is based on code written by Patrick Powell papowell@astart.com\nCopyright 1998-2021 The OpenSSL Authors. All rights reserved.\n\nLicense: Apache License 2.0\nPlease see above.\n"
  },
  {
    "path": "build.sh",
    "content": "#!/bin/bash\nset -e\nPROJECTPATH=$(cd \"$(dirname $0)\"; pwd)\nexport BUILD_PATH=\"${PROJECTPATH}/build/\"\n\n# print usage message\nusage()\n{\n  echo \"Usage:\"\n  echo \"    bash build.sh [-j[n]] [-d] [-S on|off] \"\n  echo \"    bash build.sh -p {mindspore_shared_lib}] [-j[n]] [-d] [-S on|off] \"\n  echo \"    bash build.sh -e gpu|ascend [-V 9.2|10.1|310|910] [-j[n]] [-d] [-S on|off] \"\n  echo \"    bash build.sh -t on [-j[n]] [-d] [-S on|off] \"\n  echo \"\"\n  echo \"Options:\"\n  echo \"    -p {mindspore_shared_lib}, Use header files related to MindSpore(libmindspore.so) or Lite lib(libmindspore-lite.so)\"\n  echo \"    -e gpu|ascend, build MindSpore gpu or ascend whl package meanwhile\"\n  echo \"    -V Specify the device version, if -e gpu, default CUDA 10.1, if -e ascend, default Ascend 910\"\n  echo \"    -j[n] Set the threads when building (Default: -j8)\"\n  echo \"    -d Debug model\"\n  echo \"    -t Build testcases.\"\n  echo \"    -S Enable enable download cmake compile dependency from gitee instead of github, default off\"\n}\n\n# check value of input is 'on' or 'off'\n# usage: check_on_off arg_value arg_name\ncheck_on_off()\n{\n  if [[ \"X$1\" != \"Xon\" && \"X$1\" != \"Xoff\" ]]; then\n    echo \"Invalid value $1 for option -$2\"\n    usage\n    exit 1\n  fi\n}\n\n# check and set options\ncheckopts()\n{\n  # Init default values of build options\n  THREAD_NUM=8\n  VERBOSE=\"\"\n  DEBUG_MODE=\"off\"\n  ENABLE_COVERAGE=\"off\"\n  ENABLE_ASAN=\"off\"\n  ENABLE_PYTHON=\"on\"\n  MS_WHL_LIB_PATH=\"\"\n  MS_BACKEND=\"\"\n  MS_BACKEND_HEADER=\"on\"\n  MS_VERSION=\"\"\n  RUN_TESTCASES=\"off\"\n  ENABLE_GITEE=\"off\"\n\n  # Process the options\n  while getopts 'dvc:j:a:p:e:V:t:S:' opt\n  do\n    LOW_OPTARG=$(echo ${OPTARG} | tr '[A-Z]' '[a-z]')\n\n    case \"${opt}\" in\n      e)\n        echo \"user opt: -e\"${LOW_OPTARG}\n        if [[ \"$OPTARG\" != \"\" ]]; then\n          MS_BACKEND=$OPTARG\n        fi\n        ;;\n      V)\n        
echo \"user opt: -V\"${LOW_OPTARG}\n        if [[ \"$OPTARG\" != \"\" ]]; then\n          MS_VERSION=$OPTARG\n        fi\n        ;;\n      p)\n        if [[ \"$OPTARG\"  != \"\" ]]; then\n          MS_WHL_LIB_PATH=$OPTARG\n          MS_BACKEND_HEADER=\"off\"\n        else\n          echo \"Invalid value ${LOW_OPTARG} for option -p\"\n          usage\n          exit 1\n        fi\n        ;;\n      d)\n        echo \"user opt: -d\"${LOW_OPTARG}\n        DEBUG_MODE=\"on\"\n        ;;\n      j)\n        echo \"user opt: -j\"${LOW_OPTARG}\n        THREAD_NUM=$OPTARG\n        ;;\n      v)\n        echo \"user opt: -v\"${LOW_OPTARG}\n        VERBOSE=\"VERBOSE=1\"\n        ;;\n      c)\n        check_on_off $OPTARG c\n        ENABLE_COVERAGE=\"$OPTARG\"\n        ;;\n      a)\n        check_on_off $OPTARG a\n        ENABLE_ASAN=\"$OPTARG\"\n        ;;\n      t)\n        echo \"user opt: -t\"${LOW_OPTARG}\n        RUN_TESTCASES=\"$OPTARG\"\n        MS_BACKEND_HEADER=\"off\"\n        ;;\n      S)\n        check_on_off $OPTARG S\n        ENABLE_GITEE=\"$OPTARG\"\n        echo \"enable download from gitee\"\n        ;;\n      *)\n        echo \"Unknown option ${opt}!\"\n        usage\n        exit 1\n    esac\n  done\n}\n\ncheckopts \"$@\"\necho \"---------------- MindSpore Serving: build start ----------------\"\nmkdir -pv \"${BUILD_PATH}/package/mindspore_serving/lib\"\nif [[ \"$MS_BACKEND_HEADER\" != \"off\" ]]; then\n  git submodule update --init third_party/mindspore\nfi\n\n# Create building path\nbuild_mindspore_serving()\n{\n  echo \"start build mindspore_serving project.\"\n  mkdir -pv \"${BUILD_PATH}/mindspore_serving\"\n  cd \"${BUILD_PATH}/mindspore_serving\"\n  CMAKE_ARGS=\"-DDEBUG_MODE=$DEBUG_MODE -DBUILD_PATH=$BUILD_PATH\"\n  CMAKE_ARGS=\"${CMAKE_ARGS} -DENABLE_PYTHON=${ENABLE_PYTHON}\"\n  CMAKE_ARGS=\"${CMAKE_ARGS} -DTHREAD_NUM=${THREAD_NUM}\"\n  if [[ \"X$ENABLE_COVERAGE\" = \"Xon\" ]]; then\n    CMAKE_ARGS=\"${CMAKE_ARGS} -DENABLE_COVERAGE=ON\"\n  fi\n  if [[ 
\"X$ENABLE_ASAN\" = \"Xon\" ]]; then\n      CMAKE_ARGS=\"${CMAKE_ARGS} -DENABLE_ASAN=ON\"\n  fi\n  if [[ \"$MS_BACKEND\" != \"\" ]]; then\n    CMAKE_ARGS=\"${CMAKE_ARGS} -DMS_BACKEND=${MS_BACKEND}\"\n  fi\n  if [[ \"$MS_WHL_LIB_PATH\" != \"\" ]]; then\n    CMAKE_ARGS=\"${CMAKE_ARGS} -DMS_WHL_LIB_PATH=${MS_WHL_LIB_PATH}\"\n  fi\n  if [[ \"$MS_BACKEND_HEADER\" != \"off\" ]]; then\n    CMAKE_ARGS=\"${CMAKE_ARGS} -DMS_BACKEND_HEADER=${MS_BACKEND_HEADER}\"\n  fi\n  if [[ \"$MS_VERSION\" != \"\" ]]; then\n    CMAKE_ARGS=\"${CMAKE_ARGS} -DMS_VERSION=${MS_VERSION}\"\n  fi\n  if [[ \"X$RUN_TESTCASES\" = \"Xon\" ]]; then\n    CMAKE_ARGS=\"${CMAKE_ARGS} -DENABLE_TESTCASES=ON\"\n  fi\n  if [[ \"X$ENABLE_GITEE\" = \"Xon\" ]]; then\n    CMAKE_ARGS=\"${CMAKE_ARGS} -DENABLE_GITEE=ON\"\n  fi\n  echo \"${CMAKE_ARGS}\"\n  cmake ${CMAKE_ARGS} ../..\n  if [[ -n \"$VERBOSE\" ]]; then\n    CMAKE_VERBOSE=\"--verbose\"\n  fi\n  cmake --build . --target package ${CMAKE_VERBOSE} -j$THREAD_NUM\n  echo \"success building mindspore_serving project!\"\n}\n\nbuild_mindspore_serving\n\necho \"---------------- mindspore_serving: build end   ----------------\"\n"
  },
  {
    "path": "cmake/check_requirements.cmake",
    "content": "## define customized find functions, print customized error messages\nfunction(find_required_package pkg_name)\n    find_package(${pkg_name})\n    if(NOT ${pkg_name}_FOUND)\n        message(FATAL_ERROR \"Required package ${pkg_name} not found, please install the package and try\"\n                \" building mindspore_serving again.\")\n    endif()\nendfunction()\n\n## find python, quit if the found python is static\nset(Python3_USE_STATIC_LIBS FALSE)\nset(Python3_FIND_VIRTUALENV ONLY)\nfind_package(Python3 COMPONENTS Interpreter Development)\nif(Python3_FOUND)\n    message(\"Python3 found, version: ${Python3_VERSION}\")\n    message(\"Python3 library path: ${Python3_LIBRARY}\")\n    message(\"Python3 interpreter: ${Python3_EXECUTABLE}\")\nelseif(Python3_LIBRARY AND Python3_EXECUTABLE AND\n        ${Python3_VERSION} VERSION_GREATER_EQUAL \"3.7.0\" AND ${Python3_VERSION} VERSION_LESS \"3.10.0\")\n    message(WARNING \"Maybe python3 environment is broken.\")\n    message(\"Python3 library path: ${Python3_LIBRARY}\")\n    message(\"Python3 interpreter: ${Python3_EXECUTABLE}\")\nelse()\n    message(FATAL_ERROR \"Python3 not found, please install Python>=3.7.5, and set --enable-shared \"\n            \"if you are building Python locally\")\nendif()\n\n## packages used both on windows and linux\nif(DEFINED ENV{MS_PATCH_PATH})\n    find_program(Patch_EXECUTABLE patch PATHS $ENV{MS_PATCH_PATH})\n    set(Patch_FOUND ${Patch_EXECUTABLE})\nelse()\n    find_package(Patch)\nendif()\nif(NOT Patch_FOUND)\n    message(FATAL_ERROR \"Patch not found, please set environment variable MS_PATCH_PATH to path where Patch is located,\"\n            \" usually found in GIT_PATH/usr/bin on Windows\")\nendif()\nmessage(PATCH_EXECUTABLE = ${Patch_EXECUTABLE})\n\nfind_required_package(Threads)\n"
  },
  {
    "path": "cmake/dependency_ms.cmake",
    "content": "# Compile MindSpore\n\nmessage(STATUS \"**********begin to compile MindSpore**********\")\nset(MS_SOURCE_DIR ${CMAKE_SOURCE_DIR}/third_party/mindspore)\nmessage(STATUS \"MindSpore dir: ${MS_SOURCE_DIR}\")\nmessage(STATUS \"MindSpore compile method: -e${MS_BACKEND}\")\nmessage(STATUS \"MindSpore compile thread num: -j${THREAD_NUM}\")\nmessage(STATUS \"MindSpore version: -V${MS_VERSION}\")\n\nif(MS_VERSION)\n    set(MS_VERSION_OPTION -V${MS_VERSION})\nendif()\n\nset(EXEC_COMMAND bash ${MS_SOURCE_DIR}/build.sh -e${MS_BACKEND} ${MS_VERSION_OPTION} -j${THREAD_NUM})\nexecute_process(\n        COMMAND ${EXEC_COMMAND}\n        WORKING_DIRECTORY ${MS_SOURCE_DIR}\n        RESULT_VARIABLE RESULT\n)\nif(NOT RESULT EQUAL \"0\")\n    message(FATAL_ERROR \"error! when ${EXEC_COMMAND} in ${MS_SOURCE_DIR}\")\nendif()\n\nmessage(STATUS \"**********end to compile MindSpore**********\")"
  },
  {
    "path": "cmake/dependency_securec.cmake",
    "content": "# securec library\n#\n#\n# SECUREC_LIBRARY\n#\n\nif(NOT TARGET securec)\n    set(_ms_tmp_CMAKE_POSITION_INDEPENDENT_CODE ${CMAKE_POSITION_INDEPENDENT_CODE})\n    set(_ms_tmp_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})\n\n    set(CMAKE_C_FLAGS \"${SECURE_CXX_FLAGS}\")\n    if(CMAKE_SYSTEM_NAME MATCHES \"Windows\")\n        add_compile_definitions(SECUREC_ONLY_DECLARE_MEMSET)\n    endif()\n    add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/securec ${CMAKE_BINARY_DIR}/securec)\n    set(CMAKE_POSITION_INDEPENDENT_CODE ${_ms_tmp_CMAKE_POSITION_INDEPENDENT_CODE})\n    set(CMAKE_C_FLAGS ${_ms_tmp_CMAKE_C_FLAGS})\nendif()\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR}/../third_party/securec/include)\n\nset(SECUREC_LIBRARY securec)\n"
  },
  {
    "path": "cmake/dependency_utils.cmake",
    "content": "# MS Utils\n#\n\nfunction(find_python_package out_inc out_lib)\n    # Use PYTHON_EXECUTABLE if it is defined, otherwise default to python\n    if(\"${PYTHON_EXECUTABLE}\" STREQUAL \"\")\n        set(PYTHON_EXECUTABLE \"python3\")\n    else()\n        set(PYTHON_EXECUTABLE \"${PYTHON_EXECUTABLE}\")\n    endif()\n\n    execute_process(\n            COMMAND \"${PYTHON_EXECUTABLE}\" -c \"from distutils.sysconfig import get_python_inc; print(get_python_inc())\"\n            RESULT_VARIABLE result\n            OUTPUT_VARIABLE inc)\n    string(STRIP \"${inc}\" inc)\n    set(${out_inc} ${inc} PARENT_SCOPE)\n\n    execute_process(\n            COMMAND \"${PYTHON_EXECUTABLE}\" -c \"import distutils.sysconfig as sysconfig; import os; \\\n                    print(os.path.join(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY')))\"\n            RESULT_VARIABLE result\n            OUTPUT_VARIABLE lib)\n    string(STRIP \"${lib}\" lib)\n    set(${out_lib} ${lib} PARENT_SCOPE)\nendfunction()\n"
  },
  {
    "path": "cmake/external_libs/absl.cmake",
    "content": "if(ENABLE_GITEE_EULER)\n    set(GIT_REPOSITORY \"https://gitee.com/src-openeuler/abseil-cpp.git\")\n    set(GIT_TAG \"openEuler-22.03-LTS\")\n    set(SHA256 \"365b1ecbbcd81b4c58101808a8a28a3cf9ad7f9d05c08080a35c0d4283a44afa\")\n    set(ABSL_SRC \"${CMAKE_SOURCE_DIR}/build/mindspore_serving/_deps/absl-src\")\n    __download_pkg_with_git(absl ${GIT_REPOSITORY} ${GIT_TAG} ${SHA256})\n    execute_process(COMMAND tar -xf ${ABSL_SRC}/abseil-cpp-20210324.2.tar.gz --strip-components 1 -C ${ABSL_SRC})\nelse()\n    if(ENABLE_GITEE)\n        set(REQ_URL \"https://gitee.com/mirrors/abseil-cpp/repository/archive/20210324.2.tar.gz\")\n        set(SHA256 \"59b862f50e710277f8ede96f083a5bb8d7c9595376146838b9580be90374ee1f\")\n    else()\n        set(REQ_URL \"https://github.com/abseil/abseil-cpp/archive/20210324.2.tar.gz\")\n        set(SHA256 \"59b862f50e710277f8ede96f083a5bb8d7c9595376146838b9580be90374ee1f\")\n    endif()\nendif()\n\nif(NOT ENABLE_GLIBCXX)\n    set(absl_CXXFLAGS \"${absl_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0\")\nendif()\n\nmindspore_add_pkg(absl\n        VER 20210324.2\n        LIBS absl_strings absl_throw_delegate absl_raw_logging_internal absl_int128 absl_bad_optional_access\n        URL ${REQ_URL}\n        SHA256 ${SHA256}\n        CMAKE_OPTION\n        -DCMAKE_BUILD_TYPE:STRING=Release\n        -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=TRUE\n        -DCMAKE_CXX_STANDARD=11\n        )\n\ninclude_directories(${absl_INC})\n\nadd_library(mindspore_serving::absl_strings ALIAS absl::absl_strings)\nadd_library(mindspore_serving::absl_throw_delegate ALIAS absl::absl_throw_delegate)\nadd_library(mindspore_serving::absl_raw_logging_internal ALIAS absl::absl_raw_logging_internal)\nadd_library(mindspore_serving::absl_int128 ALIAS absl::absl_int128)\nadd_library(mindspore_serving::absl_bad_optional_access ALIAS absl::absl_bad_optional_access)\n"
  },
  {
    "path": "cmake/external_libs/c-ares.cmake",
    "content": "if(ENABLE_GITEE)\n    set(REQ_URL \"https://gitee.com/mirrors/c-ares/repository/archive/cares-1_15_0.tar.gz\")\n    set(SHA256 \"7deb7872cbd876c29036d5f37e30c4cbc3cc068d59d8b749ef85bb0736649f04\")\nelse()\n    set(REQ_URL \"https://github.com/c-ares/c-ares/releases/download/cares-1_15_0/c-ares-1.15.0.tar.gz\")\n    set(SHA256 \"6cdb97871f2930530c97deb7cf5c8fa4be5a0b02c7cea6e7c7667672a39d6852\")\nendif()\n\nmindspore_add_pkg(c-ares\n        VER 1.15.0\n        LIBS cares\n        URL ${REQ_URL}\n        SHA256 ${SHA256}\n        CMAKE_OPTION -DCMAKE_BUILD_TYPE:STRING=Release\n        -DCARES_SHARED:BOOL=OFF\n        -DCARES_STATIC:BOOL=ON\n        -DCARES_STATIC_PIC:BOOL=ON\n        -DHAVE_LIBNSL:BOOL=OFF\n        PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/c-ares/CVE-2021-3672.patch)\n\ninclude_directories(${c-ares_INC})\nadd_library(mindspore_serving::cares ALIAS c-ares::cares)\n"
  },
  {
    "path": "cmake/external_libs/eigen.cmake",
    "content": "set(Eigen3_CXXFLAGS \"-D_FORTIFY_SOURCE=2 -O2\")\nset(Eigen3_CFLAGS \"-D_FORTIFY_SOURCE=2 -O2\")\n\n\nset(REQ_URL \"https://gitlab.com/libeigen/eigen/-/archive/3.4.0/eigen-3.4.0.tar.gz\")\nset(SHA256 \"8586084f71f9bde545ee7fa6d00288b264a2b7ac3607b974e54d13e7162c1c72\")\n\n\nmindspore_add_pkg(Eigen3\n        VER 3.4.0\n        URL ${REQ_URL}\n        SHA256 ${SHA256}\n        CMAKE_OPTION -DBUILD_TESTING=OFF)\nfind_package(Eigen3 3.4.0 REQUIRED ${MS_FIND_NO_DEFAULT_PATH})\ninclude_directories(${Eigen3_INC})\ninclude_directories(${EIGEN3_INCLUDE_DIR})\nset_property(TARGET Eigen3::Eigen PROPERTY IMPORTED_GLOBAL TRUE)\nadd_library(mindspore_serving::eigen ALIAS Eigen3::Eigen)\n"
  },
  {
    "path": "cmake/external_libs/glog.cmake",
    "content": "set(glog_CXXFLAGS \"-D_FORTIFY_SOURCE=2 -O2 ${SECURE_CXX_FLAGS} -Dgoogle=mindspore_serving_private\")\nset(glog_CFLAGS \"-D_FORTIFY_SOURCE=2 -O2\")\nset(glog_patch ${CMAKE_SOURCE_DIR}/third_party/patch/glog/glog.patch001)\nset(glog_lib mindspore_serving_glog)\n\nif(NOT ENABLE_GLIBCXX)\n    set(glog_CXXFLAGS \"${glog_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0\")\nendif()\n\nif(ENABLE_GITEE OR ENABLE_GITEE_EULER) # Channel GITEE_EULER is NOT supported now, use GITEE instead.\n    set(REQ_URL \"https://gitee.com/mirrors/glog/repository/archive/v0.4.0.tar.gz\")\n    set(SHA256 \"e17cd4bb7c06951a12fc9db5130ec63a9f090b84340b8556fa0d530f73c6b634\")\nelse()\n    set(REQ_URL \"https://github.com/google/glog/archive/v0.4.0.tar.gz\")\n    set(SHA256 \"f28359aeba12f30d73d9e4711ef356dc842886968112162bc73002645139c39c\")\nendif()\n\nset(glog_option -DBUILD_TESTING=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DBUILD_SHARED_LIBS=ON -DWITH_GFLAGS=OFF\n        -DCMAKE_BUILD_TYPE=Release)\n\nif(WIN32 AND NOT MSVC)\n    if(CMAKE_SIZEOF_VOID_P EQUAL 4)\n        set(glog_option ${glog_option} -DHAVE_DBGHELP=ON)\n    endif()\nendif()\n\nmindspore_add_pkg(glog\n        VER 0.4.0\n        LIBS ${glog_lib}\n        URL ${REQ_URL}\n        SHA256 ${SHA256}\n        PATCHES ${glog_patch}\n        CMAKE_OPTION ${glog_option})\ninclude_directories(${glog_INC})\nadd_library(mindspore_serving::glog ALIAS glog::${glog_lib})\n"
  },
  {
    "path": "cmake/external_libs/grpc.cmake",
    "content": "set(grpc_USE_STATIC_LIBS OFF)\nif(${CMAKE_SYSTEM_NAME} MATCHES \"Darwin\")\n    set(grpc_CXXFLAGS \"-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2 \\\n        -Dgrpc=mindspore_serving_grpc -Dgrpc_impl=mindspore_serving_grpc_impl -Dgrpc_core=mindspore_serving_grpc_core\")\nelseif(${CMAKE_SYSTEM_NAME} MATCHES \"Windows\")\n    set(grpc_CXXFLAGS \"-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2\")\nelse()\n    set(grpc_CXXFLAGS \"-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2 \\\n        -Dgrpc=mindspore_serving_grpc -Dgrpc_impl=mindspore_serving_grpc_impl -Dgrpc_core=mindspore_serving_grpc_core\")\n    set(grpc_CFLAGS \"-fstack-protector-all -D_FORTIFY_SOURCE=2 -O2\")\n    if(NOT ENABLE_GLIBCXX)\n        set(grpc_CXXFLAGS \"${grpc_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0\")\n    endif()\nendif()\n\nif(NOT ${CMAKE_SYSTEM_NAME} MATCHES \"Darwin\")\n    set(grpc_LDFLAGS \"-Wl,-z,relro,-z,now,-z,noexecstack\")\nendif()\n\nif(EXISTS ${protobuf_ROOT}/lib64)\n    set(_FINDPACKAGE_PROTOBUF_CONFIG_DIR \"${protobuf_ROOT}/lib64/cmake/protobuf\")\nelse()\n    set(_FINDPACKAGE_PROTOBUF_CONFIG_DIR \"${protobuf_ROOT}/lib/cmake/protobuf\")\nendif()\nmessage(\"grpc using Protobuf_DIR : \" ${_FINDPACKAGE_PROTOBUF_CONFIG_DIR})\n\nif(EXISTS ${absl_ROOT}/lib64)\n    set(_FINDPACKAGE_ABSL_CONFIG_DIR \"${absl_ROOT}/lib64/cmake/absl\")\nelse()\n    set(_FINDPACKAGE_ABSL_CONFIG_DIR \"${absl_ROOT}/lib/cmake/absl\")\nendif()\nmessage(\"grpc using absl_DIR : \" ${_FINDPACKAGE_ABSL_CONFIG_DIR})\n\nif(EXISTS ${re2_ROOT}/lib64)\n    set(_FINDPACKAGE_RE2_CONFIG_DIR \"${re2_ROOT}/lib64/cmake/re2\")\nelse()\n    set(_FINDPACKAGE_RE2_CONFIG_DIR \"${re2_ROOT}/lib/cmake/re2\")\nendif()\nmessage(\"grpc using re2_DIR : \" ${_FINDPACKAGE_RE2_CONFIG_DIR})\n\nif(EXISTS ${openssl_ROOT})\n    set(_CMAKE_ARGS_OPENSSL_ROOT_DIR 
\"-DOPENSSL_ROOT_DIR:PATH=${openssl_ROOT}\")\nendif()\n\nif(ENABLE_GITEE)\n    set(REQ_URL \"https://gitee.com/mirrors/grpc/repository/archive/v1.36.1.tar.gz\")\n    set(SHA256 \"17a3ac19345a6aeda01b2baba5400e1136b02b44770dbdfe8581255a091aaf87\")\nelse()\n    set(REQ_URL \"https://github.com/grpc/grpc/archive/v1.36.1.tar.gz\")\n    set(SHA256 \"adf51558bf3d057a65651880c9814e09e77b61573eb950c2be1142a624d58e69\")\nendif()\n\nmindspore_add_pkg(grpc\n        VER 1.36.1\n        LIBS mindspore_serving_grpc++ mindspore_serving_grpc mindspore_serving_gpr mindspore_serving_upb\n        mindspore_serving_address_sorting\n        EXE grpc_cpp_plugin grpc_python_plugin\n        URL ${REQ_URL}\n        SHA256 ${SHA256}\n        PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/grpc/grpc.patch001\n        CMAKE_OPTION -DCMAKE_BUILD_TYPE:STRING=Release\n        -DBUILD_SHARED_LIBS=ON\n        -DgRPC_INSTALL:BOOL=ON\n        -DgRPC_BUILD_TESTS:BOOL=OFF\n        -DgRPC_PROTOBUF_PROVIDER:STRING=package\n        -DgRPC_PROTOBUF_PACKAGE_TYPE:STRING=CONFIG\n        -DProtobuf_DIR:PATH=${_FINDPACKAGE_PROTOBUF_CONFIG_DIR}\n        -DgRPC_ZLIB_PROVIDER:STRING=package\n        -DZLIB_ROOT:PATH=${zlib_ROOT}\n        -DgRPC_ABSL_PROVIDER:STRING=package\n        -Dabsl_DIR:PATH=${_FINDPACKAGE_ABSL_CONFIG_DIR}\n        -DgRPC_CARES_PROVIDER:STRING=package\n        -Dc-ares_DIR:PATH=${c-ares_ROOT}/lib/cmake/c-ares\n        -DgRPC_SSL_PROVIDER:STRING=package\n        ${_CMAKE_ARGS_OPENSSL_ROOT_DIR}\n        -DgRPC_RE2_PROVIDER:STRING=package\n        -Dre2_DIR:PATH=${_FINDPACKAGE_RE2_CONFIG_DIR}\n        )\n\ninclude_directories(${grpc_INC})\n\nadd_library(mindspore_serving::grpc++ ALIAS grpc::mindspore_serving_grpc++)\n\n# link other grpc libs\ntarget_link_libraries(grpc::mindspore_serving_grpc++ INTERFACE grpc::mindspore_serving_grpc grpc::mindspore_serving_gpr\n  grpc::mindspore_serving_upb grpc::mindspore_serving_address_sorting)\n\n# modify mindspore macro 
define\nadd_compile_definitions(grpc=mindspore_serving_grpc)\nadd_compile_definitions(grpc_impl=mindspore_serving_grpc_impl)\nadd_compile_definitions(grpc_core=mindspore_serving_grpc_core)\n\nfunction(ms_grpc_generate c_var h_var)\n    if(NOT ARGN)\n        message(SEND_ERROR \"Error: ms_grpc_generate() called without any proto files\")\n        return()\n    endif()\n\n    set(${c_var})\n    set(${h_var})\n\n    foreach(proto_file_with_path ${ARGN})\n        message(proto_file_with_path: ${proto_file_with_path})\n        get_filename_component(proto_file_absolute \"${proto_file_with_path}\" ABSOLUTE)\n        message(proto_file_absolute: ${proto_file_absolute})\n        get_filename_component(file_dir ${proto_file_absolute} DIRECTORY)\n        get_filename_component(proto_I_DIR \"${file_dir}/../../\" ABSOLUTE)\n        get_filename_component(proto_file ${proto_file_absolute} NAME)\n        get_filename_component(proto_file_prefix ${proto_file_absolute} NAME_WE)\n        set(proto_file_relative \"mindspore_serving/proto/${proto_file}\")\n\n        set(protoc_output_prefix ${CMAKE_BINARY_DIR}/mindspore_serving/proto)\n        set(hw_proto_srcs \"${protoc_output_prefix}/${proto_file_prefix}.pb.cc\")\n        set(hw_proto_hdrs \"${protoc_output_prefix}/${proto_file_prefix}.pb.h\")\n        set(hw_grpc_srcs \"${protoc_output_prefix}/${proto_file_prefix}.grpc.pb.cc\")\n        set(hw_grpc_hdrs \"${protoc_output_prefix}/${proto_file_prefix}.grpc.pb.h\")\n        set(hw_py_pb2 \"${protoc_output_prefix}/${proto_file_prefix}_pb2.py\")\n        set(hw_py_pb2_grpc \"${protoc_output_prefix}/${proto_file_prefix}_pb2_grpc.py\")\n        add_custom_command(\n                OUTPUT ${hw_proto_srcs} ${hw_proto_hdrs} ${hw_grpc_srcs} ${hw_grpc_hdrs} ${hw_py_pb2} ${hw_py_pb2_grpc}\n                WORKING_DIRECTORY ${proto_I_DIR}\n                COMMAND $<TARGET_FILE:protobuf::protoc>\n                ARGS --grpc_out \"${CMAKE_BINARY_DIR}\"\n                --cpp_out 
\"${CMAKE_BINARY_DIR}\"\n                -I \"${proto_I_DIR}\"\n                --plugin=protoc-gen-grpc=$<TARGET_FILE:grpc::grpc_cpp_plugin>\n                \"${proto_file_relative}\"\n                COMMAND $<TARGET_FILE:protobuf::protoc>\n                ARGS --grpc_out \"${CMAKE_BINARY_DIR}\"\n                --python_out \"${CMAKE_BINARY_DIR}\"\n                -I \"${proto_I_DIR}\"\n                --plugin=protoc-gen-grpc=$<TARGET_FILE:grpc::grpc_python_plugin>\n                \"${proto_file_relative}\"\n                DEPENDS \"${proto_file_absolute}\")\n\n        list(APPEND ${c_var} ${hw_proto_srcs} ${hw_grpc_srcs})\n        list(APPEND ${h_var} ${hw_proto_hdrs} ${hw_grpc_hdrs})\n    endforeach()\n\n    set_source_files_properties(${${c_var}} ${${h_var}} PROPERTIES GENERATED TRUE)\n    set(${c_var} ${${c_var}} PARENT_SCOPE)\n    set(${h_var} ${${h_var}} PARENT_SCOPE)\nendfunction()\n"
  },
  {
    "path": "cmake/external_libs/gtest.cmake",
    "content": "set(gtest_CXXFLAGS \"-D_FORTIFY_SOURCE=2 -O2\")\nset(gtest_CFLAGS \"-D_FORTIFY_SOURCE=2 -O2\")\n\nset(CMAKE_OPTION\n        -DBUILD_TESTING=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DBUILD_SHARED_LIBS=ON\n        -DCMAKE_MACOSX_RPATH=TRUE -Dgtest_disable_pthreads=ON)\n\nif(NOT ENABLE_GLIBCXX)\n    set(gtest_CXXFLAGS \"${gtest_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0\")\nendif()\n\nif(ENABLE_GITEE)\n    set(REQ_URL \"https://gitee.com/mirrors/googletest/repository/archive/release-1.8.1.tar.gz\")\n    set(SHA256 \"9bf1fe5182a604b4135edc1a425ae356c9ad15e9b23f9f12a02e80184c3a249c\")\nelse()\n    set(REQ_URL \"https://github.com/google/googletest/archive/release-1.8.1.tar.gz\")\n    set(SHA256 \"9bf1fe5182a604b4135edc1a425ae356c9ad15e9b23f9f12a02e80184c3a249c\")\nendif()\n\nmindspore_add_pkg(gtest\n        VER 1.8.1\n        LIBS gtest gmock\n        URL ${REQ_URL}\n        SHA256 ${SHA256}\n        CMAKE_OPTION ${CMAKE_OPTION})\ninclude_directories(${gtest_INC})\nadd_library(mindspore_serving::gtest ALIAS gtest::gtest)\nadd_library(mindspore_serving::gmock ALIAS gtest::gmock)\nif(CMAKE_SYSTEM_NAME MATCHES \"Windows\")\n    file(COPY ${gtest_DIRPATH}/bin/libgtest${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION\n            ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)\n    file(COPY ${gtest_DIRPATH}/bin/libgtest_main${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION\n            ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)\n    file(COPY ${gtest_DIRPATH}/bin/libgmock_main${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION\n            ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)\n    file(COPY ${gtest_DIRPATH}/bin/libgmock${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION\n            ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)\nelse()\n    file(COPY ${gtest_LIBPATH}/libgtest${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION\n            ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)\n    file(COPY ${gtest_LIBPATH}/libgtest_main${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION\n        
    ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)\n    file(COPY ${gtest_LIBPATH}/libgmock${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION\n            ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)\n    file(COPY ${gtest_LIBPATH}/libgmock_main${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION\n            ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)\nendif()"
  },
  {
    "path": "cmake/external_libs/json.cmake",
    "content": "if(MSVC)\n    set(nlohmann_json3101_CXXFLAGS \"${CMAKE_CXX_FLAGS}\")\n    set(nlohmann_json3101_CFLAGS \"${CMAKE_CXX_FLAGS}\")\n    set(nlohmann_json3101_LDFLAGS \"${CMAKE_SHARED_LINKER_FLAGS}\")\nelse()\n    set(nlohmann_json3101_CXXFLAGS \"-D_FORTIFY_SOURCE=2 -O2\")\n    set(nlohmann_json3101_CFLAGS \"-D_FORTIFY_SOURCE=2 -O2\")\nendif()\n\nif(ENABLE_GITEE OR ENABLE_GITEE_EULER) # Channel GITEE_EULER is NOT supported now, use GITEE instead.\n    set(REQ_URL \"https://gitee.com/mirrors/JSON-for-Modern-CPP/repository/archive/v3.10.1.zip\")\n    set(SHA256 \"5c7d0a0542431fef628f8dc4c34fd022fe8747ccb577012d58f38672d8747e0d\")\n    set(INCLUDE \"./include\")\nelse()\n\n    set(REQ_URL \"https://github.com/nlohmann/json/releases/download/v3.10.1/include.zip\")\n    set(SHA256 \"144268f7f85afb0f0fbea7c796723c849724c975f9108ffdadde9ecedaa5f0b1\")\n    set(INCLUDE \"./include\")\nendif()\n\nmindspore_add_pkg(nlohmann_json3101\n        VER 3.10.1\n        HEAD_ONLY ${INCLUDE}\n        URL ${REQ_URL}\n        SHA256 ${SHA256})\ninclude_directories(${nlohmann_json3101_INC})\nadd_library(mindspore_serving::json ALIAS nlohmann_json3101)\n"
  },
  {
    "path": "cmake/external_libs/libevent.cmake",
    "content": "set(openssl_USE_STATIC_LIBS ON)\nset(libevent_CFLAGS \"-fPIC -fvisibility=hidden -fstack-protector-all -D_FORTIFY_SOURCE=2 -O2\")\nif(NOT CMAKE_SYSTEM_NAME MATCHES \"Darwin\")\n    set(libevent_LDFLAGS \"-Wl,-z,now\")\nendif()\n\nif(NOT MINDSPORE_PROJECT_DIR)\n    set(MINDSPORE_PROJECT_DIR ${CMAKE_SOURCE_DIR})\nendif()\n\nif(ENABLE_GITEE OR ENABLE_GITEE_EULER) # Channel GITEE_EULER is NOT supported now, use GITEE instead.\n    set(REQ_URL \"https://gitee.com/mirrors/libevent/repository/archive/release-2.1.12-stable.tar.gz\")\n    set(SHA256 \"7180a979aaa7000e1264da484f712d403fcf7679b1e9212c4e3d09f5c93efc24\")\nelse()\n    set(REQ_URL\n        \"https://github.com/libevent/libevent/releases/download/release-2.1.12-stable/libevent-2.1.12-stable.tar.gz\")\n    set(SHA256 \"92e6de1be9ec176428fd2367677e61ceffc2ee1cb119035037a27d346b0403bb\")\nendif()\n\nmessage(\"libevent using openssl stub dir: \" ${openssl_ROOT})\n\nmindspore_add_pkg(libevent\n        VER 2.1.12\n        LIBS event event_pthreads event_core event_openssl\n        URL ${REQ_URL}\n        SHA256 ${SHA256}\n        PATCHES ${MINDSPORE_PROJECT_DIR}/third_party/patch/libevent/libevent.patch001\n        CMAKE_OPTION -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_TESTING=OFF -DOPENSSL_ROOT_DIR:PATH=${openssl_ROOT}\n        -DEVENT__LIBRARY_TYPE:STRING=STATIC)\n\ninclude_directories(${libevent_INC})\n\nadd_library(mindspore_serving::event ALIAS libevent::event)\nadd_library(mindspore_serving::event_pthreads ALIAS libevent::event_pthreads)\nadd_library(mindspore_serving::event_core ALIAS libevent::event_core)\nadd_library(mindspore_serving::event_openssl ALIAS libevent::event_openssl)\n"
  },
  {
    "path": "cmake/external_libs/openssl.cmake",
    "content": "if(ENABLE_GITEE OR ENABLE_GITEE_EULER) # Channel GITEE_EULER is NOT supported now, use GITEE instead.\n    set(REQ_URL \"https://gitee.com/mirrors/openssl/repository/archive/OpenSSL_1_1_1k.tar.gz\")\n    set(SHA256 \"b92f9d3d12043c02860e5e602e50a73ed21a69947bcc74d391f41148e9f6aa95\")\nelse()\n    set(REQ_URL \"https://github.com/openssl/openssl/archive/refs/tags/OpenSSL_1_1_1k.tar.gz\")\n    set(SHA256 \"b92f9d3d12043c02860e5e602e50a73ed21a69947bcc74d391f41148e9f6aa95\")\nendif()\n\nset(OPENSSL_PATCH_ROOT ${CMAKE_SOURCE_DIR}/third_party/patch/openssl)\nif(${CMAKE_SYSTEM_NAME} MATCHES \"Linux\" OR APPLE)\n    set(openssl_CFLAGS -fvisibility=hidden)\n    mindspore_add_pkg(openssl\n            VER 1.1.1k\n            LIBS ssl crypto\n            URL ${REQ_URL}\n            SHA256 ${SHA256}\n            CONFIGURE_COMMAND ./config no-zlib no-shared\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2021-3711.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2021-3712.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2021-4160.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2022-0778.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2022-1292.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2022-2068.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2022-2097.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2022-4304.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2022-4450.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2023-0215.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2023-0286.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2023-0464.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2023-0465.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2023-0466.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2023-2650.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2023-3446.patch\n            PATCHES ${OPENSSL_PATCH_ROOT}/CVE-2023-4807.patch\n            )\n    
include_directories(${openssl_INC})\n    add_library(mindspore_serving::ssl ALIAS openssl::ssl)\n    add_library(mindspore_serving::crypto ALIAS openssl::crypto)\nendif()"
  },
  {
    "path": "cmake/external_libs/protobuf.cmake",
    "content": "set(protobuf_USE_STATIC_LIBS ON)\n\nif(${CMAKE_SYSTEM_NAME} MATCHES \"Darwin\")\n    set(protobuf_CXXFLAGS \"-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC \\\n        -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2\")\nelseif(${CMAKE_SYSTEM_NAME} MATCHES \"Windows\")\n    set(protobuf_CXXFLAGS \"-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter \\\n        -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2\")\nelse()\n    set(protobuf_CXXFLAGS \"-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter \\\n        -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2\")\n    if(NOT ENABLE_GLIBCXX)\n        set(protobuf_CXXFLAGS \"${protobuf_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0\")\n    endif()\nendif()\nset(protobuf_LDFLAGS \"-Wl,-z,relro,-z,now,-z,noexecstack\")\n\nset(_ms_tmp_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})\nset(CMAKE_CXX_FLAGS ${_ms_tmp_CMAKE_CXX_FLAGS})\nstring(REPLACE \" -Wall\" \"\" CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\")\nstring(REPLACE \" -Werror\" \"\" CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\")\n\nif(ENABLE_GITEE OR ENABLE_GITEE_EULER) # Channel GITEE_EULER is NOT supported now, use GITEE instead.\n    set(REQ_URL \"https://gitee.com/mirrors/protobuf_source/repository/archive/v3.13.0.tar.gz\")\n    set(SHA256 \"ab9b39e7053a6fb06b01bf75fb6ec6a71a1ada5a5f8e2446f927336e97b9e7bb\")\nelse()\n    set(REQ_URL \"https://github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz\")\n    set(SHA256 \"9b4ee22c250fe31b16f1a24d61467e40780a3fbb9b91c3b65be2a376ed913a1a\")\nendif()\n\nset(PROTOBUF_PATCH_ROOT ${CMAKE_SOURCE_DIR}/third_party/patch/protobuf)\n\nmindspore_add_pkg(protobuf\n        VER 3.13.0\n        LIBS protobuf\n        EXE protoc\n        URL ${REQ_URL}\n        SHA256 ${SHA256}\n        CMAKE_PATH cmake/\n        CMAKE_OPTION -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release\n        PATCHES ${PROTOBUF_PATCH_ROOT}/CVE-2021-22570.patch\n        PATCHES 
${PROTOBUF_PATCH_ROOT}/CVE-2022-1941.patch)\n\ninclude_directories(${protobuf_INC})\ninclude_directories(${CMAKE_BINARY_DIR}/proto_py)\nadd_library(mindspore_serving::protobuf ALIAS protobuf::protobuf)\nset(CMAKE_CXX_FLAGS  ${_ms_tmp_CMAKE_CXX_FLAGS})\n# recover original value\nif(MSVC)\n    set(CMAKE_STATIC_LIBRARY_PREFIX ${_ms_tmp_CMAKE_STATIC_LIBRARY_PREFIX})\nendif()\n"
  },
  {
    "path": "cmake/external_libs/pybind11.cmake",
    "content": "set(PYTHON_VERSION ${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR})\n\nif(ENABLE_GITEE OR ENABLE_GITEE_EULER) # Channel GITEE_EULER is NOT supported now, use GITEE instead.\n    if(PYTHON_VERSION MATCHES \"3.9\")\n        set(REQ_URL \"https://gitee.com/mirrors/pybind11/repository/archive/v2.6.1.tar.gz\")\n        set(SHA256 \"c840509be94ac97216c3b4a3ed9f3fdba9948dbe38c16fcfaee3acc6dc93ed0e\")\n    elseif(PYTHON_VERSION MATCHES \"3.8\")\n        set(REQ_URL \"https://gitee.com/mirrors/pybind11/repository/archive/v2.6.1.tar.gz\")\n        set(SHA256 \"c840509be94ac97216c3b4a3ed9f3fdba9948dbe38c16fcfaee3acc6dc93ed0e\")\n    elseif(PYTHON_VERSION MATCHES \"3.7\")\n        set(REQ_URL \"https://gitee.com/mirrors/pybind11/repository/archive/v2.4.3.tar.gz\")\n        set(SHA256 \"182cf9e2c5a7ae6f03f84cf17e826d7aa2b02aa2f3705db684dfe686c0278b36\")\n    else()\n        message(\"Could not find 'Python 3.8' or 'Python 3.7' or 'Python 3.9'\")\n        return()\n    endif()\nelse()\n    if(PYTHON_VERSION MATCHES \"3.9\")\n        set(REQ_URL \"https://github.com/pybind/pybind11/archive/v2.6.1.tar.gz\")\n        set(SHA256 \"cdbe326d357f18b83d10322ba202d69f11b2f49e2d87ade0dc2be0c5c34f8e2a\")\n    elseif(PYTHON_VERSION MATCHES \"3.8\")\n        set(REQ_URL \"https://github.com/pybind/pybind11/archive/v2.6.1.tar.gz\")\n        set(SHA256 \"cdbe326d357f18b83d10322ba202d69f11b2f49e2d87ade0dc2be0c5c34f8e2a\")\n    elseif(PYTHON_VERSION MATCHES \"3.7\")\n        set(REQ_URL \"https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz\")\n        set(SHA256 \"1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d\")\n    else()\n        message(\"Could not find 'Python 3.8' or 'Python 3.7' or 'Python 3.9'\")\n        return()\n    endif()\nendif()\nset(pybind11_CXXFLAGS \"-D_FORTIFY_SOURCE=2 -O2\")\nset(pybind11_CFLAGS \"-D_FORTIFY_SOURCE=2 -O2\")\nset(pybind11_patch ${CMAKE_SOURCE_DIR}/third_party/patch/pybind11/pybind11.patch001)\n\nif(PYTHON_VERSION 
MATCHES \"3.9\")\n    mindspore_add_pkg(pybind11\n            VER 2.6.1\n            URL ${REQ_URL}\n            SHA256 ${SHA256}\n            PATCHES ${pybind11_patch}\n            CMAKE_OPTION -DPYBIND11_TEST=OFF -DPYBIND11_LTO_CXX_FLAGS=FALSE\n            )\nelseif(PYTHON_VERSION MATCHES \"3.8\")\n    mindspore_add_pkg(pybind11\n            VER 2.6.1\n            URL ${REQ_URL}\n            SHA256 ${SHA256}\n            CMAKE_OPTION -DPYBIND11_TEST=OFF -DPYBIND11_LTO_CXX_FLAGS=FALSE\n            )\nelse()\n    mindspore_add_pkg(pybind11\n            VER 2.4.3\n            URL ${REQ_URL}\n            SHA256 ${SHA256}\n            CMAKE_OPTION -DPYBIND11_TEST=OFF -DPYBIND11_LTO_CXX_FLAGS=FALSE\n            )\nendif()\n\ninclude_directories(${pybind11_INC})\nfind_package(pybind11 REQUIRED)\nset_property(TARGET pybind11::module PROPERTY IMPORTED_GLOBAL TRUE)\nadd_library(mindspore_serving::pybind11_module ALIAS pybind11::module)\n"
  },
  {
    "path": "cmake/external_libs/re2.cmake",
    "content": "if(ENABLE_GITEE)\n    set(REQ_URL \"https://gitee.com/mirrors/re2/repository/archive/2019-12-01.tar.gz\")\n    set(SHA256 \"7268e1b4254d9ffa5ccf010fee954150dbb788fd9705234442e7d9f0ee5a42d3\")\nelse()\n    set(REQ_URL \"https://github.com/google/re2/archive/2019-12-01.tar.gz\")\n    set(SHA256 \"7268e1b4254d9ffa5ccf010fee954150dbb788fd9705234442e7d9f0ee5a42d3\")\nendif()\n\nif(NOT ENABLE_GLIBCXX)\n    set(re2_CXXFLAGS \"${re2_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0\")\nendif()\n\nmindspore_add_pkg(re2\n        VER 20191201\n        LIBS re2\n        URL ${REQ_URL}\n        SHA256 ${SHA256}\n        CMAKE_OPTION -DCMAKE_BUILD_TYPE:STRING=Release -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=TRUE)\n\ninclude_directories(${re2_INC})\nadd_library(mindspore_serving::re2 ALIAS re2::re2)"
  },
  {
    "path": "cmake/external_libs/zlib.cmake",
    "content": "if(ENABLE_GITEE)\n    set(REQ_URL \"https://gitee.com/mirrors/zlib/repository/archive/v1.2.11.tar.gz\")\n    set(SHA256 \"f21b3885cc7732f0ab93dbe06ff1ec58069bb58657b3fda89531d1562d8ad708\")\nelse()\n    set(REQ_URL \"https://github.com/madler/zlib/archive/v1.2.11.tar.gz\")\n    set(SHA256 \"629380c90a77b964d896ed37163f5c3a34f6e6d897311f1df2a7016355c45eff\")\nendif()\n\nmindspore_add_pkg(zlib\n        VER 1.2.11\n        LIBS z\n        URL ${REQ_URL}\n        SHA256 ${SHA256}\n        CMAKE_OPTION -DCMAKE_BUILD_TYPE:STRING=Release\n        PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/zlib/CVE-2018-25032.patch\n        PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/zlib/CVE-2022-37434.patch)\n\ninclude_directories(${zlib_INC})\nadd_library(mindspore_serving::z ALIAS zlib::z)\n"
  },
  {
    "path": "cmake/mind_expression.cmake",
    "content": "set(SECURE_CXX_FLAGS \"\")\nif(\"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"GNU\")\n    set(SECURE_CXX_FLAGS \"-fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack\")\nendif()\nset(_ms_tmp_CMAKE_CXX_FLAGS_F ${CMAKE_CXX_FLAGS})\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fvisibility=hidden\")\n\n# define third party library download function\ninclude(cmake/utils.cmake)\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/eigen.cmake)\ninclude(${CMAKE_SOURCE_DIR}/cmake/dependency_securec.cmake)\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/json.cmake)\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/protobuf.cmake)\n\n# build dependencies of gRPC\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/absl.cmake)\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/c-ares.cmake)\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/zlib.cmake)\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/openssl.cmake)\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/re2.cmake)\n# build gRPC\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/grpc.cmake)\n# build event\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/libevent.cmake)\n\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/pybind11.cmake)\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/gtest.cmake)\ninclude(${CMAKE_SOURCE_DIR}/cmake/external_libs/glog.cmake)\n\nset(CMAKE_CXX_FLAGS ${_ms_tmp_CMAKE_CXX_FLAGS_F})\n\nif(MS_BACKEND)\n    include(${CMAKE_SOURCE_DIR}/cmake/dependency_ms.cmake)\nendif()\n"
  },
  {
    "path": "cmake/options.cmake",
    "content": "option(DEBUG_MODE \"Debug mode, default off\" OFF)\noption(ENABLE_COVERAGE \"Enable code coverage report\" OFF)\noption(ENABLE_PYTHON \"Enable python\" ON)\noption(ENABLE_ASAN \"Enable Google Sanitizer to find memory bugs\")\noption(MS_WHL_LIB_PATH \"MindSpore lib path\")\noption(MS_BACKEND \"Compile MindSpore\")\noption(RUN_TESTCASES \"Compile UT\")\n\nif(MS_WHL_LIB_PATH)\n    message(\"MindSpore whl lib path:\" ${MS_WHL_LIB_PATH})\nelseif(MS_BACKEND)\n    message(\"MindSpore backend method:\" ${MS_BACKEND})\nelseif(MS_BACKEND_HEADER)\n    message(\"MindSpore backend method:\" ${MS_BACKEND_HEADER})\nelseif(RUN_TESTCASES)\n    message(\"MindSpore Serving Compile UT:\" ${RUN_TESTCASES})\nelse()\n    message(FATAL_ERROR \"Please confirm how to use MindSpore.\")\nendif()\n\nif(CMAKE_CXX_COMPILER_ID STREQUAL \"GNU\" AND Linux)\n    set(OPTION_CXX_FLAGS \"${OPTION_CXX_FLAGS} -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack\")\nendif()\n\nif(ENABLE_COVERAGE)\n    set(COVERAGE_COMPILER_FLAGS \"-g --coverage -fprofile-arcs -ftest-coverage\")\n    set(OPTION_CXX_FLAGS \"${OPTION_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}\")\nendif()\n\nif(ENABLE_ASAN)\n    if(CMAKE_CXX_COMPILER_ID STREQUAL \"GNU\")\n        set(OPTION_CXX_FLAGS \"${OPTION_CXX_FLAGS} -fsanitize=address -fsanitize-recover=address \\\n                              -fno-omit-frame-pointer -fsanitize=undefined\")\n    else()\n        set(OPTION_CXX_FLAGS \"${OPTION_CXX_FLAGS} -fsanitize=address -fno-omit-frame-pointer -static-libsan \\\n                              -fsanitize=undefined\")\n    endif()\nendif()\n\nif(DEBUG_MODE)\n    set(CMAKE_BUILD_TYPE \"Debug\")\n    add_compile_definitions(MEM_REUSE_DEBUG)\nelse()\n    set(CMAKE_BUILD_TYPE \"Release\")\nendif()\n\nif((CMAKE_SYSTEM_PROCESSOR MATCHES \"aarch64\") OR (CMAKE_BUILD_TYPE STREQUAL Release))\n    set(PYBIND11_LTO_CXX_FLAGS FALSE)\nendif()\n\nif(NOT BUILD_PATH)\n    set(BUILD_PATH \"${CMAKE_SOURCE_DIR}/build\")\nendif()\n\nif(NOT 
CMAKE_SYSTEM_NAME MATCHES \"Windows\")\n    set(MS_BUILD_GRPC ON)\nendif()\n\nadd_compile_definitions(USE_GLOG)\n"
  },
  {
    "path": "cmake/package.cmake",
    "content": "# include dependency\ninclude(CMakePackageConfigHelpers)\ninclude(GNUInstallDirs)\n\n# set package information\nset(CPACK_PACKAGE_NAME ${PROJECT_NAME})\nset(CPACK_GENERATOR \"External\")\nset(CPACK_EXTERNAL_PACKAGE_SCRIPT ${CMAKE_SOURCE_DIR}/cmake/package_script.cmake)\nset(CPACK_EXTERNAL_ENABLE_STAGING true)\nset(CPACK_TEMPORARY_PACKAGE_FILE_NAME ${CMAKE_SOURCE_DIR}/build/package/mindspore_serving)\nset(CPACK_TEMPORARY_INSTALL_DIRECTORY ${CMAKE_SOURCE_DIR}/build/package/mindspore_serving)\n\nset(CPACK_MS_PACKAGE_NAME \"mindspore_serving\")\ninclude(CPack)\n\n# set install path\nset(INSTALL_LIB_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH \"Installation directory for libraries\")\nset(INSTALL_PY_DIR \".\")\nset(INSTALL_BASE_DIR \".\")\nset(INSTALL_LIB_DIR \"lib\")\n\n# grpc\ninstall(FILES ${grpc_LIBPATH}/libmindspore_serving_grpc++.so.1.36.1\n  DESTINATION ${INSTALL_LIB_DIR} RENAME libmindspore_serving_grpc++.so.1 COMPONENT mindspore_serving)\ninstall(FILES ${grpc_LIBPATH}/libmindspore_serving_grpc.so.15.0.0\n  DESTINATION ${INSTALL_LIB_DIR} RENAME libmindspore_serving_grpc.so.15 COMPONENT mindspore_serving)\ninstall(FILES ${grpc_LIBPATH}/libmindspore_serving_gpr.so.15.0.0\n  DESTINATION ${INSTALL_LIB_DIR} RENAME libmindspore_serving_gpr.so.15 COMPONENT mindspore_serving)\ninstall(FILES ${grpc_LIBPATH}/libmindspore_serving_upb.so.15.0.0\n  DESTINATION ${INSTALL_LIB_DIR} RENAME libmindspore_serving_upb.so.15 COMPONENT mindspore_serving)\ninstall(FILES ${grpc_LIBPATH}/libmindspore_serving_address_sorting.so.15.0.0\n  DESTINATION ${INSTALL_LIB_DIR} RENAME libmindspore_serving_address_sorting.so.15 COMPONENT mindspore_serving)\n\n# glog\ninstall(FILES ${glog_LIBPATH}/libmindspore_serving_glog.so.0.4.0\n        DESTINATION ${INSTALL_LIB_DIR} RENAME libmindspore_serving_glog.so.0 COMPONENT mindspore_serving)\n\n# set python files\nfile(GLOB MS_PY_LIST ${CMAKE_SOURCE_DIR}/mindspore_serving/*.py)\ninstall(\n        FILES ${MS_PY_LIST}\n        DESTINATION 
${INSTALL_PY_DIR}\n        COMPONENT mindspore_serving\n)\n\ninstall(\n        TARGETS _mindspore_serving\n        DESTINATION ${INSTALL_BASE_DIR}\n        COMPONENT mindspore_serving\n)\ninstall(\n        TARGETS serving_common\n        DESTINATION ${INSTALL_LIB_DIR}\n        COMPONENT mindspore_serving\n)\ninstall(\n        TARGETS serving_ascend\n        DESTINATION ${INSTALL_LIB_DIR}\n        COMPONENT mindspore_serving\n)\ninstall(\n        DIRECTORY\n        ${CMAKE_SOURCE_DIR}/mindspore_serving/server\n        ${CMAKE_SOURCE_DIR}/mindspore_serving/client\n        DESTINATION ${INSTALL_PY_DIR}\n        COMPONENT mindspore_serving\n)\ninstall(\n        FILES ${CMAKE_BINARY_DIR}/mindspore_serving/proto/ms_service_pb2.py\n        ${CMAKE_BINARY_DIR}/mindspore_serving/proto/ms_service_pb2_grpc.py\n        DESTINATION ${INSTALL_PY_DIR}/proto\n        COMPONENT mindspore_serving\n)\n"
  },
  {
    "path": "cmake/package_script.cmake",
    "content": "# find exec\nfind_package(Python3 3.7 COMPONENTS Interpreter)\nif(NOT Python3_FOUND)\n    message(FATAL_ERROR \"No python3 found.\")\nendif()\n\nset(PYTHON ${Python3_EXECUTABLE})\nset(PYTHON_VERSION ${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR})\n\nif(NOT (PYTHON_VERSION MATCHES \"3.7\" OR PYTHON_VERSION MATCHES \"3.8\" OR PYTHON_VERSION MATCHES \"3.9\"))\n    message(FATAL_ERROR \"FIND PYTHON VERSION ${PYTHON_VERSION} BUT CAN NOT MATCH PYTHON VERSION 3.7, 3.8 OR 3.9\")\nendif()\n\nfind_package(Git)\nif(NOT GIT_FOUND)\n    message(\"No git found.\")\n    return()\nendif()\nset(GIT ${GIT_EXECUTABLE})\n\n# set path\nset(MS_ROOT_DIR ${CPACK_PACKAGE_DIRECTORY}/../../)\nset(MS_PACK_ROOT_DIR ${MS_ROOT_DIR}/build/package)\n\n# set package file name\nif(CMAKE_SYSTEM_NAME MATCHES \"Linux\")\n    if(PYTHON_VERSION MATCHES \"3.7\")\n        set(PY_TAGS \"cp37-cp37m\")\n    elseif(PYTHON_VERSION MATCHES \"3.8\")\n        set(PY_TAGS \"cp38-cp38\")\n    elseif(PYTHON_VERSION MATCHES \"3.9\")\n        set(PY_TAGS \"cp39-cp39\")\n    else()\n        message(\"Could not find 'Python 3.7', 'Python 3.8' or 'Python 3.9'\")\n        return()\n    endif()\n    string(TOLOWER linux_${CMAKE_HOST_SYSTEM_PROCESSOR} PLATFORM_TAG)\nelseif(CMAKE_SYSTEM_NAME MATCHES \"Darwin\")\n    if(PYTHON_VERSION MATCHES \"3.7\")\n        set(PY_TAGS \"py37-none\")\n    elseif(PYTHON_VERSION MATCHES \"3.8\")\n        set(PY_TAGS \"py38-none\")\n    elseif(PYTHON_VERSION MATCHES \"3.9\")\n        set(PY_TAGS \"py39-none\")\n    else()\n        message(\"Could not find 'Python 3.7', 'Python 3.8' or 'Python 3.9'\")\n        return()\n    endif()\n    set(PLATFORM_TAG \"any\")\nelseif(CMAKE_SYSTEM_NAME MATCHES \"Windows\")\n    if(PYTHON_VERSION MATCHES \"3.7\")\n        set(PY_TAGS \"cp37-cp37m\")\n    elseif(PYTHON_VERSION MATCHES \"3.8\")\n        set(PY_TAGS \"cp38-cp38\")\n    elseif(PYTHON_VERSION MATCHES \"3.9\")\n        set(PY_TAGS \"cp39-cp39\")\n    else()\n        
message(\"Could not find 'Python 3.7', 'Python 3.8' or 'Python 3.9'\")\n        return()\n    endif()\n    set(PLATFORM_TAG \"win_amd64\")\nelse()\n    message(FATAL_ERROR \"other platform: ${CMAKE_SYSTEM_NAME}\")\nendif()\n\n# get git commit id\nset(GIT_COMMIT_ID \"\")\nexecute_process(\n        COMMAND ${GIT} log --format='[sha1]:%h,[branch]:%d' --abbrev=8 -1\n        OUTPUT_VARIABLE GIT_COMMIT_ID\n        WORKING_DIRECTORY ${MS_ROOT_DIR}\n        ERROR_QUIET)\nstring(REPLACE \" \" \"\" GIT_COMMIT_ID ${GIT_COMMIT_ID})\n\nset(ENV{MS_PACKAGE_NAME} ${CPACK_MS_PACKAGE_NAME})\nset(ENV{COMMIT_ID} ${GIT_COMMIT_ID})\n\nexecute_process(\n        COMMAND ${PYTHON} ${MS_ROOT_DIR}/setup.py \"bdist_wheel\"\n        WORKING_DIRECTORY ${MS_PACK_ROOT_DIR}\n)\n\n# finally\nset(PACKAGE_NAME ${CPACK_MS_PACKAGE_NAME})\nif(NOT CMAKE_SYSTEM_NAME MATCHES \"Windows\")\n    string(REPLACE \"-\" \"_\" PACKAGE_NAME ${PACKAGE_NAME})\n    execute_process(\n            COMMAND chmod -R 700 ${MS_PACK_ROOT_DIR}/mindspore_serving/\n            COMMAND chmod -R 700 ${MS_PACK_ROOT_DIR}/${PACKAGE_NAME}.egg-info/\n    )\nendif()\n\nfile(GLOB WHL_FILE ${MS_PACK_ROOT_DIR}/dist/*.whl)\nget_filename_component(ORIGIN_FILE_NAME ${WHL_FILE} NAME)\nstring(REPLACE \"-\" \";\" ORIGIN_FILE_NAME ${ORIGIN_FILE_NAME})\nlist(GET ORIGIN_FILE_NAME 1 VERSION)\nset(NEW_FILE_NAME ${PACKAGE_NAME}-${VERSION}-${PY_TAGS}-${PLATFORM_TAG}.whl)\nfile(RENAME ${WHL_FILE} ${MS_PACK_ROOT_DIR}/${NEW_FILE_NAME})\nfile(REMOVE_RECURSE ${MS_ROOT_DIR}/output)\nfile(MAKE_DIRECTORY ${MS_ROOT_DIR}/output)\nfile(COPY ${MS_PACK_ROOT_DIR}/${NEW_FILE_NAME} DESTINATION ${MS_ROOT_DIR}/output/)\n\nfile(SHA256 ${MS_ROOT_DIR}/output/${NEW_FILE_NAME} SHA256_VAR)\nfile(WRITE ${MS_ROOT_DIR}/output/${NEW_FILE_NAME}.sha256 ${SHA256_VAR} \" \" ${NEW_FILE_NAME})\n"
  },
  {
    "path": "cmake/utils.cmake",
    "content": "include(FetchContent) # download third-party libraries\nset(FETCHCONTENT_QUIET OFF)\n\nfunction(mindspore_add_submodule_obj des_submodule_objs sub_dir submodule_name_obj)\n\n    add_subdirectory(${sub_dir})\n\n    if(NOT TARGET ${submodule_name_obj})\n        message(FATAL_ERROR \"Can not find submodule '${submodule_name_obj}'. in ${CMAKE_CURRENT_LIST_FILE}\")\n    endif()\n    if(\"$<TARGET_OBJECTS:${submodule_name_obj}>\" IN_LIST ${des_submodule_objs})\n        message(FATAL_ERROR \"submodule '${submodule_name_obj}' added more than once. in ${CMAKE_CURRENT_LIST_FILE}\")\n    endif()\n\n    set(${des_submodule_objs} ${${des_submodule_objs}} $<TARGET_OBJECTS:${submodule_name_obj}> PARENT_SCOPE)\n\nendfunction()\n\nif(DEFINED ENV{MSLIBS_CACHE_PATH})\n    set(_MS_LIB_CACHE $ENV{MSLIBS_CACHE_PATH})\nelse()\n    set(_MS_LIB_CACHE ${CMAKE_BINARY_DIR}/.mslib)\nendif()\nmessage(\"MS LIBS CACHE PATH:  ${_MS_LIB_CACHE}\")\n\nif(NOT EXISTS ${_MS_LIB_CACHE})\n    file(MAKE_DIRECTORY ${_MS_LIB_CACHE})\nendif()\n\nif(DEFINED ENV{MSLIBS_SERVER})  # export MSLIBS_SERVER=49.4.0.74\n    set(LOCAL_LIBS_SERVER $ENV{MSLIBS_SERVER})\n    message(\"LOCAL_LIBS_SERVER:  ${LOCAL_LIBS_SERVER}\")\nendif()\n\ninclude(ProcessorCount) # determine the number of processors/cores and store the value in ${var}\nProcessorCount(N)\nif(JOBS)\n    set(THNUM ${JOBS})\nelse()\n    set(JOBS 8)\n    if(${JOBS} GREATER ${N})\n        set(THNUM ${N})\n    else()\n        set(THNUM ${JOBS})\n    endif()\nendif()\nmessage(\"set make thread num: ${THNUM}\")\n\nif(LOCAL_LIBS_SERVER)\n    if(NOT DEFINED ENV{no_proxy})\n        set(ENV{no_proxy} \"${LOCAL_LIBS_SERVER}\")\n    else()\n        string(FIND $ENV{no_proxy} ${LOCAL_LIBS_SERVER} IP_POS)\n        if(${IP_POS} EQUAL -1)\n            set(ENV{no_proxy} \"$ENV{no_proxy},${LOCAL_LIBS_SERVER}\")\n        endif()\n    endif()\nendif()\n\nfunction(__download_pkg pkg_name pkg_url pkg_sha256)\n\n    if(LOCAL_LIBS_SERVER)\n        get_filename_component(_URL_FILE_NAME ${pkg_url} NAME)\n        set(pkg_url 
\"http://${LOCAL_LIBS_SERVER}:8081/libs/${pkg_name}/${_URL_FILE_NAME}\" ${pkg_url})\n    endif()\n\n    FetchContent_Declare(  # 获取项目。可以是一个URL也可以是一个Git仓库。\n            ${pkg_name}\n            URL ${pkg_url}\n            URL_HASH SHA256=${pkg_sha256}\n            )\n    FetchContent_GetProperties(${pkg_name}) # 获取我们需要的变量MyName_*。\n    message(\"download: ${${pkg_name}_SOURCE_DIR} , ${pkg_name} , ${pkg_url}\")\n    if(NOT ${pkg_name}_POPULATED)\n        FetchContent_Populate(${pkg_name}) # 将信息记录在可以随时查询的全局属性中\n        set(${pkg_name}_SOURCE_DIR ${${pkg_name}_SOURCE_DIR} PARENT_SCOPE)\n    endif()\n\nendfunction()\n\nfunction(__download_pkg_with_git pkg_name pkg_url pkg_git_commit pkg_sha256)\n\n    if(LOCAL_LIBS_SERVER)\n        set(pkg_url \"http://${LOCAL_LIBS_SERVER}:8081/libs/${pkg_name}/${pkg_git_commit}\")\n        FetchContent_Declare(\n                ${pkg_name}\n                URL ${pkg_url}\n                URL_HASH SHA256=${pkg_sha256}\n        )\n    else()\n        FetchContent_Declare(\n                ${pkg_name}\n                GIT_REPOSITORY ${pkg_url}\n                GIT_TAG ${pkg_git_commit})\n    endif()\n    FetchContent_GetProperties(${pkg_name})\n    message(\"download: ${${pkg_name}_SOURCE_DIR} , ${pkg_name} , ${pkg_url}\")\n    if(NOT ${pkg_name}_POPULATED)\n        FetchContent_Populate(${pkg_name})\n        set(${pkg_name}_SOURCE_DIR ${${pkg_name}_SOURCE_DIR} PARENT_SCOPE)\n    endif()\n\nendfunction()\n\n\nfunction(__find_pkg_then_add_target_lib pkg_name lib_path)\n\n    unset(${pkg_name}_LIBS)\n\n    message(\"_FIND:${${pkg_name}_BASE_DIR}\")\n\n    foreach(_LIB_NAME ${ARGN})\n        set(_LIB_SEARCH_NAME ${_LIB_NAME})\n        set(_LIB_TYPE SHARED)\n        if(${pkg_name}_USE_STATIC_LIBS)\n            set(_LIB_SEARCH_NAME \"${CMAKE_STATIC_LIBRARY_PREFIX}${_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}\")\n            set(_LIB_TYPE STATIC)\n        endif()\n        set(${_LIB_NAME}_LIB ${_LIB_NAME}_LIB-NOTFOUND)\n        
find_library(${_LIB_NAME}_LIB ${_LIB_SEARCH_NAME} PATHS ${${pkg_name}_BASE_DIR}/${lib_path} NO_DEFAULT_PATH)\n        if(NOT ${_LIB_NAME}_LIB AND BUILD_LITE AND PLATFORM_ARM)\n            set(${_LIB_NAME}_LIB \"${${pkg_name}_BASE_DIR}/${lib_path}/lib${_LIB_SEARCH_NAME}.so\")\n        endif()\n        if(NOT ${_LIB_NAME}_LIB)\n            return()\n        endif()\n\n        add_library(${pkg_name}::${_LIB_NAME} ${_LIB_TYPE} IMPORTED GLOBAL)\n        if(WIN32 AND ${_LIB_TYPE} STREQUAL \"SHARED\")\n            set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES IMPORTED_IMPLIB_RELEASE ${${_LIB_NAME}_LIB})\n        else()\n            set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES IMPORTED_LOCATION ${${_LIB_NAME}_LIB})\n        endif()\n\n        if(EXISTS ${${pkg_name}_BASE_DIR}/include)\n            set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES\n                    INTERFACE_INCLUDE_DIRECTORIES \"${${pkg_name}_BASE_DIR}/include\")\n        endif()\n\n        list(APPEND ${pkg_name}_LIBS ${pkg_name}::${_LIB_NAME})\n        message(\"found ${${_LIB_NAME}_LIB}\")\n        STRING(REGEX REPLACE \"(.+)/(.+)\" \"\\\\1\" LIBPATH ${${_LIB_NAME}_LIB})\n        set(${pkg_name}_LIBPATH ${LIBPATH} CACHE STRING INTERNAL)\n    endforeach()\n\n    set(${pkg_name}_LIBS ${${pkg_name}_LIBS} PARENT_SCOPE)\nendfunction()\n\n\nfunction(__find_pkg_then_add_target_exe pkg_name lib_path)\n    message(\"_FIND:${${pkg_name}_BASE_DIR}\")\n    foreach(pkg_exe ${ARGN})\n        # find_program：该命令用于查找程序。<VAR>创建名为的缓存条目以存储此命令的结果。\n        # 如果找到程序，则结果存储在变量中，除非清除变量，否则将不会重复搜索。如果什么也没找到，结果将是<VAR>-NOTFOUND。\n        find_program(${pkg_exe}_EXE ${pkg_exe} PATHS ${${pkg_name}_BASE_DIR}/bin NO_DEFAULT_PATH)\n        if(NOT ${pkg_exe}_EXE)\n            return()\n        endif()\n        # add_executable： 使用给定的源文件，为工程引入一个可执行文件。\n        # IMPORTED：一个导入的可执行目标引用了一个位于工程之外的可执行文件。\n        add_executable(${pkg_name}::${pkg_exe} IMPORTED GLOBAL)\n        
set_target_properties(${pkg_name}::${pkg_exe} PROPERTIES\n                IMPORTED_LOCATION ${${pkg_exe}_EXE}\n                )\n        message(\"found ${${pkg_exe}_EXE}\")\n    endforeach()\nendfunction()\n\nfunction(__exec_cmd)\n    set(options)\n    set(oneValueArgs WORKING_DIRECTORY)\n    set(multiValueArgs COMMAND)\n\n    cmake_parse_arguments(EXEC \"${options}\" \"${oneValueArgs}\" \"${multiValueArgs}\" ${ARGN})\n\n    execute_process(COMMAND ${EXEC_COMMAND}\n            WORKING_DIRECTORY ${EXEC_WORKING_DIRECTORY}\n            RESULT_VARIABLE RESULT)\n    if(NOT RESULT EQUAL \"0\")\n        message(FATAL_ERROR \"error! when ${EXEC_COMMAND} in ${EXEC_WORKING_DIRECTORY}\")\n    endif()\nendfunction()\n\nfunction(__check_patches pkg_patches)\n    # check patches\n    if(PKG_PATCHES)\n        file(TOUCH ${_MS_LIB_CACHE}/${pkg_name}_patch.sha256)\n        file(READ ${_MS_LIB_CACHE}/${pkg_name}_patch.sha256 ${pkg_name}_PATCHES_SHA256)\n\n        message(\"patches sha256:${${pkg_name}_PATCHES_SHA256}\")\n\n        set(${pkg_name}_PATCHES_NEW_SHA256)\n        foreach(_PATCH ${PKG_PATCHES})\n            file(SHA256 ${_PATCH} _PF_SHA256)\n            set(${pkg_name}_PATCHES_NEW_SHA256 \"${${pkg_name}_PATCHES_NEW_SHA256},${_PF_SHA256}\")\n        endforeach()\n\n        if(NOT ${pkg_name}_PATCHES_SHA256 STREQUAL ${pkg_name}_PATCHES_NEW_SHA256)\n            set(${pkg_name}_PATCHES ${PKG_PATCHES})\n            file(REMOVE_RECURSE \"${_MS_LIB_CACHE}/${pkg_name}-subbuild\")\n            file(WRITE ${_MS_LIB_CACHE}/${pkg_name}_patch.sha256 ${${pkg_name}_PATCHES_NEW_SHA256})\n            message(\"patches changed : ${${pkg_name}_PATCHES_NEW_SHA256}\")\n        endif()\n    endif()\nendfunction()\n\nset(MS_FIND_NO_DEFAULT_PATH NO_CMAKE_PATH NO_CMAKE_ENVIRONMENT_PATH NO_SYSTEM_ENVIRONMENT_PATH\n        NO_CMAKE_BUILDS_PATH NO_CMAKE_PACKAGE_REGISTRY NO_CMAKE_SYSTEM_PATH\n        NO_CMAKE_SYSTEM_PACKAGE_REGISTRY)\nset(MS_FIND_NO_DEFAULT_PATH ${MS_FIND_NO_DEFAULT_PATH} 
PARENT_SCOPE)\nfunction(mindspore_add_pkg pkg_name)\n\n    message(\"---------add pkg: \" ${pkg_name} \"---------\")\n    set(options)\n    set(oneValueArgs URL SHA256 GIT_REPOSITORY GIT_TAG VER DIR HEAD_ONLY CMAKE_PATH RELEASE LIB_PATH CUSTOM_CMAKE)\n    set(multiValueArgs CMAKE_OPTION LIBS EXE PRE_CONFIGURE_COMMAND CONFIGURE_COMMAND BUILD_OPTION INSTALL_INCS\n            INSTALL_LIBS PATCHES SUBMODULES SOURCEMODULES ONLY_MAKE ONLY_MAKE_INCS ONLY_MAKE_LIBS)\n    cmake_parse_arguments(PKG \"${options}\" \"${oneValueArgs}\" \"${multiValueArgs}\" ${ARGN})\n\n    if(NOT PKG_LIB_PATH)\n        set(PKG_LIB_PATH lib)\n    endif()\n\n    if(NOT PKG_EXE)\n        set(PKG_EXE 0)\n    endif()\n\n    set(__FIND_PKG_NAME ${pkg_name})\n    string(TOLOWER ${pkg_name} pkg_name)\n    message(\"pkg name:${__FIND_PKG_NAME},${pkg_name}\")\n\n    set(${pkg_name}_PATCHES_HASH)\n    foreach(_PATCH ${PKG_PATCHES})\n        file(SHA256 ${_PATCH} _PF_SHA256)\n        set(${pkg_name}_PATCHES_HASH \"${${pkg_name}_PATCHES_HASH},${_PF_SHA256}\")\n    endforeach()\n\n    # check options\n    set(${pkg_name}_CONFIG_TXT\n            \"${CMAKE_CXX_COMPILER_VERSION}-${CMAKE_C_COMPILER_VERSION}\n            ${ARGN} - ${${pkg_name}_USE_STATIC_LIBS}- ${${pkg_name}_PATCHES_HASH}\n            ${${pkg_name}_CXXFLAGS}--${${pkg_name}_CFLAGS}--${${pkg_name}_LDFLAGS}\")\n    string(REPLACE \";\" \"-\" ${pkg_name}_CONFIG_TXT ${${pkg_name}_CONFIG_TXT})\n    string(SHA256 ${pkg_name}_CONFIG_HASH ${${pkg_name}_CONFIG_TXT})\n\n    message(\"${pkg_name} config hash: ${${pkg_name}_CONFIG_HASH}\")\n\n    set(${pkg_name}_BASE_DIR ${_MS_LIB_CACHE}/${pkg_name}_${${pkg_name}_CONFIG_HASH})\n    set(${pkg_name}_DIRPATH ${${pkg_name}_BASE_DIR} CACHE STRING INTERNAL)\n\n    if(EXISTS ${${pkg_name}_BASE_DIR}/options.txt AND PKG_HEAD_ONLY)\n        set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/${PKG_HEAD_ONLY} PARENT_SCOPE)\n        add_library(${pkg_name} INTERFACE)\n        target_include_directories(${pkg_name} INTERFACE 
${${pkg_name}_INC})\n        if(${PKG_RELEASE})\n            __find_pkg_then_add_target_exe(${pkg_name} ${PKG_LIB_PATH} ${PKG_EXE})\n            __find_pkg_then_add_target_lib(${pkg_name} ${PKG_LIB_PATH} ${PKG_LIBS})\n        endif()\n        return()\n    endif()\n\n    set(${__FIND_PKG_NAME}_ROOT ${${pkg_name}_BASE_DIR})\n    set(${__FIND_PKG_NAME}_ROOT ${${pkg_name}_BASE_DIR} PARENT_SCOPE)\n\n    if(PKG_LIBS)\n        __find_pkg_then_add_target_exe(${pkg_name} ${PKG_LIB_PATH} ${PKG_EXE})\n        __find_pkg_then_add_target_lib(${pkg_name} ${PKG_LIB_PATH} ${PKG_LIBS})\n        if(${pkg_name}_LIBS)\n            set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE)\n            message(\"Found libs: ${${pkg_name}_LIBS}\")\n            return()\n        endif()\n    elseif(NOT PKG_HEAD_ONLY)\n        find_package(${__FIND_PKG_NAME} ${PKG_VER} ${MS_FIND_NO_DEFAULT_PATH})\n        if(${__FIND_PKG_NAME}_FOUND)\n            set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE)\n            message(\"Found pkg: ${__FIND_PKG_NAME}\")\n            return()\n        endif()\n    endif()\n\n    if(NOT PKG_DIR)\n        if(PKG_GIT_REPOSITORY)\n            __download_pkg_with_git(${pkg_name} ${PKG_GIT_REPOSITORY} ${PKG_GIT_TAG} ${PKG_SHA256})\n        else()\n            __download_pkg(${pkg_name} ${PKG_URL} ${PKG_SHA256})\n        endif()\n        foreach(_SUBMODULE_FILE ${PKG_SUBMODULES})\n            STRING(REGEX REPLACE \"(.+)_(.+)\" \"\\\\1\" _SUBMODEPATH ${_SUBMODULE_FILE})\n            STRING(REGEX REPLACE \"(.+)/(.+)\" \"\\\\2\" _SUBMODENAME ${_SUBMODEPATH})\n            file(GLOB ${pkg_name}_INSTALL_SUBMODULE ${_SUBMODULE_FILE}/*)\n            file(COPY ${${pkg_name}_INSTALL_SUBMODULE} DESTINATION ${${pkg_name}_SOURCE_DIR}/3rdparty/${_SUBMODENAME})\n        endforeach()\n    else()\n        set(${pkg_name}_SOURCE_DIR ${PKG_DIR})\n    endif()\n    file(WRITE ${${pkg_name}_BASE_DIR}/options.txt ${${pkg_name}_CONFIG_TXT})\n    
message(\"${pkg_name}_SOURCE_DIR : ${${pkg_name}_SOURCE_DIR}\")\n\n    foreach(_PATCH_FILE ${PKG_PATCHES})\n        get_filename_component(_PATCH_FILE_NAME ${_PATCH_FILE} NAME)\n        set(_LF_PATCH_FILE ${CMAKE_BINARY_DIR}/_ms_patch/${_PATCH_FILE_NAME})\n        configure_file(${_PATCH_FILE} ${_LF_PATCH_FILE} NEWLINE_STYLE LF @ONLY)\n\n        message(\"patching ${${pkg_name}_SOURCE_DIR} -p1 < ${_LF_PATCH_FILE}\")\n        execute_process(COMMAND ${Patch_EXECUTABLE} -p1 INPUT_FILE ${_LF_PATCH_FILE}\n                WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}\n                RESULT_VARIABLE Result)\n        if(NOT Result EQUAL \"0\")\n            message(FATAL_ERROR \"Failed patch: ${_LF_PATCH_FILE}\")\n        endif()\n    endforeach()\n    foreach(_SOURCE_DIR ${PKG_SOURCEMODULES})\n        file(GLOB ${pkg_name}_INSTALL_SOURCE ${${pkg_name}_SOURCE_DIR}/${_SOURCE_DIR}/*)\n        file(COPY ${${pkg_name}_INSTALL_SOURCE} DESTINATION ${${pkg_name}_BASE_DIR}/${_SOURCE_DIR}/)\n    endforeach()\n    file(LOCK ${${pkg_name}_BASE_DIR} DIRECTORY GUARD FUNCTION RESULT_VARIABLE ${pkg_name}_LOCK_RET TIMEOUT 600)\n    if(NOT ${pkg_name}_LOCK_RET EQUAL \"0\")\n        message(FATAL_ERROR \"error! 
when try lock ${${pkg_name}_BASE_DIR} : ${${pkg_name}_LOCK_RET}\")\n    endif()\n\n    if(PKG_CUSTOM_CMAKE)\n        file(GLOB ${pkg_name}_cmake ${PKG_CUSTOM_CMAKE}/CMakeLists.txt)\n        file(COPY ${${pkg_name}_cmake} DESTINATION ${${pkg_name}_SOURCE_DIR})\n    endif()\n\n    if(${pkg_name}_SOURCE_DIR)\n        if(PKG_HEAD_ONLY)\n            file(GLOB ${pkg_name}_SOURCE_SUBDIRS ${${pkg_name}_SOURCE_DIR}/*)\n            file(COPY ${${pkg_name}_SOURCE_SUBDIRS} DESTINATION ${${pkg_name}_BASE_DIR})\n            set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/${PKG_HEAD_ONLY} PARENT_SCOPE)\n            if(NOT PKG_RELEASE)\n                add_library(${pkg_name} INTERFACE)\n                target_include_directories(${pkg_name} INTERFACE ${${pkg_name}_INC})\n            endif()\n\n        elseif(PKG_ONLY_MAKE)\n            __exec_cmd(COMMAND ${CMAKE_MAKE_PROGRAM} ${${pkg_name}_CXXFLAGS} -j${THNUM}\n                    WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR})\n            set(PKG_INSTALL_INCS ${PKG_ONLY_MAKE_INCS})\n            set(PKG_INSTALL_LIBS ${PKG_ONLY_MAKE_LIBS})\n            file(GLOB ${pkg_name}_INSTALL_INCS ${${pkg_name}_SOURCE_DIR}/${PKG_INSTALL_INCS})\n            file(GLOB ${pkg_name}_INSTALL_LIBS ${${pkg_name}_SOURCE_DIR}/${PKG_INSTALL_LIBS})\n            file(COPY ${${pkg_name}_INSTALL_INCS} DESTINATION ${${pkg_name}_BASE_DIR}/include)\n            file(COPY ${${pkg_name}_INSTALL_LIBS} DESTINATION ${${pkg_name}_BASE_DIR}/lib)\n\n        elseif(PKG_CMAKE_OPTION)\n            # in cmake\n            file(MAKE_DIRECTORY ${${pkg_name}_SOURCE_DIR}/_build)\n            if(${pkg_name}_CFLAGS)\n                set(${pkg_name}_CMAKE_CFLAGS \"-DCMAKE_C_FLAGS=${${pkg_name}_CFLAGS}\")\n            endif()\n            if(${pkg_name}_CXXFLAGS)\n                set(${pkg_name}_CMAKE_CXXFLAGS \"-DCMAKE_CXX_FLAGS=${${pkg_name}_CXXFLAGS}\")\n            endif()\n\n            if(${pkg_name}_LDFLAGS)\n                if(${pkg_name}_USE_STATIC_LIBS)\n                    
#set(${pkg_name}_CMAKE_LDFLAGS \"-DCMAKE_STATIC_LINKER_FLAGS=${${pkg_name}_LDFLAGS}\")\n                else()\n                    set(${pkg_name}_CMAKE_LDFLAGS \"-DCMAKE_SHARED_LINKER_FLAGS=${${pkg_name}_LDFLAGS}\")\n                endif()\n            endif()\n\n            __exec_cmd(COMMAND ${CMAKE_COMMAND} ${PKG_CMAKE_OPTION} -G ${CMAKE_GENERATOR}\n                    ${${pkg_name}_CMAKE_CFLAGS} ${${pkg_name}_CMAKE_CXXFLAGS} ${${pkg_name}_CMAKE_LDFLAGS}\n                    -DCMAKE_INSTALL_PREFIX=${${pkg_name}_BASE_DIR} ${${pkg_name}_SOURCE_DIR}/${PKG_CMAKE_PATH}\n                    WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}/_build)\n\n            __exec_cmd(COMMAND ${CMAKE_COMMAND} --build . --target install -- -j${THNUM}\n                    WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}/_build)\n\n        else()\n            if(${pkg_name}_CFLAGS)\n                set(${pkg_name}_MAKE_CFLAGS \"CFLAGS=${${pkg_name}_CFLAGS}\")\n            endif()\n            if(${pkg_name}_CXXFLAGS)\n                set(${pkg_name}_MAKE_CXXFLAGS \"CXXFLAGS=${${pkg_name}_CXXFLAGS}\")\n            endif()\n            if(${pkg_name}_LDFLAGS)\n                set(${pkg_name}_MAKE_LDFLAGS \"LDFLAGS=${${pkg_name}_LDFLAGS}\")\n            endif()\n            # in configure && make\n            if(PKG_PRE_CONFIGURE_COMMAND)\n                __exec_cmd(COMMAND ${PKG_PRE_CONFIGURE_COMMAND}\n                        WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR})\n            endif()\n\n            if(PKG_CONFIGURE_COMMAND)\n                __exec_cmd(COMMAND ${PKG_CONFIGURE_COMMAND}\n                        ${${pkg_name}_MAKE_CFLAGS} ${${pkg_name}_MAKE_CXXFLAGS} ${${pkg_name}_MAKE_LDFLAGS}\n                        --prefix=${${pkg_name}_BASE_DIR}\n                        WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR})\n            endif()\n            set(${pkg_name}_BUILD_OPTION ${PKG_BUILD_OPTION})\n            if(NOT PKG_CONFIGURE_COMMAND)\n                set(${pkg_name}_BUILD_OPTION 
${${pkg_name}_BUILD_OPTION}\n                        ${${pkg_name}_MAKE_CFLAGS} ${${pkg_name}_MAKE_CXXFLAGS} ${${pkg_name}_MAKE_LDFLAGS})\n            endif()\n            # build\n            __exec_cmd(COMMAND ${CMAKE_MAKE_PROGRAM} ${${pkg_name}_BUILD_OPTION} -j${THNUM}\n                    WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR})\n\n            if(PKG_INSTALL_INCS OR PKG_INSTALL_LIBS)\n                file(GLOB ${pkg_name}_INSTALL_INCS ${${pkg_name}_SOURCE_DIR}/${PKG_INSTALL_INCS})\n                file(GLOB ${pkg_name}_INSTALL_LIBS ${${pkg_name}_SOURCE_DIR}/${PKG_INSTALL_LIBS})\n                file(COPY ${${pkg_name}_INSTALL_INCS} DESTINATION ${${pkg_name}_BASE_DIR}/include)\n                file(COPY ${${pkg_name}_INSTALL_LIBS} DESTINATION ${${pkg_name}_BASE_DIR}/lib)\n            else()\n                __exec_cmd(COMMAND ${CMAKE_MAKE_PROGRAM} install WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR})\n            endif()\n        endif()\n    endif()\n\n    if(PKG_LIBS)\n        __find_pkg_then_add_target_exe(${pkg_name} ${PKG_LIB_PATH} ${PKG_EXE})\n        __find_pkg_then_add_target_lib(${pkg_name} ${PKG_LIB_PATH} ${PKG_LIBS})\n        set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE)\n        if(NOT ${pkg_name}_LIBS)\n            message(FATAL_ERROR \"Can not find pkg: ${pkg_name}\")\n        endif()\n    else()\n        find_package(${__FIND_PKG_NAME} ${PKG_VER} QUIET ${MS_FIND_NO_DEFAULT_PATH})\n        if(${__FIND_PKG_NAME}_FOUND)\n            set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE)\n            message(\"Found pkg: ${${__FIND_PKG_NAME}_LIBRARIES}\")\n            return()\n        endif()\n    endif()\nendfunction()\n"
  },
  {
    "path": "docs/api/api_python/client/mindspore_serving.client.Client.rst",
    "content": "﻿\n.. py:class:: mindspore_serving.client.Client(address, servable_name, method_name, version_number=0, ssl_config=None)\n\n    通过Client访问Serving服务器gRPC接口，可用于创建请求、访问服务和解析结果。\n\n    .. note:: Serving客户端在一个请求中可以发送的最大数据量为512MB，Serving服务器可以接收的最大数据量可以配置为1~512MB，默认为100MB。\n\n    参数：\n        - **address** (str) - Serving服务器gRPC接口地址。\n        - **servable_name** (str) - Serving服务器提供的服务的名称。\n        - **method_name** (str) - 服务中方法的名称。\n        - **version_number** (int, optional) - 服务的版本号，``0`` 表示指定所有正在运行的一个或多个版本的服务中最大的版本号。默认值：``0``。\n        - **ssl_config** (mindspore_serving.client.SSLConfig, optional) - SSL配置，如果 ``None``，则禁用SSL。默认值：``None``。\n\n    异常：\n        - **RuntimeError** - 参数的类型或值无效，或发生其他错误。\n\n    .. py:method:: infer(instances)\n\n        用于创建请求、访问服务、解析和返回结果。\n\n        参数：\n            - **instances** (Union[dict, tuple[dict]]) - 一个实例或一组实例的输入，每个实例都是dict。dict的key是输入名称，value是输入值。value的类型可以是Python int、float、bool、str、bytes、numpy scalar或numpy array对象。\n\n        异常：\n            - **RuntimeError** - 参数的类型或值无效，或发生其他错误。\n\n    .. py:method:: infer_async(instances)\n\n        用于创建请求，异步访问服务。\n\n        参数：\n            - **instances** (Union[dict, tuple[dict]]) - 一个实例或一组实例的输入，每个实例都是dict。dict的key是输入名称，value是输入值。value的类型可以是Python int、float、bool、str、bytes、numpy scalar或numpy array对象。\n\n        异常：\n            - **RuntimeError** - 参数的类型或值无效，或发生其他错误。\n"
  },
  {
    "path": "docs/api/api_python/client/mindspore_serving.client.SSLConfig.rst",
    "content": "﻿\n.. py:class:: mindspore_serving.client.SSLConfig(certificate=None, private_key=None, custom_ca=None)\n\n    Serving服务器gRPC SSL使能时，通过SSLConfig封装SSL证书等相关参数。\n\n    参数：\n        - **certificate** (str, 可选) - PEM编码的证书链内容，如果为 ``None``，表示不使用证书链。默认值：``None``。\n        - **private_key** (str, 可选) - PEM编码的私钥内容，如果为 ``None``，表示不使用私钥。默认值：``None``。\n        - **custom_ca** (str, 可选) - PEM编码的根证书内容，如果为 ``None``，gRPC运行时将从默认位置加载根证书。默认值：``None``。\n\n    异常：\n        - **RuntimeError** - 参数的类型或值无效。\n"
  },
  {
    "path": "docs/api/api_python/client/mindspore_serving.client.rst",
    "content": "﻿MindSpore Serving客户端API，用于通过gRPC访问MindSpore Serving服务器。\n"
  },
  {
    "path": "docs/api/api_python/mindspore_serving.client.rst",
    "content": "mindspore_serving.client\n==========================\n\n.. include:: client/mindspore_serving.client.rst\n\n.. include:: client/mindspore_serving.client.Client.rst\n\n.. include:: client/mindspore_serving.client.SSLConfig.rst\n\n.. automodule:: mindspore_serving.client\n    :members:\n"
  },
  {
    "path": "docs/api/api_python/mindspore_serving.server.rst",
    "content": "mindspore_serving.server\n=========================\n\n.. include:: server/mindspore_serving.server.rst\n\n.. include:: server/mindspore_serving.server.start_grpc_server.rst\n\n.. include:: server/mindspore_serving.server.start_restful_server.rst\n\n.. include:: server/mindspore_serving.server.stop.rst\n\n.. include:: server/mindspore_serving.server.start_servables.rst\n\n.. include:: server/mindspore_serving.server.ServableStartConfig.rst\n\n.. include:: server/mindspore_serving.server.SSLConfig.rst\n\n.. automodule:: mindspore_serving.server\n    :members:\n\nmindspore_serving.server.register\n----------------------------------\n\n.. include:: server/register/mindspore_serving.server.register.rst\n\n.. include:: server/register/mindspore_serving.server.register.declare_model.rst\n\n.. include:: server/register/mindspore_serving.server.register.Model.rst\n\n.. include:: server/register/mindspore_serving.server.register.AscendDeviceInfo.rst\n\n.. include:: server/register/mindspore_serving.server.register.CPUDeviceInfo.rst\n\n.. include:: server/register/mindspore_serving.server.register.GPUDeviceInfo.rst\n\n.. include:: server/register/mindspore_serving.server.register.Context.rst\n\n.. include:: server/register/mindspore_serving.server.register.register_method.rst\n\n.. include:: server/register/mindspore_serving.server.register.add_stage.rst\n\n.. automodule:: mindspore_serving.server.register\n    :members:\n\nmindspore_serving.server.distributed\n-------------------------------------\n\n.. include:: server/distributed/mindspore_serving.server.distributed.rst\n\n.. include:: server/distributed/mindspore_serving.server.distributed.start_servable.rst\n\n.. include:: server/distributed/mindspore_serving.server.distributed.startup_agents.rst\n\n.. include:: server/distributed/mindspore_serving.server.distributed.declare_servable.rst\n\n.. automodule:: mindspore_serving.server.distributed\n    :members:\n"
  },
  {
    "path": "docs/api/api_python/server/distributed/mindspore_serving.server.distributed.declare_servable.rst",
    "content": "﻿\n.. py:function:: mindspore_serving.server.distributed.declare_servable(rank_size, stage_size, with_batch_dim=True, without_batch_dim_inputs=None, enable_pipeline_infer=False)\n\n    用于在servable_config.py中声明分布式服务，详细可参考\n    `基于MindSpore Serving部署分布式推理服务 <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_distributed_example.html>`_ 。\n\n    参数：\n        - **rank_size** (int) - 分布式模型的rank大小。\n        - **stage_size** (int) - 分布式模型的stage大小。\n        - **with_batch_dim** (bool, 可选) - 模型输入和输出shape的第一个维度是否是batch维度。默认值：``True``。\n        - **without_batch_dim_inputs** (Union[int, tuple[int], list[int]], 可选) - 当 `with_batch_dim` 为 ``True`` 时，用于指定shape不包括batch维度的模型输入的索引，比如模型输入0的shape不包括batch维度，则 `without_batch_dim_inputs=(0,)` 。默认值：``None``。\n        - **enable_pipeline_infer** (bool, 可选) - 是否开启流水线并行推理，流水线并行可有效提升推理性能，详情可参考 `流水线并行 <https://www.mindspore.cn/tutorials/experts/zh-CN/master/parallel/pipeline_parallel.html>`_ 。默认值：``False``。\n\n    返回：\n        `Model` ，此模型的标识，可以用来调用 `Model.call` 或作为 `add_stage` 的输入。\n\n    异常：\n        - **RuntimeError** - 参数的类型或值无效。\n"
  },
  {
    "path": "docs/api/api_python/server/distributed/mindspore_serving.server.distributed.rst",
    "content": "﻿Serving服务器启动分布式模型服务的接口。如何配置和启动分布式模型，请查看 \n`基于MindSpore Serving部署分布式推理服务 <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_distributed_example.html>`_ 。\n"
  },
  {
    "path": "docs/api/api_python/server/distributed/mindspore_serving.server.distributed.start_servable.rst",
    "content": "﻿\n.. py:function:: mindspore_serving.server.distributed.start_servable(servable_directory, servable_name, rank_table_json_file, version_number=1, distributed_address='0.0.0.0:6200', wait_agents_time_in_seconds=0)\n\n    启动在 `servable_directory` 中定义的名为 `servable_name` 的分布式服务。\n\n    参数：\n        - **servable_directory** (str) - 服务所在的目录。预期有一个名为 `servable_name` 的目录。详细信息可以查看 `通过配置模型提供Servable <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_model.html>`_ 。\n        - **servable_name** (str) - 服务名称。\n        - **version_number** (int, 可选) - 要加载的服务版本号。版本号应为正整数，从1开始。默认值：``1``。\n        - **rank_table_json_file** (str) - rank table json文件名。\n        - **distributed_address** (str, 可选) - Worker代理（Agent）连接的分布式Worker服务器地址。默认值： ``\"0.0.0.0:6200\"`` 。\n        - **wait_agents_time_in_seconds** (int, 可选) - 等待所有Worker代理就绪的最长时间（以秒为单位），``0`` 表示无限时间。默认值：``0``。\n\n    异常：\n        - **RuntimeError** - 启动分布式服务失败。\n"
  },
  {
    "path": "docs/api/api_python/server/distributed/mindspore_serving.server.distributed.startup_agents.rst",
    "content": "﻿\n.. py:function:: mindspore_serving.server.distributed.startup_agents(distributed_address, model_files, group_config_files=None, agent_start_port=7000, agent_ip=None, rank_start=None, dec_key=None, dec_mode='AES-GCM')\n\n    在当前计算机上启动所有所需的Worker代理（Agent），这组Worker代理进程将负责本机器设备上的推理任务，详细可参考\n    `基于MindSpore Serving部署分布式推理服务 <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_distributed_example.html>`_ 。\n\n    参数：\n        - **distributed_address** (str) - Worker代理连接分布式Worker服务器地址。\n        - **model_files** (Union[list[str], tuple[str]]) - 当前计算机中需要的所有模型文件，为绝对路径或相对于此启动Python脚本的路径。\n        - **group_config_files** (Union[list[str], tuple[str]], 可选) - 当前计算机中需要的所有组配置文件，相对于此启动Python脚本的绝对路径或相对路径，为 ``None`` 时表示没有配置文件。默认值：``None``。\n        - **agent_start_port** (int, 可选) - Worker代理连接Worker服务器的起始端口号。默认值：``7000``。\n        - **agent_ip** (str, 可选) - 本地Worker代理ip，如果为无，则代理ip将从rank table文件中获取。参数 `agent_ip` 和参数 `rank_start` 必须同时有值，或者同时是 ``None``。默认值：``None``。\n        - **rank_start** (int, 可选) - 此计算机的起始rank id，如果为 ``None``，则将从rank table文件中获取rank id。参数 `agent_ip` 和参数 `rank_start` 必须同时有值，或者同时是 ``None``。默认值：``None``。\n        - **dec_key** (bytes, 可选) - 用于解密的密钥，类型为字节。有效长度为16、24或32。默认值：``None``。\n        - **dec_mode** (str, 可选) - 指定解密模式，在设置了 `dec_key` 时生效。值可为： ``'AES-GCM'`` 或 ``'AES-CBC'`` 。默认值： ``'AES-GCM'`` 。\n\n    异常：\n        - **RuntimeError** - 启动Worker代理失败。\n"
  },
  {
    "path": "docs/api/api_python/server/mindspore_serving.server.SSLConfig.rst",
    "content": "﻿\n.. py:class:: mindspore_serving.server.SSLConfig(certificate, private_key, custom_ca=None, verify_client=False)\n\n    Serving服务器中，使能gRPC或RESTful服务器SSL功能时，SSL的参数配置。\n\n    参数：\n        - **certificate** (str) - PEM编码的证书链内容，如果值为 ``None``，则表示不使用证书链。\n        - **private_key** (str) - PEM编码的私钥内容，如果值为 ``None``，则表示不使用私钥。\n        - **custom_ca** (str, 可选) - PEM编码的根证书内容。当 `verify_client` 为 ``True`` 时， `custom_ca` 必须指定。当 `verify_client` 为 ``False`` 时，将忽略此参数。默认值：``None``。\n        - **verify_client** (bool, 可选) - 如果 `verify_client` 为 ``True``，则启用客户端服务器双向认证。如果为 ``False``，则仅启用客户端对服务器的单向认证。默认值：``False``。\n\n    异常：\n        - **RuntimeError** - 参数的类型或值无效。\n"
  },
  {
    "path": "docs/api/api_python/server/mindspore_serving.server.ServableStartConfig.rst",
    "content": "﻿\n.. py:class:: mindspore_serving.server.ServableStartConfig(servable_directory, servable_name, device_ids=None, version_number=0, device_type=None, num_parallel_workers=0, dec_key=None, dec_mode='AES-GCM')\n\n    启动一个服务的配置。详情请查看\n    `基于MindSpore Serving部署推理服务 <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_example.html>`_ 和\n    `通过配置模型提供Servable <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_model.html>`_ 。\n\n    参数：\n        - **servable_directory** (str) - 服务所在的目录。预期有一个名为 `servable_name` 的目录。\n        - **servable_name** (str) - 服务名称。\n        - **device_ids** (Union[int, list[int], tuple[int]], 可选) - 模型部署和运行的设备列表，列表中的每个会设备将部署和运行一个服务副本。当设备类型为Nvidia GPU、Ascend 310P/910时使用。默认值：``None``。\n        - **version_number** (int, 可选) - 要加载的服务的版本号。版本号应为正整数，从1开始，``0`` 表示加载最新版本。默认值：``0``。\n        - **device_type** (str, 可选) - 模型部署的目标设备类型，目前支持 ``\"Ascend\"``、``\"GPU\"``、``\"CPU\"`` 和 ``None``。默认值：``None``。\n\n          - ``\"Ascend\"``：目标设备为Ascend 310P/910等。\n          - ``\"GPU\"``：目标设备为Nvidia GPU。\n          - ``\"CPU\"``：目标设备为CPU。\n          - ``None``：系统根据实际的后端设备和MindSpor推理包决定目标设备，推荐使用默认值 ``None``。\n\n        - **num_parallel_workers** (int, 可选) - 处理Python任务的进程数，用于提高预处理、后处理等Python任务的处理能力。值小于 `device_ids` 的长度时，处理Python任务的进程数为 `device_ids` 的长度。值的范围为[0,64]。默认值：``0``。\n        - **dec_key** (bytes, 可选) - 用于解密的字节类型密钥。有效长度为16、24或32。默认值：``None``。\n        - **dec_mode** (str, 可选) - 指定解密模式，设置 `dec_key` 时生效。值可为： ``'AES-GCM'`` 或 ``'AES-CBC'`` 。默认值： ``'AES-GCM'`` 。\n\n    异常：\n        - **RuntimeError** - 参数的类型或值无效。\n\n"
  },
  {
    "path": "docs/api/api_python/server/mindspore_serving.server.rst",
    "content": "MindSpore Serving是一个轻量级、高性能的服务模块，旨在帮助MindSpore开发者在生产环境中高效部署在线推理服务。\n\n用户可通过MindSpore Serving server API启动服务，启动gRPC和RESTful（HTTP）服务器。其中一个服务一般可由一个模型或者一组模型组合提供。客户端通过gRPC和RESTful（HTTP）服务器发送推理任务，接收推理结果。"
  },
  {
    "path": "docs/api/api_python/server/mindspore_serving.server.start_grpc_server.rst",
    "content": "﻿\n.. py:function:: mindspore_serving.server.start_grpc_server(address, max_msg_mb_size=100, ssl_config=None)\n\n    启动gRPC服务器，用于Serving客户端和Serving服务器之间的通信。\n\n    参数：\n        - **address** (str) - gRPC服务器地址，地址可以是 `{ip}:{port}` 或 `unix:{unix_domain_file_path}` 。\n\n          - `{ip}:{port}` - Internet domain socket地址。\n          - `unix:{unix_domain_file_path}` - Unix domain socket地址，用于与同一台计算机上的多个进程通信。 `{unix_domain_file_path}` 可以是相对路径或绝对路径，但文件所在的目录必须已经存在。\n\n        - **max_msg_mb_size** (int, 可选) - 可接收的最大gRPC消息大小（MB），取值范围[1, 512]。默认值：``100``。\n        - **ssl_config** (mindspore_serving.server.SSLConfig, 可选) - 服务器的SSL配置，如果 ``None``，则禁用SSL。默认值：``None``。\n\n    异常：\n        - **RuntimeError** - 启动gRPC服务器失败：参数校验失败，gRPC地址错误或端口重复。\n"
  },
  {
    "path": "docs/api/api_python/server/mindspore_serving.server.start_restful_server.rst",
    "content": "﻿\n.. py:function:: mindspore_serving.server.start_restful_server(address, max_msg_mb_size=100, ssl_config=None)\n\n    启动RESTful服务器，用于Serving客户端和Serving服务器之间的通信。\n\n    参数：\n        - **address** (str) - RESTful服务器地址，地址应为Internet domain socket地址。\n        - **max_msg_mb_size** (int, 可选) - 最大可接收的RESTful消息大小，以MB为单位，取值范围[1, 512]。默认值：``100``。\n        - **ssl_config** (mindspore_serving.server.SSLConfig, 可选) - 服务器的SSL配置，如果是 ``None``，则禁用SSL。默认值：``None``。\n\n    异常：\n        - **RuntimeError** - 启动RESTful服务器失败：参数校验失败，RESTful地址错误或端口重复。\n"
  },
  {
    "path": "docs/api/api_python/server/mindspore_serving.server.start_servables.rst",
    "content": "﻿\n.. py:function:: mindspore_serving.server.start_servables(servable_configs, enable_lite=False)\n\n    用于Serving服务器中启动一个或多个服务，一个模型可结合预处理、后处理提供一个服务，多个模型也可串接组合提供一个服务。\n\n    本接口可以用来启动多个不同的服务。一个服务可以部署在多个设备上，其中每个设备运行一个服务副本。\n\n    在Ascend 910硬件平台上，每个服务的每个副本都独占一个设备。不同的服务或同一服务的不同版本需要部署在不同的设备上。在Ascend 310P和GPU硬件平台上，一个设备可以被多个服务共享，不同服务或同一服务的不同版本可以部署在同一设备上，实现设备复用。\n\n    如何配置模型提供服务请查看\n    `基于MindSpore Serving部署推理服务 <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_example.html>`_ 和\n    `通过配置模型提供Servable <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_model.html>`_ 。\n\n    参数：\n        - **servable_configs** (Union[ServableStartConfig, list[ServableStartConfig], tuple[ServableStartConfig]]) - 一个或多个服务的启动配置。\n        - **enable_lite** (bool) - 是否使用MindSpore Lite推理后端。 默认值：``False``。\n\n    异常：\n        - **RuntimeError** - 启动一个或多个服务失败。相关日志可查看本Serving服务器启动脚本所在目录的子目录serving_logs。\n"
  },
  {
    "path": "docs/api/api_python/server/mindspore_serving.server.stop.rst",
    "content": "﻿\n.. py:function:: mindspore_serving.server.stop()\n\n    停止Serving服务器的运行。\n"
  },
  {
    "path": "docs/api/api_python/server/register/mindspore_serving.server.register.AscendDeviceInfo.rst",
    "content": "﻿\n.. py:class:: mindspore_serving.server.register.AscendDeviceInfo(**kwargs)\n\n    用于设置Ascend设备配置。\n\n    参数：\n        - **insert_op_cfg_path** (str, 可选) - AIPP配置文件的路径。\n        - **input_format** (str, 可选) - 模型输入格式，取值可以是 ``\"ND\"`` 、 ``\"NCHW\"`` 、 ``\"NHWC\"`` 、 ``\"CHWN\"`` 、 ``\"NC1HWC0\"`` 或 ``\"NHWC1C0\"`` 。\n        - **input_shape** (str, 可选) - 模型输入形状，如 ``\"input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1\"`` 。\n        - **output_type** (str, 可选) - 模型输出类型，值可以是 ``\"FP16\"`` 、 ``\"UINT8\"`` 或 ``\"FP32\"`` ，默认值： ``\"FP32\"`` 。\n        - **precision_mode** (str, 可选) - 模型精度模式，取值可以是 ``\"force_fp16\"`` 、 ``\"allow_fp32_to_fp16\"`` 、 ``\"must_keep_origin_dtype\"`` 或者 ``\"allow_mix_precision\"`` 。默认值： ``\"force_fp16\"`` 。\n        - **op_select_impl_mode** (str, 可选) - 运算符选择模式，值可以是 ``\"high_performance\"`` 或 ``\"high_precision\"`` 。默认值： ``\"high_performance\"`` 。\n        - **fusion_switch_config_path** (str, 可选) - 融合配置文件路径，包括图融合和UB融合。系统内置图融合和UB融合规则，默认启用。您可以通过设置此参数禁用指定的融合规则。\n        - **buffer_optimize_mode** (str, 可选) - 数据缓存优化策略，值可以是 ``\"l1_optimize\"`` 、 ``\"l2_optimize\"`` 、 ``\"off_optimize\"`` 或者 ``\"l1_and_l2_optimize\"`` 。默认 ``\"l2_optimize\"`` 。\n\n    异常：\n        - **RuntimeError** - Ascend设备配置无效。\n"
  },
  {
    "path": "docs/api/api_python/server/register/mindspore_serving.server.register.CPUDeviceInfo.rst",
    "content": "﻿\n.. py:class:: mindspore_serving.server.register.CPUDeviceInfo(**kwargs)\n\n    用于CPU设备配置。\n\n    参数：\n        - **precision_mode** (str, 可选) - 推理精度选项，值可以是 ``\"origin\"`` 或 ``\"fp16\"`` ， ``\"origin\"`` 表示以模型中指定精度进行推理， ``\"fp16\"`` 表示以FP16精度进行推理。默认值： ``\"origin\"`` 。\n\n    异常：\n        - **RuntimeError** - 选项无效，或值类型不是字符串。\n"
  },
  {
    "path": "docs/api/api_python/server/register/mindspore_serving.server.register.Context.rst",
    "content": "﻿\n.. py:class:: mindspore_serving.server.register.Context(**kwargs)\n\n    Context用于自定义设备配置，如果不指定Context，MindSpore Serving将使用默认设备配置。当使用推理后端为MindSpore Lite，且目标设备为Ascend或Nvidia GPU时，模型部分算子可能运行在CPU设备上，将额外配置 `CPUDeviceInfo` 。\n\n    参数：\n        - **thread_num** (int, 可选) - 设置运行时的CPU线程数量，该选项仅当推理后端为MindSpore Lite有效。\n        - **thread_affinity_core_list** (tuple[int], list[int], 可选) - 设置运行时的CPU绑核列表，该选项仅当推理后端为MindSpore Lite有效。\n        - **enable_parallel** (bool, 可选) - 设置运行时是否支持并行，该选项仅当推理后端为MindSpore Lite有效。\n\n    异常：\n        - **RuntimeError** - 输入参数的类型或值无效。\n\n    .. py:method:: append_device_info(device_info)\n\n       用于添加一个用户自定义的设备配置。\n\n       参数：\n           - **device_info** (Union[CPUDeviceInfo, GPUDeviceInfo, AscendDeviceInfo]) - 用户自定义设备配置，用户不指定设备配置时将使用默认值。可以为每个可能的设备自定义设备配置，系统根据实际的后端设备和推理包选择所需的设备信息。\n\n       异常：\n           - **RuntimeError** - 输入参数的类型或值无效。\n"
  },
  {
    "path": "docs/api/api_python/server/register/mindspore_serving.server.register.GPUDeviceInfo.rst",
    "content": "﻿\n.. py:class:: mindspore_serving.server.register.GPUDeviceInfo(**kwargs)\n\n    用于GPU设备配置。\n\n    参数：\n        - **precision_mode** (str, 可选) - 推理精度选项，值可以是 ``\"origin\"`` 或 ``\"fp16\"`` ， ``\"origin\"`` 表示以模型中指定精度进行推理， ``\"fp16\"`` 表示以FP16精度进行推理。默认值： ``\"origin\"`` 。\n\n    异常：\n        - **RuntimeError** - 选项无效，或值类型不是字符串。\n"
  },
  {
    "path": "docs/api/api_python/server/register/mindspore_serving.server.register.Model.rst",
    "content": "﻿\n.. py:class:: mindspore_serving.server.register.Model(model_key)\n\n    用于表示一个声明的模型。用户不应该直接构造 `Model` 对象，而是来自于 `declare_model` 或 `declare_servable` 的返回。\n\n    参数：\n        - **model_key** (str) - 模型的唯一标志。\n\n    .. py:method:: call(*args, subgraph=0)\n\n        调用模型推理接口。\n\n        参数：\n            - **args** - 实例的元组/列表，或一个实例的输入。\n            - **subgraph** (int, 可选) - 子图索引，当一个模型中存在多个子图时使用。默认值：``0``。\n\n        返回：\n            当输入参数 `args` 为元组/列表时，返回为instances的元组，当前输入 `args` 为一个实例的输入时，输出为这个实例的输出。\n\n        异常：\n            - **RuntimeError** - 输入无效。\n"
  },
  {
    "path": "docs/api/api_python/server/register/mindspore_serving.server.register.add_stage.rst",
    "content": "﻿\n.. py:function:: mindspore_serving.server.register.add_stage(stage, *args, outputs_count, batch_size=None, tag=None)\n\n    在服务的 `servable_config.py` 中，通过 `register_method` 装饰（wrap）Python函数定义服务的一个方法（method），本接口用于定义这个方法中的一个运行步骤（stage），可以是一个Python函数或者模型。\n\n    .. note:: 入参 `args` 的长度应等于函数或模型的输入个数。 \n\n    参数：\n        - **stage** (Union(function, Model)) - 用户定义的Python函数或由 `declare_model` 返回 `Model` 对象。\n        - **outputs_count** (int) - 用户定义的Python函数或模型的输出个数。\n        - **batch_size** (int, 可选) - 仅当stage是Python函数，且函数一次可以处理多实例时，此参数有效。默认值：``None``。\n\n          - ``None``，函数的输入将是一个实例的输入。\n          - ``0``，函数的输入将是实例的元组对象，实例元组的最大长度由服务器根据模型的batch大小确定。\n          - int value >= 1，函数的输入将是实例的元组对象，实例元组的最大长度是 `batch_size` 指定的值。\n\n        - **args** - stage输入占位符，可以是 `register_method` 装饰（wrap）的函数的输入或其他 `add_stage` 的输出。 `args` 的长度应等于Python函数或模型的输入数量。\n        - **tag** (str, 可选) - stage的自定义标签，如 ``\"preprocess\"``，默认值：``None``。\n\n    异常：\n        - **RuntimeError** - 参数的类型或值无效，或发生其他错误。\n"
  },
  {
    "path": "docs/api/api_python/server/register/mindspore_serving.server.register.declare_model.rst",
    "content": "﻿\n.. py:function:: mindspore_serving.server.register.declare_model(model_file, model_format, with_batch_dim=True, options=None, without_batch_dim_inputs=None, context=None, config_file=None)\n\n    在服务的servable_config.py配置文件中使用，用于声明一个模型。\n\n    .. note:: 本接口需要在Serving服务器导入servable_config.py时生效。因此，建议在servable_config.py中全局使用此接口。\n\n    .. warning:: 参数 `options` 从1.6.0版本中已弃用，并将在未来版本中删除，请改用参数 `context` 。\n\n    参数：\n        - **model_file** (Union[str, list[str]]) - 模型文件名。\n        - **model_format** (str) - 模型格式， ``\"MindIR\"`` 或 ``\"MindIR_Lite\"`` ，忽略大小写。\n        - **with_batch_dim** (bool, 可选) - 模型输入和输出的shape第一个维度是否是batch维度。默认值：``True``。\n        - **options** (Union[AclOptions, GpuOptions], 可选) - 模型的选项，支持 ``AclOptions`` 或 ``GpuOptions`` 。默认值：``None``。\n        - **context** (Context) - 用于配置设备环境的上下文信息，值为 ``None`` 时，Serving将依据部署的设备设置默认的设备上下文。默认值：``None``。\n        - **without_batch_dim_inputs** (Union[int, tuple[int], list[int]], 可选) - 当 `with_batch_dim` 为 ``True`` 时，用于指定shape不包括batch维度的模型输入的索引，比如模型输入0的shape不包括batch维度，则 `without_batch_dim_inputs` 可赋值为 `(0,)` 。默认值：``None``。\n        - **config_file** (str, 可选) - 用于设置混合精度推理的配置文件。文件路径可以是servable_config.py所在目录的绝对路径或相对路径。默认值：``None``。\n\n    返回：\n        `Model` ，此模型的标识，可以用来调用 `Model.call` 或作为 `add_stage` 的输入。\n\n    异常：\n        - **RuntimeError** - 参数的类型或值无效。\n"
  },
  {
    "path": "docs/api/api_python/server/register/mindspore_serving.server.register.register_method.rst",
    "content": "﻿\n.. py:function:: mindspore_serving.server.register.register_method(output_names)\n\n    在服务的servable_config.py配置文件中使用，用于注册服务的方法，一个服务可以包括一个或多个方法，每个方法可基于模型提供不同的功能，客户端访问服务时需要指定服务和方法。MindSpore Serving支持由多个Python函数和多个模型组合串接提供服务。\n\n    .. note:: 本接口需要在Serving服务器导入servable_config.py时生效。因此，建议在servable_config.py中全局使用此接口。\n\n    此接口将定义方法的签名和处理流程。\n\n    签名包括方法名称、方法的输入和输出名称。当Serving客户端访问服务时，客户端需要指定服务名称、方法名称，并提供一个或多个推理实例。每个实例通过输入名称指定输入数据，并通过输出名称获取输出结果。\n\n    处理流程由一个或多个阶段（stage）组成，每个阶段可以是一个Python函数或模型。即，一个方法的处理流程可以包括一个或多个Python函数和一个或多个模型。此外，接口还定义了这些阶段之间的数据流。\n\n    参数：\n        - **output_names** (Union[str, tuple[str], list[str]]) - 指定方法的输出名称。输入名称通过注册函数的参数名称指定。\n\n    异常：\n        - **RuntimeError** - 参数的类型或值无效，或发生其他错误。\n"
  },
  {
    "path": "docs/api/api_python/server/register/mindspore_serving.server.register.rst",
    "content": "﻿服务注册接口，在服务的servable_config.py配置文件中使用。如何配置servable_config.py文件，请查看 \n`通过配置模型提供Servable <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_model.html>`_ 。\n"
  },
  {
    "path": "engine/README.md",
    "content": "## Overview\n\nAn engine supports finetune and inference.\n"
  },
  {
    "path": "example/add_sub_pipeline/add_sub/servable_config.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"add model servable config\"\"\"\n\nimport numpy as np\nfrom mindspore_serving.server import register\n\n\ndef add_trans_datatype(x1, x2):\n    \"\"\"define preprocess, this example has two inputs and two outputs\"\"\"\n    return x1.astype(np.float32), x2.astype(np.float32)\n\n\ndef add_1(x):\n    return x + 1\n\n\n# when with_batch_dim is set to False, only 2x2 add is supported\n# when with_batch_dim is set to True(default), Nx2 add is supported, while N is viewed as batch\n# float32 inputs/outputs\nadd_model = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\nsub_model = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n\n# register add_sub_only_model method in add_sub\n@register.register_method(output_names=[\"y\"])\ndef add_sub_only_model(x1, x2, x3):  # x1+x2-x3\n    \"\"\"method add_sub_only_model data flow definition\"\"\"\n    y = register.add_stage(add_model, x1, x2, outputs_count=1)\n    y = register.add_stage(sub_model, y, x3, outputs_count=1)\n    return y\n\n\n# register add_sub_complex method in add_sub\n@register.register_method(output_names=[\"y\"])\ndef add_sub_complex(x1, x2, x3):  # x1+x2+1-x3+1\n    \"\"\"method add_sub_complex data flow 
definition\"\"\"\n    x1, x2 = register.add_stage(add_trans_datatype, x1, x2, outputs_count=2)  # cast input to float32\n    y = register.add_stage(add_model, x1, x2, outputs_count=1)\n    y = register.add_stage(add_1, y, outputs_count=1)\n    y = register.add_stage(sub_model, y, x3, outputs_count=1)\n    y = register.add_stage(add_1, y, outputs_count=1)\n    return y\n"
  },
  {
    "path": "example/add_sub_pipeline/export_model/add_sub_model.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"add model generator\"\"\"\nimport os\nfrom shutil import copyfile\nimport numpy as np\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nimport mindspore.ops as ops\nimport mindspore as ms\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n\n\nclass AddNet(nn.Cell):\n    \"\"\"Define Net of add\"\"\"\n\n    def __init__(self):\n        super(AddNet, self).__init__()\n        self.add = ops.Add()\n\n    def construct(self, x_, y_):\n        \"\"\"construct add net\"\"\"\n        return self.add(x_, y_)\n\n\nclass SubNet(nn.Cell):\n    \"\"\"Define Net of sub\"\"\"\n\n    def __init__(self):\n        super(SubNet, self).__init__()\n        self.sub = ops.Sub()\n\n    def construct(self, x_, y_):\n        \"\"\"construct add net\"\"\"\n        return self.sub(x_, y_)\n\n\ndef export_net():\n    \"\"\"Export add net of 2x2 + 2x2, and copy output model `tensor_add.mindir` and `tensor_sub.mindir` to directory\n    ../add_sub/1\"\"\"\n    x = np.ones([2, 2]).astype(np.float32)\n    y = np.ones([2, 2]).astype(np.float32)\n    add = AddNet()\n    ms.export(add, ms.Tensor(x), ms.Tensor(y), file_name='tensor_add', file_format='MINDIR')\n\n    sub = SubNet()\n    ms.export(sub, ms.Tensor(x), ms.Tensor(y), file_name='tensor_sub', 
file_format='MINDIR')\n\n    dst_dir = '../add_sub/1'\n    try:\n        os.mkdir(dst_dir)\n    except OSError:\n        pass\n\n    dst_file = os.path.join(dst_dir, 'tensor_add.mindir')\n    copyfile('tensor_add.mindir', dst_file)\n    print(\"copy tensor_add.mindir to \" + dst_dir + \" success\")\n\n    dst_file = os.path.join(dst_dir, 'tensor_sub.mindir')\n    copyfile('tensor_sub.mindir', dst_file)\n    print(\"copy tensor_sub.mindir to \" + dst_dir + \" success\")\n\n\nif __name__ == \"__main__\":\n    export_net()\n"
  },
  {
    "path": "example/add_sub_pipeline/serving_client.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The client of example add_sub pipeline\"\"\"\n\nimport numpy as np\n\nfrom mindspore_serving.client import Client\n\n\ndef is_float_equal(left, right):\n    \"\"\"Check whether two float numbers are equal\"\"\"\n    return (np.abs(left-right) < 0.0001).all()\n\n\ndef run_add_sub_only_model():\n    \"\"\"invoke servable add_sub method add_sub_only_model\"\"\"\n    # x1+x2-x3\n    client = Client(\"127.0.0.1:5500\", \"add_sub\", \"add_sub_only_model\")\n    instances = []\n\n    # instance 1\n    x1 = np.asarray([[30, 30], [20, 20]]).astype(np.float32)\n    x2 = np.asarray([[20, 20], [20, 20]]).astype(np.float32)\n    x3 = np.asarray([[10, 10], [10, 10]]).astype(np.float32)\n    instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3})\n    expect_y = x1 + x2 - x3\n\n    result = client.infer(instances)\n    print(result)\n    assert len(result) == len(instances)\n    assert is_float_equal(result[0][\"y\"], expect_y)\n\n\ndef run_add_sub_complex():\n    \"\"\"invoke servable add_sub method add_sub_complex\"\"\"\n    # x1+x2+1-x3+1\n    client = Client(\"127.0.0.1:5500\", \"add_sub\", \"add_sub_complex\")\n    instances = []\n\n    # instance 1\n    x1 = np.asarray([[30, 30], [20, 20]]).astype(np.float32)\n    x2 = np.asarray([[20, 20], [20, 20]]).astype(np.float32)\n    x3 = 
np.asarray([[10, 10], [10, 10]]).astype(np.float32)\n    instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3})\n    expect_y = x1 + x2 + 1 - x3 + 1\n\n    result = client.infer(instances)\n    print(result)\n    assert len(result) == len(instances)\n    assert is_float_equal(result[0][\"y\"], expect_y)\n\n\nif __name__ == '__main__':\n    run_add_sub_only_model()\n    run_add_sub_complex()\n"
  },
  {
    "path": "example/add_sub_pipeline/serving_server.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The server of example add_sub pipeline\"\"\"\n\nimport os\nfrom mindspore_serving import server\n\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(__file__))\n\n    servable_config = server.ServableStartConfig(servable_directory=servable_dir, servable_name=\"add_sub\",\n                                                 device_ids=(0, 1))\n    server.start_servables(servable_configs=servable_config)\n\n    server.start_grpc_server(address=\"127.0.0.1:5500\")\n    server.start_restful_server(address=\"127.0.0.1:1500\")\n\n\nif __name__ == \"__main__\":\n    start()\n"
  },
  {
    "path": "example/lenet/export_model/export_lenet.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"export Lenet for mnist dataset\"\"\"\n\nimport os\nfrom shutil import copyfile\nfrom lenet.export import export_lenet\n\nif __name__ == '__main__':\n    export_lenet()\n    dst_dir = '../lenet/1'\n    try:\n        os.mkdir(dst_dir)\n    except OSError:\n        pass\n    dst_file = os.path.join(dst_dir, 'lenet.mindir')\n    copyfile('lenet.mindir', dst_file)\n"
  },
  {
    "path": "example/lenet/export_model/lenet/__init__.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Resnet export model\"\"\"\n"
  },
  {
    "path": "example/lenet/export_model/lenet/export.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"export checkpoint file into air, onnx, mindir models\"\"\"\n\nimport os\nimport numpy as np\nfrom easydict import EasyDict as ed\nimport mindspore\nfrom mindspore import Tensor, context, load_checkpoint, load_param_into_net, export\nfrom .src.lenet import LeNet5\n\nconfig = ed({\n    'num_classes': 10,\n    'batch_size': 2,\n    'image_height': 32,\n    'image_width': 32\n})\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\ncontext.set_context(device_id=0)\n\n\ndef export_lenet():\n    \"\"\"define lenet network\"\"\"\n    network = LeNet5(config.num_classes)\n    # load network checkpoint\n    cur_dir = os.path.dirname(os.path.realpath(__file__))\n    ckpt_file = os.path.join(cur_dir, 'lenet_ascend_v111_offical_cv_mnist_bs32_acc98.ckpt')\n    param_dict = load_checkpoint(ckpt_file)\n    load_param_into_net(network, param_dict)\n\n    # export network\n    inputs = Tensor(np.ones([config.batch_size, 1, config.image_height, config.image_width]), mindspore.float32)\n    export(network, inputs, file_name=\"lenet\", file_format=\"MINDIR\")\n\n\nif __name__ == \"__main__\":\n    export_lenet()\n"
  },
  {
    "path": "example/lenet/export_model/lenet/src/lenet.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"LeNet.\"\"\"\nimport mindspore.nn as nn\nfrom mindspore.common.initializer import Normal\n\n\nclass LeNet5(nn.Cell):\n    \"\"\"\n    Lenet network\n\n    Args:\n        num_class (int): Number of classes. Default: 10.\n        num_channel (int): Number of channels. Default: 1.\n\n    Returns:\n        Tensor, output tensor\n    Examples:\n        >>> LeNet(num_class=10)\n\n    \"\"\"\n\n    def __init__(self, num_class=10, num_channel=1, include_top=True):\n        super(LeNet5, self).__init__()\n        self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')\n        self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')\n        self.relu = nn.ReLU()\n        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)\n        self.include_top = include_top\n        if self.include_top:\n            self.flatten = nn.Flatten()\n            self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))\n            self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))\n            self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))\n\n    def construct(self, x):\n        \"\"\"Construct lenet\"\"\"\n        x = self.conv1(x)\n        x = self.relu(x)\n        x = self.max_pool2d(x)\n        x = self.conv2(x)\n        x = self.relu(x)\n        x = 
self.max_pool2d(x)\n        if not self.include_top:\n            return x\n        x = self.flatten(x)\n        x = self.relu(self.fc1(x))\n        x = self.relu(self.fc2(x))\n        x = self.fc3(x)\n        return x\n"
  },
  {
    "path": "example/lenet/lenet/servable_config.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Lenet config python file\"\"\"\nfrom io import BytesIO\nimport numpy as np\nfrom PIL import Image\n\nfrom mindspore_serving.server import register\n\n\ndef preprocess_eager(image):\n    \"\"\"\n    Define preprocess, input is image numpy, return preprocess result.\n    Return type can be numpy, str, bytes, int, float, or bool.\n    Use MindData Eager, this image processing can also use other image processing library, likes numpy, PIL or cv2 etc.\n    \"\"\"\n    image = Image.open(BytesIO(image.tobytes())).convert('L').resize((32, 32), Image.ANTIALIAS)\n    image = np.array(image, np.float32)\n    image = image / 255.0\n    return image\n\n\ndef postprocess_top1(score):\n    \"\"\"\n    Define postprocess. 
This example has one input and one output.\n    The input is the numpy tensor of the score, and the output is the label str of top one.\n    \"\"\"\n    max_idx = np.argmax(score)\n    return max_idx\n\n\nlenet_model = register.declare_model(model_file=\"lenet.mindir\", model_format=\"MindIR\")\n\n\n@register.register_method(output_names=[\"label\"])\ndef classify_top1(image):\n    \"\"\"Define method `classify_top1` for servable `lenet`.\n     The input is `image` and the output is `label`.\"\"\"\n    x = register.add_stage(preprocess_eager, image, outputs_count=1)\n    x = register.add_stage(lenet_model, x, outputs_count=1)\n    x = register.add_stage(postprocess_top1, x, outputs_count=1)\n    return x\n"
  },
  {
    "path": "example/lenet/serving_client.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Client for lenet\"\"\"\nimport os\nfrom mindspore_serving.client import Client\n\n\ndef read_images():\n    \"\"\"Read images for directory test_image\"\"\"\n    images_buffer = []\n    image_files = []\n    for path, _, file_list in os.walk(\"./test_image/\"):\n        for file_name in file_list:\n            image_file = os.path.join(path, file_name)\n            image_files.append(image_file)\n    for image_file in image_files:\n        with open(image_file, \"rb\") as fp:\n            images_buffer.append(fp.read())\n    return images_buffer, image_files\n\n\ndef run_classify_top1():\n    \"\"\"Client for servable lenet and method classify_top1\"\"\"\n    print(\"run_classify_top1-----------\")\n    client = Client(\"localhost:5500\", \"lenet\", \"classify_top1\")\n    instances = []\n    images_buffer, image_files = read_images()\n    for image in images_buffer:\n        instances.append({\"image\": image})\n    result = client.infer(instances)\n    print(result)\n    for item, file in zip(result, image_files):\n        print(f\"file: {file}, result: {item['label']}\")\n\n\ndef run_classify_top1_async():\n    \"\"\"Client for servable lenet and method classify_top1\"\"\"\n    print(\"run_classify_top1_async-----------\")\n    client = Client(\"localhost:5500\", 
\"lenet\", \"classify_top1\")\n    instances = []\n    images_buffer, image_files = read_images()\n    for image in images_buffer:\n        instances.append({\"image\": image})\n\n    result_future = client.infer_async(instances)\n    result = result_future.result()\n\n    print(result)\n    for item, file in zip(result, image_files):\n        print(f\"file: {file}, result: {item['label']}\")\n\n\ndef run_restful_classify_top1():\n    \"\"\"RESTful Client for servable lenet and method classify_top1\"\"\"\n    print(\"run_restful_classify_top1-----------\")\n    import base64\n    import requests\n    import json\n    instances = []\n    images_buffer, image_files = read_images()\n    for image in images_buffer:\n        base64_data = base64.b64encode(image).decode()\n        instances.append({\"image\": {\"b64\": base64_data}})\n    instances_map = {\"instances\": instances}\n    post_payload = json.dumps(instances_map)\n    ip = \"localhost\"\n    restful_port = 1500\n    servable_name = \"lenet\"\n    method_name = \"classify_top1\"\n    result = requests.post(f\"http://{ip}:{restful_port}/model/{servable_name}:{method_name}\", data=post_payload)\n    print(result.text)\n    result = json.loads(result.text)\n    for item, file in zip(result[\"instances\"], image_files):\n        print(f\"file: {file}, result: {item['label']}\")\n\n\nif __name__ == '__main__':\n    run_classify_top1()\n    run_classify_top1_async()\n    run_restful_classify_top1()\n"
  },
  {
    "path": "example/lenet/serving_server.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Start Servable lenet\"\"\"\n\nimport os\nimport sys\nfrom mindspore_serving import server\n\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    config = server.ServableStartConfig(servable_directory=servable_dir, servable_name=\"lenet\", device_ids=(0, 1))\n    server.start_servables(config)\n\n    server.start_grpc_server(\"127.0.0.1:5500\")\n    server.start_restful_server(\"127.0.0.1:1500\")\n\n\nif __name__ == \"__main__\":\n    start()\n"
  },
  {
    "path": "example/matmul_distributed/export_model/distributed_inference.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n'''distributed inference\nThe sample can be run on Ascend 910 AI processor.\n'''\nimport numpy as np\nfrom net import Net\nfrom mindspore import context, Model, Tensor, export\nfrom mindspore.communication import init\n\n\ndef test_inference():\n    \"\"\"distributed inference after distributed training\"\"\"\n    context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n    init(backend_name=\"hccl\")\n    context.set_auto_parallel_context(full_batch=True, parallel_mode=\"semi_auto_parallel\",\n                                      device_num=8, group_ckpt_save_file=\"./group_config.pb\")\n\n    predict_data = create_predict_data()\n    network = Net(matmul_size=(96, 16))\n    model = Model(network)\n    model.infer_predict_layout(Tensor(predict_data))\n    export(model.predict_network, Tensor(predict_data), file_name=\"matmul\", file_format=\"MINDIR\")\n\n\ndef create_predict_data():\n    \"\"\"user-defined predict data\"\"\"\n    inputs_np = np.random.randn(128, 96).astype(np.float32)\n    return Tensor(inputs_np)\n"
  },
  {
    "path": "example/matmul_distributed/export_model/export_model.sh",
    "content": "#!/bin/bash\n\nEXEC_PATH=$(pwd)\n\nexport RANK_TABLE_FILE=${EXEC_PATH}/rank_table_8pcs.json\nexport RANK_SIZE=8\n\nrm -rf device*\nfor ((i = 1; i < ${RANK_SIZE}; i++)); do\n  mkdir device$i\n  cp *.py ./device$i\n  cd ./device$i\n  export DEVICE_ID=$i\n  export RANK_ID=$i\n  echo \"start inference for device $i\"\n  pytest -sv ./distributed_inference.py::test_inference >inference.log$i 2>&1 &\n  cd ../\ndone\n\nmkdir device0\ncp *.py ./device0\ncd ./device0\nexport DEVICE_ID=0\nexport RANK_ID=0\necho \"start inference for device 0\"\npytest -sv ./distributed_inference.py::test_inference >inference.log0 2>&1\nif [ $? -eq 0 ]; then\n  echo \"inference success\"\nelse\n  echo \"inference failed\"\n  cat inference.log0\n  exit 2\nfi\ncd ../\n\nls device*/ -l\n\nnum=`ls device*/matmul.mindir -l | wc -l`\nif [ ${num} -ne 8 ]\nthen\n  echo \"export matmul mindir failed\"\n  cat device0/inference.log0\n  exit 2\nfi\n\noutput_dir=../model\nrm -rf ${output_dir}/device*\nfor ((i = 0; i < ${RANK_SIZE}; i++)); do\n  mkdir -p ${output_dir}/device${i}\n  cp device${i}/*.mindir ${output_dir}/device${i}/\n  cp device${i}/*.pb ${output_dir}/device${i}/\ndone\necho \"copy models success\"\n"
  },
  {
    "path": "example/matmul_distributed/export_model/net.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n'''net\nThe sample can be run on Ascend 910 AI processor.\n'''\nimport numpy as np\nfrom mindspore import Tensor, Parameter, ops\nfrom mindspore.nn import Cell\n\n\nclass Net(Cell):\n    \"\"\"Net\"\"\"\n\n    def __init__(self, matmul_size, transpose_a=False, transpose_b=False, strategy=None):\n        \"\"\"init\"\"\"\n        super().__init__()\n        matmul_np = np.full(matmul_size, 0.5, dtype=np.float32)\n        self.matmul_weight = Parameter(Tensor(matmul_np))\n        self.matmul = ops.MatMul(transpose_a=transpose_a, transpose_b=transpose_b)\n        self.neg = ops.Neg()\n        if strategy is not None:\n            self.matmul.shard(strategy)\n\n    def construct(self, inputs):\n        \"\"\"construct\"\"\"\n        x = self.matmul(inputs, self.matmul_weight)\n        x = self.neg(x)\n        return x\n"
  },
  {
    "path": "example/matmul_distributed/export_model/rank_table_8pcs.json",
    "content": "{\n  \"version\": \"1.0\",\n  \"server_count\": \"1\",\n  \"server_list\": [\n    {\n      \"server_id\": \"127.0.0.1\",\n      \"device\": [\n        {\n          \"device_id\": \"0\",\n          \"device_ip\": \"192.1.27.6\",\n          \"rank_id\": \"0\"\n        },\n        {\n          \"device_id\": \"1\",\n          \"device_ip\": \"192.2.27.6\",\n          \"rank_id\": \"1\"\n        },\n        {\n          \"device_id\": \"2\",\n          \"device_ip\": \"192.3.27.6\",\n          \"rank_id\": \"2\"\n        },\n        {\n          \"device_id\": \"3\",\n          \"device_ip\": \"192.4.27.6\",\n          \"rank_id\": \"3\"\n        },\n        {\n          \"device_id\": \"4\",\n          \"device_ip\": \"192.1.27.7\",\n          \"rank_id\": \"4\"\n        },\n        {\n          \"device_id\": \"5\",\n          \"device_ip\": \"192.2.27.7\",\n          \"rank_id\": \"5\"\n        },\n        {\n          \"device_id\": \"6\",\n          \"device_ip\": \"192.3.27.7\",\n          \"rank_id\": \"6\"\n        },\n        {\n          \"device_id\": \"7\",\n          \"device_ip\": \"192.4.27.7\",\n          \"rank_id\": \"7\"\n        }\n      ],\n      \"host_nic_ip\": \"reserve\"\n    }\n  ],\n  \"status\": \"completed\"\n}\n"
  },
  {
    "path": "example/matmul_distributed/matmul/servable_config.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Distributed matmul config python file\"\"\"\nfrom mindspore_serving.server import distributed\nfrom mindspore_serving.server import register\n\nmodel = distributed.declare_servable(rank_size=8, stage_size=1, with_batch_dim=False)\n\n\n@register.register_method(output_names=[\"y\"])\ndef predict(x):\n    y = register.add_stage(model, x, outputs_count=1)\n    return y\n"
  },
  {
    "path": "example/matmul_distributed/rank_table_8pcs.json",
    "content": "{\n  \"version\": \"1.0\",\n  \"server_count\": \"1\",\n  \"server_list\": [\n    {\n      \"server_id\": \"127.0.0.1\",\n      \"device\": [\n        {\n          \"device_id\": \"0\",\n          \"device_ip\": \"192.1.27.6\",\n          \"rank_id\": \"0\"\n        },\n        {\n          \"device_id\": \"1\",\n          \"device_ip\": \"192.2.27.6\",\n          \"rank_id\": \"1\"\n        },\n        {\n          \"device_id\": \"2\",\n          \"device_ip\": \"192.3.27.6\",\n          \"rank_id\": \"2\"\n        },\n        {\n          \"device_id\": \"3\",\n          \"device_ip\": \"192.4.27.6\",\n          \"rank_id\": \"3\"\n        },\n        {\n          \"device_id\": \"4\",\n          \"device_ip\": \"192.1.27.7\",\n          \"rank_id\": \"4\"\n        },\n        {\n          \"device_id\": \"5\",\n          \"device_ip\": \"192.2.27.7\",\n          \"rank_id\": \"5\"\n        },\n        {\n          \"device_id\": \"6\",\n          \"device_ip\": \"192.3.27.7\",\n          \"rank_id\": \"6\"\n        },\n        {\n          \"device_id\": \"7\",\n          \"device_ip\": \"192.4.27.7\",\n          \"rank_id\": \"7\"\n        }\n      ],\n      \"host_nic_ip\": \"reserve\"\n    }\n  ],\n  \"status\": \"completed\"\n}\n"
  },
  {
    "path": "example/matmul_distributed/serving_agent.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Start agents of Distributed Servable matmul\"\"\"\n\nfrom mindspore_serving.server import distributed\n\n\ndef start_agents():\n    \"\"\"Start all the agents in current machine\"\"\"\n    model_files = []\n    group_configs = []\n    for i in range(8):\n        model_files.append(f\"model/device{i}/matmul.mindir\")\n        group_configs.append(f\"model/device{i}/group_config.pb\")\n\n    distributed.startup_agents(distributed_address=\"127.0.0.1:6200\", model_files=model_files,\n                               group_config_files=group_configs)\n\n\nif __name__ == '__main__':\n    start_agents()\n"
  },
  {
    "path": "example/matmul_distributed/serving_client.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Client for distributed matmul\"\"\"\nimport numpy as np\nfrom mindspore_serving.client import Client\n\n\ndef run_matmul():\n    \"\"\"Run client of distributed matmul\"\"\"\n    client = Client(\"localhost:5500\", \"matmul\", \"predict\")\n    instance = {\"x\": np.ones((128, 96), np.float32)}\n    result = client.infer(instance)\n    print(\"result:\\n\", result)\n    assert len(result) == 1\n    assert \"y\" in result[0]\n\n\nif __name__ == '__main__':\n    run_matmul()\n"
  },
  {
    "path": "example/matmul_distributed/serving_server.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Start Distributed Servable matmul\"\"\"\n\nimport os\nimport sys\nfrom mindspore_serving import server\nfrom mindspore_serving.server import distributed\n\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    distributed.start_servable(servable_dir, \"matmul\",\n                               rank_table_json_file=\"rank_table_8pcs.json\",\n                               version_number=1,\n                               distributed_address=\"127.0.0.1:6200\")\n\n    server.start_grpc_server(\"127.0.0.1:5500\")\n    server.start_restful_server(\"127.0.0.1:1500\")\n\n\nif __name__ == \"__main__\":\n    start()\n"
  },
  {
    "path": "example/matmul_multi_subgraphs/export_model/export_matmul.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n'''net\nThe sample can be run on Ascend 910 AI processor.\n'''\nimport os\nfrom shutil import copyfile\nimport numpy as np\nimport mindspore.context as context\nfrom mindspore import Tensor, Parameter, ops, export\nfrom mindspore.nn import Cell\n\n\nclass Net(Cell):\n    \"\"\"Net\"\"\"\n\n    def __init__(self, matmul_size, init_val, transpose_a=False, transpose_b=False):\n        \"\"\"init\"\"\"\n        super().__init__()\n        matmul_np = np.full(matmul_size, init_val, dtype=np.float32)\n        self.matmul_weight = Parameter(Tensor(matmul_np))\n        self.matmul = ops.MatMul(transpose_a=transpose_a, transpose_b=transpose_b)\n        self.sum = ops.ReduceSum()\n\n    def construct(self, inputs):\n        \"\"\"construct\"\"\"\n        x = self.matmul(inputs, self.matmul_weight)\n        x = self.sum(x, 0)\n        return x\n\n\ndef export_net():\n    \"\"\"Export matmul net , and copy output model `matmul_0.mindir` and `matmul_1.mindir` to directory ../matmul/1\"\"\"\n    context.set_context(mode=context.GRAPH_MODE)\n    network = Net(matmul_size=(96, 16), init_val=0.5)\n    # subgraph 0: 128,96 matmul 16,96 -> 128,16 reduce sum axis 0-> 16\n    predict_data = np.random.randn(128, 96).astype(np.float32)\n    # pylint: disable=protected-access\n    export(network, 
Tensor(predict_data), file_name=\"matmul_0\", file_format=\"MINDIR\")\n\n    # subgraph 1: 8,96 matmul 16,96 -> 8,16 reduce sum axis 0-> 16\n    predict_data = np.random.randn(8, 96).astype(np.float32)\n    # pylint: disable=protected-access\n    export(network, Tensor(predict_data), file_name=\"matmul_1\", file_format=\"MINDIR\")\n\n    dst_dir = '../matmul/1'\n    try:\n        os.mkdir(dst_dir)\n    except OSError:\n        pass\n\n    dst_file = os.path.join(dst_dir, 'matmul_0.mindir')\n    copyfile('matmul_0.mindir', dst_file)\n    print(\"copy matmul_0.mindir to \" + dst_dir + \" success\")\n\n    dst_file = os.path.join(dst_dir, 'matmul_1.mindir')\n    copyfile('matmul_1.mindir', dst_file)\n    print(\"copy matmul_1.mindir to \" + dst_dir + \" success\")\n\n\nif __name__ == \"__main__\":\n    export_net()\n"
  },
  {
    "path": "example/matmul_multi_subgraphs/matmul/servable_config.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Distributed matmul config python file\"\"\"\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=[\"matmul_0.mindir\", \"matmul_1.mindir\"], model_format=\"MindIR\",\n                               with_batch_dim=False)\n\n\ndef process(x, y):\n    z1 = model.call(x, subgraph=0)  # 128,96 matmul 16,96 -> reduce sum axis 0-> 16\n    z2 = model.call(y, subgraph=1)  # 8,96 matmul 16,96 -> reduce sum axis 0-> 16\n    return z1 + z2\n\n\n@register.register_method(output_names=[\"z\"])\ndef predict(x, y):\n    z = register.add_stage(process, x, y, outputs_count=1)\n    return z\n"
  },
  {
    "path": "example/matmul_multi_subgraphs/serving_client.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Client for distributed matmul\"\"\"\nimport numpy as np\nfrom mindspore_serving.client import Client\n\n\ndef run_matmul():\n    \"\"\"Run client of distributed matmul\"\"\"\n    client = Client(\"localhost:5500\", \"matmul\", \"predict\")\n    instance = {\"x\": np.ones((128, 96), np.float32), \"y\": np.ones((8, 96), np.float32)}\n    result = client.infer(instance)\n    print(\"result:\\n\", result)\n    assert \"z\" in result[0]\n\n\nif __name__ == '__main__':\n    run_matmul()\n"
  },
  {
    "path": "example/matmul_multi_subgraphs/serving_server.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Start Distributed Servable matmul\"\"\"\n\nimport os\nimport sys\nfrom mindspore_serving import server\n\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n\n    servable_config = server.ServableStartConfig(servable_directory=servable_dir, servable_name=\"matmul\",\n                                                 device_ids=(0, 1))\n    server.start_servables(servable_config)\n\n    server.start_grpc_server(\"127.0.0.1:5500\")\n    server.start_restful_server(\"127.0.0.1:1500\")\n\n\nif __name__ == \"__main__\":\n    start()\n"
  },
  {
    "path": "example/resnet/export_model/export_resnet.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"export resnet50 for cifar10 dataset\"\"\"\n\nimport os\nimport sys\nfrom shutil import copyfile\nfrom resnet.export import export_resnet\n\nif __name__ == '__main__':\n    if len(sys.argv) > 1 and sys.argv[1] == 'False':  # python export_resnet.py False\n        ckpt_file = None\n    else:\n        ckpt_file = \"resnet50_ascend_v111_cifar10_offical_cv_bs32_acc92.ckpt\"\n        if not os.path.exists(ckpt_file):\n            print(\"downloading resnet50 cifar10 checkpoint---------------------------------\")\n            os.system(f\"wget https://download.mindspore.cn/model_zoo/r1.1/\"\n                      f\"resnet50_ascend_v111_cifar10_offical_cv_bs32_acc92/{ckpt_file} --no-check-certificate\")\n            print(\"end downloading resnet50 cifar10 checkpoint---------------------------------\")\n    export_resnet('resnet50_cifar10', ckpt_file, 'resnet50_1b_cifar10')\n\n    dst_dir = '../resnet50/1'\n    try:\n        os.mkdir(dst_dir)\n    except OSError:\n        pass\n    dst_file = os.path.join(dst_dir, 'resnet50_1b_cifar10.mindir')\n    copyfile('resnet50_1b_cifar10.mindir', dst_file)\n"
  },
  {
    "path": "example/resnet/export_model/resnet/__init__.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Resnet export model\"\"\"\n"
  },
  {
    "path": "example/resnet/export_model/resnet/export.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n##############export checkpoint file into air and onnx models#################\npython export.py\n\"\"\"\nimport argparse\nimport numpy as np\n\nfrom mindspore import Tensor, export\nfrom mindspore import load_checkpoint, load_param_into_net\n\n\ndef export_resnet(network_dataset, ckpt_file, output_file):\n    \"\"\"export resnet\"\"\"\n\n    if network_dataset == 'resnet50_cifar10':\n        from .src.config import config1 as config\n        from .src.resnet import resnet50 as resnet\n    elif network_dataset == 'resnet50_imagenet2012':\n        from .src.config import config2 as config\n        from .src.resnet import resnet50 as resnet\n    elif network_dataset == 'resnet101_imagenet2012':\n        from .src.config import config3 as config\n        from .src.resnet import resnet101 as resnet\n    elif network_dataset == 'se-resnet50_imagenet2012':\n        from .src.config import config4 as config\n        from .src.resnet import se_resnet50 as resnet\n    else:\n        raise ValueError(\"network and dataset is not support.\")\n\n    net = resnet(config.class_num)\n\n    if ckpt_file is not None:\n        param_dict = load_checkpoint(ckpt_file)\n        load_param_into_net(net, param_dict)\n\n    input_arr = Tensor(np.zeros([1, 3, 224, 224], np.float32))\n    
export(net, input_arr, file_name=output_file, file_format=\"MINDIR\")\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='resnet export')\n    parser.add_argument('--network_dataset', type=str, default='resnet50_cifar10', choices=['resnet50_cifar10',\n                                                                                            'resnet50_imagenet2012',\n                                                                                            'resnet101_imagenet2012',\n                                                                                            \"se-resnet50_imagenet2012\"],\n                        help='network and dataset name.')\n    parser.add_argument('--ckpt_file', type=str, default='', help='resnet ckpt file.')\n    parser.add_argument('--output_file', type=str, default='', help='resnet output air name.')\n    args_opt = parser.parse_args()\n    export_resnet(args_opt.network_dataset, args_opt.ckpt_file, args_opt.output_file)\n"
  },
  {
    "path": "example/resnet/export_model/resnet/src/config.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nnetwork config setting, will be used in train.py and eval.py\n\"\"\"\nfrom easydict import EasyDict as ed\n\n# config for resent50, cifar10\nconfig1 = ed({\n    \"class_num\": 10,\n    \"batch_size\": 32,\n    \"loss_scale\": 1024,\n    \"momentum\": 0.9,\n    \"weight_decay\": 1e-4,\n    \"epoch_size\": 90,\n    \"pretrain_epoch_size\": 0,\n    \"save_checkpoint\": True,\n    \"save_checkpoint_epochs\": 5,\n    \"keep_checkpoint_max\": 10,\n    \"save_checkpoint_path\": \"./\",\n    \"warmup_epochs\": 5,\n    \"lr_decay_mode\": \"poly\",\n    \"lr_init\": 0.01,\n    \"lr_end\": 0.00001,\n    \"lr_max\": 0.1\n})\n\n# config for resnet50, imagenet2012\nconfig2 = ed({\n    \"class_num\": 1001,\n    \"batch_size\": 256,\n    \"loss_scale\": 1024,\n    \"momentum\": 0.9,\n    \"weight_decay\": 1e-4,\n    \"epoch_size\": 90,\n    \"pretrain_epoch_size\": 0,\n    \"save_checkpoint\": True,\n    \"save_checkpoint_epochs\": 5,\n    \"keep_checkpoint_max\": 10,\n    \"save_checkpoint_path\": \"./\",\n    \"warmup_epochs\": 0,\n    \"lr_decay_mode\": \"linear\",\n    \"use_label_smooth\": True,\n    \"label_smooth_factor\": 0.1,\n    \"lr_init\": 0,\n    \"lr_max\": 0.8,\n    \"lr_end\": 0.0\n})\n\n# config for resent101, imagenet2012\nconfig3 = ed({\n    \"class_num\": 1001,\n   
 \"batch_size\": 32,\n    \"loss_scale\": 1024,\n    \"momentum\": 0.9,\n    \"weight_decay\": 1e-4,\n    \"epoch_size\": 120,\n    \"pretrain_epoch_size\": 0,\n    \"save_checkpoint\": True,\n    \"save_checkpoint_epochs\": 5,\n    \"keep_checkpoint_max\": 10,\n    \"save_checkpoint_path\": \"./\",\n    \"warmup_epochs\": 0,\n    \"lr_decay_mode\": \"cosine\",\n    \"use_label_smooth\": True,\n    \"label_smooth_factor\": 0.1,\n    \"lr\": 0.1\n})\n\n# config for se-resnet50, imagenet2012\nconfig4 = ed({\n    \"class_num\": 1001,\n    \"batch_size\": 32,\n    \"loss_scale\": 1024,\n    \"momentum\": 0.9,\n    \"weight_decay\": 1e-4,\n    \"epoch_size\": 28,\n    \"train_epoch_size\": 24,\n    \"pretrain_epoch_size\": 0,\n    \"save_checkpoint\": True,\n    \"save_checkpoint_epochs\": 4,\n    \"keep_checkpoint_max\": 10,\n    \"save_checkpoint_path\": \"./\",\n    \"warmup_epochs\": 3,\n    \"lr_decay_mode\": \"cosine\",\n    \"use_label_smooth\": True,\n    \"label_smooth_factor\": 0.1,\n    \"lr_init\": 0.0,\n    \"lr_max\": 0.3,\n    \"lr_end\": 0.0001\n})\n"
  },
  {
    "path": "example/resnet/export_model/resnet/src/resnet.py",
    "content": "# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"ResNet.\"\"\"\nimport math\nimport numpy as np\nfrom scipy.stats import truncnorm\nimport mindspore as ms\nfrom mindspore import nn\nfrom mindspore import ops\nfrom mindspore import Tensor\n\n\ndef _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):\n    fan_in = in_channel * kernel_size * kernel_size\n    scale = 1.0\n    scale /= max(1., fan_in)\n    stddev = (scale ** 0.5) / .87962566103423978\n    mu, sigma = 0, stddev\n    weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(out_channel * in_channel * kernel_size * kernel_size)\n    weight = np.reshape(weight, (out_channel, in_channel, kernel_size, kernel_size))\n    return Tensor(weight, dtype=ms.float32)\n\n\ndef _weight_variable(shape, factor=0.01):\n    init_value = np.random.randn(*shape).astype(np.float32) * factor\n    return Tensor(init_value)\n\n\ndef calculate_gain(nonlinearity, param=None):\n    \"\"\"calculate_gain\"\"\"\n    linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']\n    res = 0\n    if nonlinearity in linear_fns or nonlinearity == 'sigmoid':\n        res = 1\n    elif nonlinearity == 'tanh':\n        res = 5.0 / 3\n    elif nonlinearity == 'relu':\n        res = math.sqrt(2.0)\n    elif nonlinearity == 
'leaky_relu':\n        if param is None:\n            negative_slope = 0.01\n        elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):\n            # True/False are instances of int, hence check above\n            negative_slope = param\n        else:\n            raise ValueError(\"negative_slope {} not a valid number\".format(param))\n        res = math.sqrt(2.0 / (1 + negative_slope ** 2))\n    else:\n        raise ValueError(\"Unsupported nonlinearity {}\".format(nonlinearity))\n    return res\n\n\ndef _calculate_fan_in_and_fan_out(tensor):\n    \"\"\"_calculate_fan_in_and_fan_out\"\"\"\n    dimensions = len(tensor)\n    if dimensions < 2:\n        raise ValueError(\"Fan in and fan out can not be computed for tensor with fewer than 2 dimensions\")\n    if dimensions == 2:  # Linear\n        fan_in = tensor[1]\n        fan_out = tensor[0]\n    else:\n        num_input_fmaps = tensor[1]\n        num_output_fmaps = tensor[0]\n        receptive_field_size = 1\n        if dimensions > 2:\n            receptive_field_size = tensor[2] * tensor[3]\n        fan_in = num_input_fmaps * receptive_field_size\n        fan_out = num_output_fmaps * receptive_field_size\n    return fan_in, fan_out\n\n\ndef _calculate_correct_fan(tensor, mode):\n    mode = mode.lower()\n    valid_modes = ['fan_in', 'fan_out']\n    if mode not in valid_modes:\n        raise ValueError(\"Mode {} not supported, please use one of {}\".format(mode, valid_modes))\n    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n    return fan_in if mode == 'fan_in' else fan_out\n\n\ndef kaiming_normal(inputs_shape, a=0, mode='fan_in', nonlinearity='leaky_relu'):\n    fan = _calculate_correct_fan(inputs_shape, mode)\n    gain = calculate_gain(nonlinearity, a)\n    std = gain / math.sqrt(fan)\n    return np.random.normal(0, std, size=inputs_shape).astype(np.float32)\n\n\ndef kaiming_uniform(inputs_shape, a=0., mode='fan_in', nonlinearity='leaky_relu'):\n    fan = 
_calculate_correct_fan(inputs_shape, mode)\n    gain = calculate_gain(nonlinearity, a)\n    std = gain / math.sqrt(fan)\n    bound = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation\n    return np.random.uniform(-bound, bound, size=inputs_shape).astype(np.float32)\n\n\ndef _conv3x3(in_channel, out_channel, stride=1, use_se=False, res_base=False):\n    if use_se:\n        weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=3)\n    else:\n        weight_shape = (out_channel, in_channel, 3, 3)\n        weight = Tensor(kaiming_normal(weight_shape, mode=\"fan_out\", nonlinearity='relu'))\n    if res_base:\n        return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride,\n                         padding=1, pad_mode='pad', weight_init=weight)\n    return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride,\n                     padding=0, pad_mode='same', weight_init=weight)\n\n\ndef _conv1x1(in_channel, out_channel, stride=1, use_se=False, res_base=False):\n    if use_se:\n        weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=1)\n    else:\n        weight_shape = (out_channel, in_channel, 1, 1)\n        weight = Tensor(kaiming_normal(weight_shape, mode=\"fan_out\", nonlinearity='relu'))\n    if res_base:\n        return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride,\n                         padding=0, pad_mode='pad', weight_init=weight)\n    return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride,\n                     padding=0, pad_mode='same', weight_init=weight)\n\n\ndef _conv7x7(in_channel, out_channel, stride=1, use_se=False, res_base=False):\n    if use_se:\n        weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=7)\n    else:\n        weight_shape = (out_channel, in_channel, 7, 7)\n        weight = Tensor(kaiming_normal(weight_shape, mode=\"fan_out\", nonlinearity='relu'))\n    if 
res_base:\n        return nn.Conv2d(in_channel, out_channel,\n                         kernel_size=7, stride=stride, padding=3, pad_mode='pad', weight_init=weight)\n    return nn.Conv2d(in_channel, out_channel,\n                     kernel_size=7, stride=stride, padding=0, pad_mode='same', weight_init=weight)\n\n\ndef _bn(channel, res_base=False):\n    if res_base:\n        return nn.BatchNorm2d(channel, eps=1e-5, momentum=0.1,\n                              gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)\n    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,\n                          gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)\n\n\ndef _bn_last(channel):\n    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,\n                          gamma_init=0, beta_init=0, moving_mean_init=0, moving_var_init=1)\n\n\ndef _fc(in_channel, out_channel, use_se=False):\n    if use_se:\n        weight = np.random.normal(loc=0, scale=0.01, size=out_channel * in_channel)\n        weight = Tensor(np.reshape(weight, (out_channel, in_channel)), dtype=ms.float32)\n    else:\n        weight_shape = (out_channel, in_channel)\n        weight = Tensor(kaiming_uniform(weight_shape, a=math.sqrt(5)))\n    return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)\n\n\nclass ResidualBlock(nn.Cell):\n    \"\"\"\n    ResNet V1 residual block definition.\n\n    Args:\n        in_channel (int): Input channel.\n        out_channel (int): Output channel.\n        stride (int): Stride size for the first convolutional layer. Default: 1.\n        use_se (bool): Enable SE-ResNet50 net. Default: False.\n        se_block(bool): Use se block in SE-ResNet50 net. 
Default: False.\n\n    Returns:\n        Tensor, output tensor.\n\n    Examples:\n        >>> ResidualBlock(3, 256, stride=2)\n    \"\"\"\n    expansion = 4\n\n    def __init__(self,\n                 in_channel,\n                 out_channel,\n                 stride=1,\n                 use_se=False, se_block=False):\n        super(ResidualBlock, self).__init__()\n        self.stride = stride\n        self.use_se = use_se\n        self.se_block = se_block\n        channel = out_channel // self.expansion\n        self.conv1 = _conv1x1(in_channel, channel, stride=1, use_se=self.use_se)\n        self.bn1 = _bn(channel)\n        if self.use_se and self.stride != 1:\n            self.e2 = nn.SequentialCell([_conv3x3(channel, channel, stride=1, use_se=True), _bn(channel),\n                                         nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')])\n        else:\n            self.conv2 = _conv3x3(channel, channel, stride=stride, use_se=self.use_se)\n            self.bn2 = _bn(channel)\n\n        self.conv3 = _conv1x1(channel, out_channel, stride=1, use_se=self.use_se)\n        self.bn3 = _bn_last(out_channel)\n        if self.se_block:\n            self.se_global_pool = ops.ReduceMean(keep_dims=False)\n            self.se_dense_0 = _fc(out_channel, int(out_channel / 4), use_se=self.use_se)\n            self.se_dense_1 = _fc(int(out_channel / 4), out_channel, use_se=self.use_se)\n            self.se_sigmoid = nn.Sigmoid()\n            self.se_mul = ops.Mul()\n        self.relu = nn.ReLU()\n\n        self.down_sample = False\n\n        if stride != 1 or in_channel != out_channel:\n            self.down_sample = True\n        self.down_sample_layer = None\n\n        if self.down_sample:\n            if self.use_se:\n                if stride == 1:\n                    self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel,\n                                                                         stride, 
use_se=self.use_se), _bn(out_channel)])\n                else:\n                    self.down_sample_layer = nn.SequentialCell([nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same'),\n                                                                _conv1x1(in_channel, out_channel, 1,\n                                                                         use_se=self.use_se), _bn(out_channel)])\n            else:\n                self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,\n                                                                     use_se=self.use_se), _bn(out_channel)])\n\n    def construct(self, x):\n        \"\"\"Construct ResidualBlock\"\"\"\n        identity = x\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n        if self.use_se and self.stride != 1:\n            out = self.e2(out)\n        else:\n            out = self.conv2(out)\n            out = self.bn2(out)\n            out = self.relu(out)\n        out = self.conv3(out)\n        out = self.bn3(out)\n        if self.se_block:\n            out_se = out\n            out = self.se_global_pool(out, (2, 3))\n            out = self.se_dense_0(out)\n            out = self.relu(out)\n            out = self.se_dense_1(out)\n            out = self.se_sigmoid(out)\n            out = out.reshape(out.shape + (1, 1))\n            out = self.se_mul(out, out_se)\n\n        if self.down_sample:\n            identity = self.down_sample_layer(identity)\n\n        out = out + identity\n        out = self.relu(out)\n\n        return out\n\n\nclass ResidualBlockBase(nn.Cell):\n    \"\"\"\n    ResNet V1 residual block definition.\n\n    Args:\n        in_channel (int): Input channel.\n        out_channel (int): Output channel.\n        stride (int): Stride size for the first convolutional layer. Default: 1.\n        use_se (bool): Enable SE-ResNet50 net. 
Default: False.\n        se_block(bool): Use se block in SE-ResNet50 net. Default: False.\n        res_base (bool): Enable parameter setting of resnet18. Default: True.\n\n    Returns:\n        Tensor, output tensor.\n\n    Examples:\n        >>> ResidualBlockBase(3, 256, stride=2)\n    \"\"\"\n\n    # pylint: disable=unused-argument\n    def __init__(self,\n                 in_channel,\n                 out_channel,\n                 stride=1,\n                 use_se=False,\n                 se_block=False,\n                 res_base=True):\n        super(ResidualBlockBase, self).__init__()\n        self.res_base = res_base\n        self.conv1 = _conv3x3(in_channel, out_channel, stride=stride, res_base=self.res_base)\n        self.bn1d = _bn(out_channel)\n        self.conv2 = _conv3x3(out_channel, out_channel, stride=1, res_base=self.res_base)\n        self.bn2d = _bn(out_channel)\n        self.relu = nn.ReLU()\n\n        self.down_sample = False\n        if stride != 1 or in_channel != out_channel:\n            self.down_sample = True\n\n        self.down_sample_layer = None\n        if self.down_sample:\n            self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,\n                                                                 use_se=use_se, res_base=self.res_base),\n                                                        _bn(out_channel, res_base)])\n\n    def construct(self, x):\n        \"\"\"Construct ResidualBlockBase\"\"\"\n        identity = x\n\n        out = self.conv1(x)\n        out = self.bn1d(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2d(out)\n\n        if self.down_sample:\n            identity = self.down_sample_layer(identity)\n\n        out = out + identity\n        out = self.relu(out)\n\n        return out\n\n\nclass ResNet(nn.Cell):\n    \"\"\"\n    ResNet architecture.\n\n    Args:\n        block (Cell): Block for network.\n        layer_nums (list): 
Numbers of block in different layers.\n        in_channels (list): Input channel in each layer.\n        out_channels (list): Output channel in each layer.\n        strides (list):  Stride size in each layer.\n        num_classes (int): The number of classes that the training images are belonging to.\n        use_se (bool): Enable SE-ResNet50 net. Default: False.\n        se_block(bool): Use se block in SE-ResNet50 net in layer 3 and layer 4. Default: False.\n        res_base (bool): Enable parameter setting of resnet18. Default: False.\n\n    Returns:\n        Tensor, output tensor.\n\n    Examples:\n        >>> ResNet(ResidualBlock,\n        >>>        [3, 4, 6, 3],\n        >>>        [64, 256, 512, 1024],\n        >>>        [256, 512, 1024, 2048],\n        >>>        [1, 2, 2, 2],\n        >>>        10)\n    \"\"\"\n\n    def __init__(self,\n                 block,\n                 layer_nums,\n                 in_channels,\n                 out_channels,\n                 strides,\n                 num_classes,\n                 use_se=False,\n                 res_base=False):\n        super(ResNet, self).__init__()\n\n        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:\n            raise ValueError(\"the length of layer_num, in_channels, out_channels list must be 4!\")\n        self.use_se = use_se\n        self.res_base = res_base\n        self.se_block = False\n        if self.use_se:\n            self.se_block = True\n\n        if self.use_se:\n            self.conv1_0 = _conv3x3(3, 32, stride=2, use_se=self.use_se)\n            self.bn1_0 = _bn(32)\n            self.conv1_1 = _conv3x3(32, 32, stride=1, use_se=self.use_se)\n            self.bn1_1 = _bn(32)\n            self.conv1_2 = _conv3x3(32, 64, stride=1, use_se=self.use_se)\n        else:\n            self.conv1 = _conv7x7(3, 64, stride=2, res_base=self.res_base)\n        self.bn1 = _bn(64, self.res_base)\n        self.relu = ops.ReLU()\n\n        if self.res_base:\n      
      self.pad = nn.Pad(paddings=((0, 0), (0, 0), (1, 1), (1, 1)))\n            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode=\"valid\")\n        else:\n            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode=\"same\")\n\n        self.layer1 = self._make_layer(block,\n                                       layer_nums[0],\n                                       in_channel=in_channels[0],\n                                       out_channel=out_channels[0],\n                                       stride=strides[0],\n                                       use_se=self.use_se)\n        self.layer2 = self._make_layer(block,\n                                       layer_nums[1],\n                                       in_channel=in_channels[1],\n                                       out_channel=out_channels[1],\n                                       stride=strides[1],\n                                       use_se=self.use_se)\n        self.layer3 = self._make_layer(block,\n                                       layer_nums[2],\n                                       in_channel=in_channels[2],\n                                       out_channel=out_channels[2],\n                                       stride=strides[2],\n                                       use_se=self.use_se,\n                                       se_block=self.se_block)\n        self.layer4 = self._make_layer(block,\n                                       layer_nums[3],\n                                       in_channel=in_channels[3],\n                                       out_channel=out_channels[3],\n                                       stride=strides[3],\n                                       use_se=self.use_se,\n                                       se_block=self.se_block)\n\n        self.mean = ops.ReduceMean(keep_dims=True)\n        self.flatten = nn.Flatten()\n        self.end_point = _fc(out_channels[3], num_classes, use_se=self.use_se)\n\n    def 
_make_layer(self, block, layer_num, in_channel, out_channel, stride, use_se=False, se_block=False):\n        \"\"\"\n        Make stage network of ResNet.\n\n        Args:\n            block (Cell): Resnet block.\n            layer_num (int): Layer number.\n            in_channel (int): Input channel.\n            out_channel (int): Output channel.\n            stride (int): Stride size for the first convolutional layer.\n            se_block(bool): Use se block in SE-ResNet50 net. Default: False.\n        Returns:\n            SequentialCell, the output layer.\n\n        Examples:\n            >>> _make_layer(ResidualBlock, 3, 128, 256, 2)\n        \"\"\"\n        layers = []\n\n        resnet_block = block(in_channel, out_channel, stride=stride, use_se=use_se)\n        layers.append(resnet_block)\n        if se_block:\n            for _ in range(1, layer_num - 1):\n                resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)\n                layers.append(resnet_block)\n            resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se, se_block=se_block)\n            layers.append(resnet_block)\n        else:\n            for _ in range(1, layer_num):\n                resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)\n                layers.append(resnet_block)\n        return nn.SequentialCell(layers)\n\n    def construct(self, x):\n        \"\"\"Construct Resnet\"\"\"\n        if self.use_se:\n            x = self.conv1_0(x)\n            x = self.bn1_0(x)\n            x = self.relu(x)\n            x = self.conv1_1(x)\n            x = self.bn1_1(x)\n            x = self.relu(x)\n            x = self.conv1_2(x)\n        else:\n            x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n        if self.res_base:\n            x = self.pad(x)\n        c1 = self.maxpool(x)\n\n        c2 = self.layer1(c1)\n        c3 = self.layer2(c2)\n        c4 = self.layer3(c3)\n        c5 = 
self.layer4(c4)\n\n        out = self.mean(c5, (2, 3))\n        out = self.flatten(out)\n        out = self.end_point(out)\n\n        return out\n\n\ndef resnet18(class_num=10):\n    \"\"\"\n    Get ResNet18 neural network.\n\n    Args:\n        class_num (int): Class number.\n\n    Returns:\n        Cell, cell instance of ResNet18 neural network.\n\n    Examples:\n        >>> net = resnet18(10)\n    \"\"\"\n    return ResNet(ResidualBlockBase,\n                  [2, 2, 2, 2],\n                  [64, 64, 128, 256],\n                  [64, 128, 256, 512],\n                  [1, 2, 2, 2],\n                  class_num,\n                  res_base=True)\n\n\ndef resnet34(class_num=10):\n    \"\"\"\n    Get ResNet34 neural network.\n\n    Args:\n        class_num (int): Class number.\n\n    Returns:\n        Cell, cell instance of ResNet34 neural network.\n\n    Examples:\n        >>> net = resnet34(10)\n    \"\"\"\n    return ResNet(ResidualBlockBase,\n                  [3, 4, 6, 3],\n                  [64, 64, 128, 256],\n                  [64, 128, 256, 512],\n                  [1, 2, 2, 2],\n                  class_num,\n                  res_base=True)\n\n\ndef resnet50(class_num=10):\n    \"\"\"\n    Get ResNet50 neural network.\n\n    Args:\n        class_num (int): Class number.\n\n    Returns:\n        Cell, cell instance of ResNet50 neural network.\n\n    Examples:\n        >>> net = resnet50(10)\n    \"\"\"\n    return ResNet(ResidualBlock,\n                  [3, 4, 6, 3],\n                  [64, 256, 512, 1024],\n                  [256, 512, 1024, 2048],\n                  [1, 2, 2, 2],\n                  class_num)\n\n\ndef se_resnet50(class_num=1001):\n    \"\"\"\n    Get SE-ResNet50 neural network.\n\n    Args:\n        class_num (int): Class number.\n\n    Returns:\n        Cell, cell instance of SE-ResNet50 neural network.\n\n    Examples:\n        >>> net = se_resnet50(1001)\n    \"\"\"\n    return ResNet(ResidualBlock,\n                  [3, 4, 6, 
3],\n                  [64, 256, 512, 1024],\n                  [256, 512, 1024, 2048],\n                  [1, 2, 2, 2],\n                  class_num,\n                  use_se=True)\n\n\ndef resnet101(class_num=1001):\n    \"\"\"\n    Get ResNet101 neural network.\n\n    Args:\n        class_num (int): Class number.\n\n    Returns:\n        Cell, cell instance of ResNet101 neural network.\n\n    Examples:\n        >>> net = resnet101(1001)\n    \"\"\"\n    return ResNet(ResidualBlock,\n                  [3, 4, 23, 3],\n                  [64, 256, 512, 1024],\n                  [256, 512, 1024, 2048],\n                  [1, 2, 2, 2],\n                  class_num)\n"
  },
  {
    "path": "example/resnet/resnet50/servable_config.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Resnet50 cifar10 config python file\"\"\"\nimport numpy as np\nimport mindspore.dataset as ds\nimport mindspore.dataset.transforms.c_transforms as TC\nimport mindspore.dataset.vision.c_transforms as VC\n\nfrom mindspore_serving.server import register\n\n# cifar 10\nidx_2_label = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\n\ndef preprocess_eager(image):\n    \"\"\"\n    Define preprocess, input is image numpy, return preprocess result.\n    Return type can be numpy, str, bytes, int, float, or bool.\n    Use MindData Eager, this image processing can also use other image processing library, likes numpy, PIL or cv2 etc.\n    \"\"\"\n    image_size = 224\n    mean = [0.4914 * 255, 0.4822 * 255, 0.4465 * 255]\n    std = [0.2023 * 255, 0.1994 * 255, 0.2010 * 255]\n\n    decode = VC.Decode()\n    resize = VC.Resize([image_size, image_size])\n    normalize = VC.Normalize(mean=mean, std=std)\n    hwc2chw = VC.HWC2CHW()\n\n    image = decode(image)\n    image = resize(image)\n    image = normalize(image)\n    image = hwc2chw(image)\n    return image\n\n\ndef preprocess_batch(instances):\n    \"\"\"\n    Define preprocess pipeline, the function arg is multi instances, every instance is tuple of inputs.\n    This example has one input 
and one output.\n    Use MindData Pipeline.\n    \"\"\"\n\n    def generator_func():\n        for instance in instances:\n            image = instance[0]\n            yield (image,)\n\n    resnet_ds = ds.GeneratorDataset(generator_func, [\"image\"], shuffle=False)\n    image_size = 224\n    mean = [0.4914 * 255, 0.4822 * 255, 0.4465 * 255]\n    std = [0.2023 * 255, 0.1994 * 255, 0.2010 * 255]\n    resnet_ds = resnet_ds.map(operations=VC.Decode(), input_columns=\"image\", num_parallel_workers=8)\n\n    trans = [\n        VC.Resize([image_size, image_size]),\n        VC.Normalize(mean=mean, std=std),\n        VC.HWC2CHW()\n    ]\n    resnet_ds = resnet_ds.map(operations=TC.Compose(trans), input_columns=\"image\", num_parallel_workers=2)\n\n    for data in resnet_ds.create_dict_iterator(num_epochs=1):\n        image_result = data[\"image\"]\n        yield (image_result,)\n\n\ndef postprocess_top1(score):\n    \"\"\"\n    Define postprocess. This example has one input and one output.\n    The input is the numpy tensor of the score, and the output is the label str of top one.\n    \"\"\"\n    max_idx = np.argmax(score)\n    return idx_2_label[max_idx]\n\n\ndef postprocess_top5(score):\n    \"\"\"\n    Define postprocess. This example has one input and two outputs.\n    The input is the numpy tensor of the score. 
The first output is the str joined by labels of top five,\n    and the second output is the score tensor of the top five.\n    \"\"\"\n    idx = np.argsort(score)[::-1][:5]  # top 5\n    ret_label = [idx_2_label[i] for i in idx]\n    ret_score = score[idx]\n    return \";\".join(ret_label), ret_score\n\n\nresnet_model = register.declare_model(model_file=\"resnet50_1b_cifar10.mindir\", model_format=\"MindIR\")\n\n\ndef call_resnet_model(image):\n    \"\"\"call model with only one instance a time\"\"\"\n    image = preprocess_eager(image)\n    score = resnet_model.call(image)  # for only one instance\n    return postprocess_top1(score)\n\n\ndef call_resnet_model_batch(instances):\n    \"\"\"call model with multiply instances a time\"\"\"\n    input_instances = []\n    for instance in instances:\n        image = instance[0] # only one input\n        image = preprocess_eager(image) # [3,224,224]\n        input_instances.append([image])\n    output_instances = resnet_model.call(input_instances)  # for multiply instances\n    for instance in output_instances:\n        output = instance[0]  # only one output for each instance\n        output = postprocess_top1(output)\n        yield output\n\n\n@register.register_method(output_names=[\"label\"])\ndef classify_top1_batch(image):\n    \"\"\"Define method `classify_top1_batch` for servable `resnet50`.\n     The input is `image` and the output is `label`.\"\"\"\n    x = register.add_stage(preprocess_batch, image, outputs_count=1, batch_size=1024)\n    x = register.add_stage(resnet_model, x, outputs_count=1)\n    x = register.add_stage(postprocess_top1, x, outputs_count=1)\n    return x\n\n\n@register.register_method(output_names=[\"label\"])\ndef classify_top1(image):  # pipeline: preprocess_eager/postprocess_top1, model\n    \"\"\"Define method `classify_top1` for servable `resnet50`.\n     The input is `image` and the output is `label`. 
\"\"\"\n    x = register.add_stage(preprocess_eager, image, outputs_count=1)\n    x = register.add_stage(resnet_model, x, outputs_count=1)\n    x = register.add_stage(postprocess_top1, x, outputs_count=1)\n    return x\n\n\n@register.register_method(output_names=[\"label\"])\ndef classify_top1_v2(image):  # without pipeline, call model with only one instance a time\n    \"\"\"Define method `classify_top1_v2` for servable `resnet50`.\n     The input is `image` and the output is `label`. \"\"\"\n    label = register.add_stage(call_resnet_model, image, outputs_count=1)\n    return label\n\n\n@register.register_method(output_names=[\"label\"])\ndef classify_top1_v3(image):  # without pipeline, call model with maximum 32 instances a time\n    \"\"\"Define method `classify_top1_v3` for servable `resnet50`.\n     The input is `image` and the output is `label`. \"\"\"\n    label = register.add_stage(call_resnet_model_batch, image, outputs_count=1, batch_size=32)\n    return label\n\n\n@register.register_method(output_names=[\"label\", \"score\"])\ndef classify_top5(image):\n    \"\"\"Define method `classify_top5` for servable `resnet50`.\n     The input is `image` and the output is `label` and `score`. \"\"\"\n    x = register.add_stage(preprocess_eager, image, outputs_count=1)\n    x = register.add_stage(resnet_model, x, outputs_count=1)\n    label, score = register.add_stage(postprocess_top5, x, outputs_count=2)\n    return label, score\n"
  },
  {
    "path": "example/resnet/serving_client.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Client for resnet50\"\"\"\nimport os\nfrom mindspore_serving.client import Client\n\n\ndef read_images():\n    \"\"\"Read images for directory test_image\"\"\"\n    image_files = []\n    images_buffer = []\n    for path, _, file_list in os.walk(\"./test_image/\"):\n        for file_name in file_list:\n            image_file = os.path.join(path, file_name)\n            image_files.append(image_file)\n    for image_file in image_files:\n        with open(image_file, \"rb\") as fp:\n            images_buffer.append(fp.read())\n    return image_files, images_buffer\n\n\ndef run_classify_top1(method_name):\n    \"\"\"Client for servable resnet50 and method classify_top1[v1,v2,v3]\"\"\"\n    print(f\"\\n--------------run_{method_name}----------\")\n    client = Client(\"localhost:5500\", \"resnet50\", method_name)\n    instances = []\n    image_files, images_buffer = read_images()\n    for image in images_buffer:\n        instances.append({\"image\": image})\n    result = client.infer(instances)\n    print(result)\n    for file, label in zip(image_files, result):\n        print(f\"{file}, label: {label['label']}\")\n\n\ndef run_classify_top5():\n    \"\"\"Client for servable resnet50 and method classify_top5\"\"\"\n    print(\"\\n--------------run_classify_top5-----------\")\n  
  client = Client(\"localhost:5500\", \"resnet50\", \"classify_top5\")\n    instances = []\n    image_files, images_buffer = read_images()\n    for image in images_buffer:\n        instances.append({\"image\": image})  # input `image`\n\n    result = client.infer(instances)\n\n    print(result)\n    for file, result_item in zip(image_files, result):  # result for every image\n        label = result_item[\"label\"]  # result `label`\n        score = result_item[\"score\"]  # result `score`\n        print(\"file:\", file)\n        print(\"label result:\", label)\n        print(\"score result:\", score)\n\n\ndef run_classify_top5_async():\n    \"\"\"Client for servable resnet50 and method classify_top5\"\"\"\n    print(\"\\n--------------run_classify_top5_async-----------\")\n    client = Client(\"localhost:5500\", \"resnet50\", \"classify_top5\")\n    instances = []\n    image_files, images_buffer = read_images()\n    for image in images_buffer:\n        instances.append({\"image\": image})  # input `image`\n\n    result_future = client.infer_async(instances)\n    result = result_future.result()\n\n    print(result)\n    for file, result_item in zip(image_files, result):  # result for every image\n        label = result_item[\"label\"]  # result `label`\n        score = result_item[\"score\"]  # result `score`\n        print(\"file:\", file)\n        print(\"label result:\", label)\n        print(\"score result:\", score)\n\n\ndef run_restful_classify_top1():\n    \"\"\"RESTful Client for servable resnet50 and method classify_top1\"\"\"\n    print(\"\\n--------------run_restful_classify_top1-----------\")\n    import base64\n    import requests\n    import json\n    instances = []\n    image_files, images_buffer = read_images()\n    for image in images_buffer:\n        base64_data = base64.b64encode(image).decode()\n        instances.append({\"image\": {\"b64\": base64_data}})\n    instances_map = {\"instances\": instances}\n    post_payload = 
json.dumps(instances_map)\n    ip = \"localhost\"\n    restful_port = 1500\n    servable_name = \"resnet50\"\n    method_name = \"classify_top1\"\n    result = requests.post(f\"http://{ip}:{restful_port}/model/{servable_name}:{method_name}\", data=post_payload)\n    print(result.text)\n    result = json.loads(result.text)\n    for file, label in zip(image_files, result['instances']):\n        print(f\"{file}, label: {label['label']}\")\n\n\nif __name__ == '__main__':\n    run_classify_top1(\"classify_top1_batch\")\n    run_classify_top1(\"classify_top1\")  # preprocess eager, pipeline\n    run_classify_top1(\"classify_top1_v2\")  # preprocess eager, without pipeline\n    run_classify_top1(\"classify_top1_v3\")  # preprocess eager, without pipeline\n\n    run_classify_top5()\n    run_restful_classify_top1()\n    run_classify_top5_async()\n"
  },
  {
    "path": "example/resnet/serving_server.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Start Servable resnet50\"\"\"\n\nimport os\nimport sys\nfrom mindspore_serving import server\n\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    # Total 4 worker, one worker occupy device 0, the model inference tasks of other workers are forwarded to the worker\n    # that occupies the device.\n    config = server.ServableStartConfig(servable_directory=servable_dir, servable_name=\"resnet50\", device_ids=0,\n                                        num_parallel_workers=4)\n    server.start_servables(config)\n\n    server.start_grpc_server(\"127.0.0.1:5500\")\n    server.start_restful_server(\"127.0.0.1:1500\")\n\n\nif __name__ == \"__main__\":\n    start()\n"
  },
  {
    "path": "example/tensor_add/add/servable_config.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"add model servable config\"\"\"\n\nimport numpy as np\nfrom mindspore_serving.server import register\n\n\ndef add_trans_datatype(x1, x2):\n    \"\"\"define preprocess, this example has two inputs and two outputs\"\"\"\n    return x1.astype(np.float32), x2.astype(np.float32)\n\n\n# when with_batch_dim is set to False, only 2x2 add is supported\n# when with_batch_dim is set to True(default), Nx2 add is supported, while N is viewed as batch\n# float32 inputs/outputs\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n\n# register add_common method in add\n@register.register_method(output_names=[\"y\"])\ndef add_common(x1, x2):  # only support float32 inputs\n    \"\"\"method add_common data flow definition, only call model\"\"\"\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n\n\n# register add_cast method in add\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    \"\"\"method add_cast data flow definition, only preprocessing and call model\"\"\"\n    x1, x2 = register.add_stage(add_trans_datatype, x1, x2, outputs_count=2)  # cast input to float32\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n"
  },
  {
    "path": "example/tensor_add/export_model/add_model.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"add model generator\"\"\"\nimport os\nfrom shutil import copyfile\nimport numpy as np\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nimport mindspore.ops as ops\nimport mindspore as ms\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n\n\nclass Net(nn.Cell):\n    \"\"\"Define Net of add\"\"\"\n\n    def __init__(self):\n        super(Net, self).__init__()\n        self.add = ops.Add()\n\n    def construct(self, x_, y_):\n        \"\"\"construct add net\"\"\"\n        return self.add(x_, y_)\n\n\ndef export_net():\n    \"\"\"Export add net of 2x2 + 2x2, and copy output model `tensor_add.mindir` to directory ../add/1\"\"\"\n    x = np.ones([2, 2]).astype(np.float32)\n    y = np.ones([2, 2]).astype(np.float32)\n    add = Net()\n    ms.export(add, ms.Tensor(x), ms.Tensor(y), file_name='tensor_add', file_format='MINDIR')\n    dst_dir = '../add/1'\n    try:\n        os.mkdir(dst_dir)\n    except OSError:\n        pass\n\n    dst_file = os.path.join(dst_dir, 'tensor_add.mindir')\n    copyfile('tensor_add.mindir', dst_file)\n    print(\"copy tensor_add.mindir to \" + dst_dir + \" success\")\n\n\nif __name__ == \"__main__\":\n    export_net()\n"
  },
  {
    "path": "example/tensor_add/serving_client.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The client of example add\"\"\"\n\nimport numpy as np\nfrom mindspore_serving.client import Client\n\n\ndef run_add_common():\n    \"\"\"invoke servable add method add_common\"\"\"\n    client = Client(\"127.0.0.1:5500\", \"add\", \"add_common\")\n    instances = []\n\n    # instance 1\n    x1 = np.asarray([[1, 1], [1, 1]]).astype(np.float32)\n    x2 = np.asarray([[1, 1], [1, 1]]).astype(np.float32)\n    instances.append({\"x1\": x1, \"x2\": x2})\n\n    # instance 2\n    x1 = np.asarray([[2, 2], [2, 2]]).astype(np.float32)\n    x2 = np.asarray([[2, 2], [2, 2]]).astype(np.float32)\n    instances.append({\"x1\": x1, \"x2\": x2})\n\n    # instance 3\n    x1 = np.asarray([[3, 3], [3, 3]]).astype(np.float32)\n    x2 = np.asarray([[3, 3], [3, 3]]).astype(np.float32)\n    instances.append({\"x1\": x1, \"x2\": x2})\n\n    result = client.infer(instances)\n    print(result)\n\n\ndef run_add_cast():\n    \"\"\"invoke servable add method add_cast\"\"\"\n    client = Client(\"127.0.0.1:5500\", \"add\", \"add_cast\")\n    instances = []\n    x1 = np.ones((2, 2), np.int32)\n    x2 = np.ones((2, 2), np.int32)\n    instances.append({\"x1\": x1, \"x2\": x2})\n    result = client.infer(instances)\n    print(result)\n\n\ndef post_restful(address, servable_name, method_name, json_instances, 
version_number=None):\n    \"\"\"construct and post restful request\"\"\"\n    import json\n    import requests\n    instances_map = {\"instances\": json_instances}\n    post_payload = json.dumps(instances_map)\n    print(\"request:\", post_payload[:200])\n    if version_number is not None:\n        request_url = f\"http://{address}/model/{servable_name}/version/{version_number}:{method_name}\"\n        result = requests.post(request_url, data=post_payload)\n    else:\n        request_url = f\"http://{address}/model/{servable_name}:{method_name}\"\n        result = requests.post(request_url, data=post_payload)\n    print(\"result\", result.text[:200])\n    result = json.loads(result.text)\n    return result\n\n\ndef run_add_restful():\n    \"\"\"run restful request: invoke servable add method add_common\"\"\"\n    # Client\n    print(\"begin to run add restful.\")\n    instances = []\n    x1 = np.asarray([[1.1, 2.2], [3.3, 4.4]]).astype(np.float32)\n    x2 = np.asarray([[5.5, 6.6], [7.7, 8.8]]).astype(np.float32)\n    instances.append({\"x1\": x1.tolist(), \"x2\": x2.tolist()})\n\n    result = post_restful(\"localhost:1500\", \"add\", \"add_common\", instances)\n    print(result)\n\n\nif __name__ == '__main__':\n    run_add_common()\n    run_add_cast()\n    run_add_restful()\n"
  },
  {
    "path": "example/tensor_add/serving_client_with_check.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The client of example add with result check\"\"\"\n\nimport json\nimport requests\nimport numpy as np\nfrom mindspore_serving.client import Client\n\n\ndef check_result(result, y_data_list):\n    \"\"\"check grpc output result\"\"\"\n    assert len(result) == len(y_data_list)\n    for result_item, y_data in zip(result, y_data_list):\n        assert (np.abs(result_item[\"y\"] - y_data) < 0.00001).all()\n\n\ndef run_add_common():\n    \"\"\"invoke servable add method add_common\"\"\"\n    client = Client(\"localhost:5500\", \"add\", \"add_common\")\n    instances = []\n    instance_count = 3\n    y_data_list = []\n    for i in range(instance_count):\n        x1 = np.asarray([[1.1, 2.2], [3.3, 4.4]]).astype(np.float32) * (i + 1)\n        x2 = np.asarray([[5.5, 6.6], [7.7, 8.8]]).astype(np.float32) * (i + 1)\n        y_data_list.append(x1 + x2)\n        instances.append({\"x1\": x1, \"x2\": x2})\n\n    result = client.infer(instances)\n    print(result)\n    check_result(result, y_data_list)\n\n\ndef run_add_cast():\n    \"\"\"invoke servable add method add_cast\"\"\"\n    client = Client(\"localhost:5500\", \"add\", \"add_cast\")\n    instances = []\n    y_data_list = []\n    x1 = np.ones((2, 2), np.int32)\n    x2 = np.ones((2, 2), np.int32)\n    instances.append({\"x1\": 
x1, \"x2\": x2})\n    y_data_list.append((x1 + x2).astype(np.float32))\n    result = client.infer(instances)\n    print(result)\n    check_result(result, y_data_list)\n\n\ndef post_restful(address, servable_name, method_name, json_instances, version_number=None):\n    \"\"\"construct post restful request\"\"\"\n    instances_map = {\"instances\": json_instances}\n    post_payload = json.dumps(instances_map)\n    print(\"request:\", post_payload[:200])\n    if version_number is not None:\n        request_url = f\"http://{address}/model/{servable_name}/version/{version_number}:{method_name}\"\n        result = requests.post(request_url, data=post_payload)\n    else:\n        request_url = f\"http://{address}/model/{servable_name}:{method_name}\"\n        result = requests.post(request_url, data=post_payload)\n    print(\"result\", result.text[:200])\n    result = json.loads(result.text)\n    return result\n\n\ndef check_number_result(result, y_data_list, output_name=\"y\"):\n    \"\"\"check restful output result\"\"\"\n    result = result[\"instances\"]\n    assert len(result) == len(y_data_list)\n    for result_item, expected_item in zip(result, y_data_list):\n        result_item = np.array(result_item[output_name])\n        print(\"result\", result_item)\n        print(\"expect:\", expected_item)\n        assert result_item.shape == expected_item.shape\n        assert (np.abs(result_item - expected_item) < 0.001).all()\n\n\ndef run_add_restful():\n    \"\"\"run restful request: invoke servable add method add_common\"\"\"\n    # Client\n    print(\"begin to run add restful.\")\n    y_data_list = []\n    instances = []\n    x1 = np.asarray([[1.1, 2.2], [3.3, 4.4]]).astype(np.float32)\n    x2 = np.asarray([[5.5, 6.6], [7.7, 8.8]]).astype(np.float32)\n    y_data_list.append((x1 + x2).astype(np.float32))\n    instances.append({\"x1\": x1.tolist(), \"x2\": x2.tolist()})\n\n    result = post_restful(\"localhost:1500\", \"add\", \"add_common\", instances)\n    
check_number_result(result, y_data_list)\n\n\nif __name__ == '__main__':\n    run_add_common()\n    run_add_cast()\n    run_add_restful()\n"
  },
  {
    "path": "example/tensor_add/serving_server.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The server of example add\"\"\"\n\nimport os\nimport sys\nfrom mindspore_serving import server\n\n\ndef start():\n    servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n\n    servable_config = server.ServableStartConfig(servable_directory=servable_dir, servable_name=\"add\",\n                                                 device_ids=(0, 1))\n    server.start_servables(servable_configs=servable_config)\n\n    server.start_grpc_server(address=\"127.0.0.1:5500\")\n    server.start_restful_server(address=\"127.0.0.1:1500\")\n\n\nif __name__ == \"__main__\":\n    start()\n"
  },
  {
    "path": "mindspore_serving/CMakeLists.txt",
    "content": "# This branch assumes that gRPC and all its dependencies are already installed\n# on this system, so they can be located by find_package().\n\n# Find Protobuf installation\n# Looks for protobuf-config.cmake file installed by Protobuf's cmake installation.\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -Wl,-rpath,$ORIGIN:$ORIGIN/lib -Wl,--no-as-needed\")\n\nif(ENABLE_COVERAGE)\n    add_compile_options(-coverage)\n    add_link_options(-lgcov --coverage)\nendif()\n\n# Proto file\n# Generated sources\nfile(GLOB_RECURSE PROTO_FILE_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ./proto/*.proto)\nms_grpc_generate(PROTO_SRC_LIST PROTO_HDR_LIST ${PROTO_FILE_LIST})\nadd_library(PROTO_SRC_LIB STATIC ${PROTO_SRC_LIST})\ntarget_compile_options(PROTO_SRC_LIB PRIVATE \"-Wno-array-bounds\")\n\ninclude_directories(\"${CMAKE_BINARY_DIR}/mindspore_serving\" ${CMAKE_BINARY_DIR}) # for proto header file\ninclude_directories(\"ccsrc\")\n\n# serving_common for c++ server and python interface\nfile(GLOB_RECURSE SERVING_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}\n        \"ccsrc/master/*.cc\" \"ccsrc/common/*.cc\" \"ccsrc/worker/*.cc\")\n\nfile(GLOB_RECURSE SERVING_ASCEND_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}\n        \"ccsrc/worker/inference/mindspore_model_wrap.cc\")\n\nlist(REMOVE_ITEM SERVING_SRC ${SERVING_ASCEND_SRC})\nadd_library(serving_common SHARED ${SERVING_SRC})\nadd_library(serving_ascend SHARED ${SERVING_ASCEND_SRC})\ntarget_link_libraries(serving_ascend PRIVATE serving_common)\ntarget_link_libraries(serving_ascend PRIVATE ${SECUREC_LIBRARY})\n\ninclude(CheckPIESupported)\ncheck_pie_supported()\nset_property(TARGET serving_common PROPERTY POSITION_INDEPENDENT_CODE TRUE)\nset_property(TARGET serving_ascend PROPERTY POSITION_INDEPENDENT_CODE TRUE)\n\ntarget_link_libraries(serving_common PRIVATE PROTO_SRC_LIB)\ntarget_link_libraries(serving_common PRIVATE mindspore_serving::ssl mindspore_serving::crypto)\ntarget_link_libraries(serving_common PRIVATE 
mindspore_serving::grpc++)\ntarget_link_libraries(serving_common PRIVATE mindspore_serving::protobuf pthread rt)\ntarget_link_libraries(serving_common PRIVATE mindspore_serving::event mindspore_serving::event_pthreads)\ntarget_link_libraries(serving_common PRIVATE mindspore_serving::event_core)\ntarget_link_libraries(serving_common PRIVATE mindspore_serving::event_openssl)\ntarget_link_libraries(serving_common PRIVATE mindspore_serving::glog)\ntarget_link_libraries(serving_common PRIVATE mindspore_serving::eigen)\ntarget_link_libraries(serving_common PRIVATE ${SECUREC_LIBRARY})\n\nset_target_properties(serving_common PROPERTIES SKIP_BUILD_RPATH TRUE)\n\n# python\nadd_compile_definitions(ENABLE_PYTHON)\nfile(GLOB_RECURSE PY_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} \"ccsrc/python/*.cc\")\n\nfind_package(Python3 3.7 COMPONENTS Interpreter Development)\nif(Python3_FOUND)\n    set(PYTHON_INCLUDE_DIRS \"${Python3_INCLUDE_DIRS}\")\n    set(PYTHON_LIBRARIES \"${Python3_LIBRARIES}\")\nelse()\n    find_python_package(py_inc py_lib)\n    set(PYTHON_INCLUDE_DIRS \"${py_inc}\")\n    set(PYTHON_LIBRARIES \"${py_lib}\")\nendif()\n\ninclude_directories(${PYTHON_INCLUDE_DIRS})\npybind11_add_module(_mindspore_serving NO_EXTRAS ${PY_SRC_LIST})\nset_target_properties(_mindspore_serving PROPERTIES LINK_FLAGS_RELEASE -s)\ntarget_link_libraries(_mindspore_serving PRIVATE \"${PYTHON_LIBRARIES}\")\ntarget_include_directories(_mindspore_serving PRIVATE ${pybind11_INCLUDE_DIRS})\ntarget_link_libraries(_mindspore_serving PRIVATE serving_common)\nset_property(TARGET _mindspore_serving PROPERTY POSITION_INDEPENDENT_CODE TRUE)\n\ntarget_link_options(serving_common PRIVATE -Wl,-init,mindspore_serving_log_init)\n\n# user set path\nif(ENABLE_TESTCASES)\n    include_directories(${CMAKE_SOURCE_DIR}/tests/ut/stub)\n    target_link_libraries(serving_ascend PRIVATE mindspore)\nelseif(MS_WHL_LIB_PATH)\n    include_directories(${MS_WHL_LIB_PATH}/../)\nelseif(MS_BACKEND_HEADER)\n    
include_directories(${CMAKE_SOURCE_DIR}/third_party/mindspore)\n    include_directories(${CMAKE_SOURCE_DIR}/third_party/mindspore/mindspore/core)\nelse()\n    message(FATAL_ERROR \"Please check MindSpore path.\")\nendif()\n"
  },
  {
    "path": "mindspore_serving/__init__.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MindSpore Serving.\"\"\"\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/buffer_tensor.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"common/buffer_tensor.h\"\nnamespace mindspore::serving {\nBufferTensor::BufferTensor(DataType type, const std::vector<int64_t> &shape, uint8_t *data, size_t data_len,\n                           bool data_readonly) {\n  type_ = type;\n  shape_ = shape;\n  data_ = data;\n  data_len_ = data_len;\n  data_readonly_ = data_readonly;\n}\n\nBufferTensor::~BufferTensor() { data_ = nullptr; }\n\nstd::vector<int64_t> BufferTensor::shape() const { return shape_; }\n\nvoid BufferTensor::set_shape(const std::vector<int64_t> &shape) { shape_ = shape; }\n\nDataType BufferTensor::data_type() const { return type_; }\n\nvoid BufferTensor::set_data_type(DataType type) { type_ = type; }\n\nconst uint8_t *BufferTensor::data() const { return data_; }\n\nsize_t BufferTensor::data_size() const { return data_len_; }\n\nbool BufferTensor::resize_data(size_t data_len) {\n  if (data_len != data_len_) {\n    MSI_LOG_EXCEPTION << \"Buffer tensor cannot resize data\";\n  }\n  return true;\n}\n\nuint8_t *BufferTensor::mutable_data() {\n  if (data_readonly_) {\n    MSI_LOG_EXCEPTION << \"Buffer tensor is create readonly\";\n  }\n  return data_;\n}\n\nsize_t BufferTensor::bytes_data_size() const {\n  if (!is_bytes_val_data()) {\n    return 0;\n  }\n  return 1;\n}\n\nvoid BufferTensor::get_bytes_data(size_t index, const uint8_t **data, size_t *bytes_len) 
const {\n  MSI_EXCEPTION_IF_NULL(data);\n  MSI_EXCEPTION_IF_NULL(bytes_len);\n  if (!is_bytes_val_data()) {\n    MSI_LOG_EXCEPTION << \"Buffer tensor data type is not kMSI_Bytes or kMSI_String, cannot get bytes data\";\n  }\n  *data = data_;\n  *bytes_len = data_len_;\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/buffer_tensor.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_BUFFER_TENSOR_H\n#define MINDSPORE_SERVING_BUFFER_TENSOR_H\n\n#include <vector>\n#include \"common/serving_common.h\"\n\nnamespace mindspore::serving {\nclass BufferTensor : public TensorBase {\n public:\n  // the data's lifetime must longer than this object\n  BufferTensor(DataType type, const std::vector<int64_t> &shape, uint8_t *data, size_t data_len, bool data_readonly);\n  ~BufferTensor();\n\n  // For all data type\n  std::vector<int64_t> shape() const override;\n  void set_shape(const std::vector<int64_t> &shape) override;\n  DataType data_type() const override;\n  void set_data_type(DataType type) override;\n\n  // All the following interfaces are not for kMSI_String and kMSI_Bytes\n  const uint8_t *data() const override;\n  size_t data_size() const override;\n  bool resize_data(size_t data_len) override;\n  uint8_t *mutable_data() override;\n\n  // For kMSI_String and kMSI_Bytes\n  void clear_bytes_data() override { MSI_LOG_EXCEPTION << \"Buffer tensor cannot clear bytes data\"; }\n  void add_bytes_data(const uint8_t *, size_t) override { MSI_LOG_EXCEPTION << \"Buffer tensor cannot add bytes data\"; }\n\n  size_t bytes_data_size() const override;\n  void get_bytes_data(size_t index, const uint8_t **data, size_t *bytes_len) const override;\n\n private:\n  uint8_t *data_ = nullptr;\n  size_t 
data_len_ = 0;\n  std::vector<int64_t> shape_;\n  DataType type_;\n  bool data_readonly_ = false;\n};\n\nclass BufferTensorWithOwner : public BufferTensor {\n public:\n  BufferTensorWithOwner(const TensorBasePtr &buffer_tensor_owner, DataType type, const std::vector<int64_t> &shape,\n                        uint8_t *data, size_t data_len, bool data_readonly)\n      : BufferTensor(type, shape, data, data_len, data_readonly), buffer_tensor_owner_(buffer_tensor_owner) {}\n  ~BufferTensorWithOwner() = default;\n\n private:\n  TensorBasePtr buffer_tensor_owner_;\n};\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_BUFFER_TENSOR_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/exit_handle.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"common/exit_handle.h\"\n#include <signal.h>\n#include <utility>\n\nnamespace mindspore {\nnamespace serving {\nExitSignalHandle &ExitSignalHandle::Instance() {\n  static ExitSignalHandle instance = ExitSignalHandle();\n  return instance;\n}\n\nvoid ExitSignalHandle::InitSignalHandle() {\n  if (!has_inited_.test_and_set()) {\n    (void)signal(SIGINT, HandleSignal);\n    (void)signal(SIGTERM, HandleSignal);\n  }\n}\n\n// waiting ctrl+c or stop message to exit,\n// if no server is running or server has exited, there is no need to wait\nvoid ExitSignalHandle::MasterWait() {\n  if (!is_running_) {\n    MSI_LOG_INFO << \"Exit Handle has not started or has exited\";\n    return;\n  }\n  auto exit_future = master_exit_requested_.get_future();\n  exit_future.wait();\n  MSI_LOG_WARNING << \"Receive exit signal \" << exit_signal_;\n}\n\n// waiting ctrl+c or stop message to exit,\n// if no server is running or server has exited, there is no need to wait\nvoid ExitSignalHandle::WorkerWait() {\n  if (!is_running_) {\n    MSI_LOG_INFO << \"Exit Handle has not started or has exited\";\n    return;\n  }\n  auto exit_future = worker_exit_requested_.get_future();\n  exit_future.wait();\n  MSI_LOG_WARNING << \"Receive exit signal \" << exit_signal_;\n}\n\n// waiting ctrl+c or stop message to exit,\n// if no server is running or server has 
exited, there is no need to wait\nvoid ExitSignalHandle::AgentWait() {\n  if (!is_running_) {\n    MSI_LOG_INFO << \"Exit Handle has not started or has exited\";\n    return;\n  }\n  auto exit_future = agent_exit_requested_.get_future();\n  exit_future.wait();\n  MSI_LOG_WARNING << \"Receive exit signal \" << exit_signal_;\n}\n\nvoid ExitSignalHandle::Start() {\n  if (is_running_) {\n    return;\n  }\n  is_running_ = true;\n  master_exit_requested_ = std::promise<void>();\n  worker_exit_requested_ = std::promise<void>();\n  agent_exit_requested_ = std::promise<void>();\n  has_exited_.clear();\n  InitSignalHandle();\n}\n\nvoid ExitSignalHandle::Stop() { HandleSignal(0); }\n\nbool ExitSignalHandle::HasStopped() const { return !is_running_; }\n\nvoid ExitSignalHandle::HandleSignal(int sig) {\n  auto &instance = Instance();\n  instance.HandleSignalInner(sig);\n}\n\nvoid ExitSignalHandle::HandleSignalInner(int sig) {\n  if (!has_exited_.test_and_set()) {\n    exit_signal_ = sig;\n    master_exit_requested_.set_value();\n    worker_exit_requested_.set_value();\n    agent_exit_requested_.set_value();\n    is_running_ = false;\n  }\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/exit_handle.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_EXIT_HANDLE_H\n#define MINDSPORE_SERVING_EXIT_HANDLE_H\n#include <functional>\n#include <atomic>\n#include <future>\n#include \"common/serving_common.h\"\n\nnamespace mindspore {\nnamespace serving {\n// Handle Ctrl+C signal. When the master or worker is waiting for the Ctrl+C signal,\n// it can continue to perform subsequent operations, such as cleaning.\nclass MS_API ExitSignalHandle {\n public:\n  static ExitSignalHandle &Instance();\n  void InitSignalHandle();\n  void MasterWait();\n  void WorkerWait();\n  void AgentWait();\n  void Start();\n  void Stop();\n  bool HasStopped() const;\n\n private:\n  std::promise<void> master_exit_requested_;\n  std::promise<void> worker_exit_requested_;\n  std::promise<void> agent_exit_requested_;\n  std::atomic_flag has_exited_ = true;\n  std::atomic_flag has_inited_ = ATOMIC_FLAG_INIT;\n  std::atomic_bool is_running_ = false;\n  int exit_signal_ = 0;\n\n  static void HandleSignal(int sig);\n  void HandleSignalInner(int sig);\n};\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_EXIT_HANDLE_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/float16.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_SERVING_COMMON_FLOAT16_H_\n#define MINDSPORE_SERVING_COMMON_FLOAT16_H_\n\n#if defined(ENABLE_ARM32) || defined(ENABLE_ARM64)\n// Built for lite and ARM\n#include <arm_neon.h>\n\nusing float16 = float16_t;\ninline float half_to_float(float16 h) { return static_cast<float>(h); }\n#else\n#include <functional>\n#include \"Eigen/Core\"\n\nusing float16 = Eigen::half;\nusing HalfToFloat = std::function<float(float16)>;\nconst inline HalfToFloat half_to_float = Eigen::half_impl::half_to_float;\n#endif\n#endif  // MINDSPORE_SERVING_COMMON_FLOAT16_H_\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/grpc_async_server.h",
    "content": "/**\r\n * Copyright 2020 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MINDSPORE_SERVING_GRPC_ASYNC_SERVER_H\r\n#define MINDSPORE_SERVING_GRPC_ASYNC_SERVER_H\r\n\r\n#include <grpcpp/grpcpp.h>\r\n#include <grpcpp/health_check_service_interface.h>\r\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\r\n#include <memory>\r\n#include <utility>\r\n#include <string>\r\n#include <future>\r\n#include \"common/serving_common.h\"\r\n#include \"common/ssl_config.h\"\r\n#include \"common/utils.h\"\r\n\r\nnamespace mindspore::serving {\r\nclass GrpcAsyncServiceContextBase {\r\n public:\r\n  GrpcAsyncServiceContextBase() = default;\r\n  virtual ~GrpcAsyncServiceContextBase() = default;\r\n\r\n  virtual void NewAndHandleRequest() = 0;\r\n\r\n  bool HasFinish() const { return finished_; }\r\n  void SetFinish() { finished_ = true; }\r\n\r\n private:\r\n  bool finished_ = false;\r\n};\r\n\r\ntemplate <class ServiceImpl, class AsyncService, class Derived>\r\nclass GrpcAsyncServiceContext : public GrpcAsyncServiceContextBase {\r\n public:\r\n  GrpcAsyncServiceContext(ServiceImpl *service_impl, AsyncService *async_service, grpc::ServerCompletionQueue *cq)\r\n      : service_impl_(service_impl), async_service_(async_service), cq_(cq) {}\r\n  ~GrpcAsyncServiceContext() = default;\r\n  GrpcAsyncServiceContext() = delete;\r\n\r\n  virtual void StartEnqueueRequest() = 0;\r\n  
virtual void HandleRequest() = 0;\r\n\r\n  static void EnqueueRequest(ServiceImpl *service_impl, AsyncService *async_service, grpc::ServerCompletionQueue *cq) {\r\n    auto call = new Derived(service_impl, async_service, cq);\r\n    call->StartEnqueueRequest();\r\n  }\r\n\r\n  void NewAndHandleRequest() final {\r\n    EnqueueRequest(service_impl_, async_service_, cq_);\r\n    HandleRequest();\r\n  }\r\n\r\n protected:\r\n  grpc::ServerContext ctx_;\r\n\r\n  ServiceImpl *service_impl_;\r\n  AsyncService *async_service_;\r\n  grpc::ServerCompletionQueue *cq_;\r\n};\r\n\r\ntemplate <class AsyncService>\r\nclass GrpcAsyncServer {\r\n public:\r\n  GrpcAsyncServer() {}\r\n  virtual ~GrpcAsyncServer() { Stop(); }\r\n\r\n  virtual void EnqueueRequests() = 0;\r\n\r\n  Status Start(const std::string &socket_address, const SSLConfig &ssl_config, int max_msg_mb_size,\r\n               const std::string &server_tag) {\r\n    if (in_running_) {\r\n      return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: \" << server_tag << \" server is already running\";\r\n    }\r\n\r\n    grpc::ServerBuilder builder;\r\n    if (max_msg_mb_size > 0) {\r\n      constexpr uint32_t mbytes_to_bytes = 1u << 20;\r\n      builder.SetMaxSendMessageSize(static_cast<int>(max_msg_mb_size * mbytes_to_bytes));\r\n      builder.SetMaxReceiveMessageSize(static_cast<int>(max_msg_mb_size * mbytes_to_bytes));\r\n    }\r\n    builder.AddChannelArgument(GRPC_ARG_ALLOW_REUSEPORT, 0);\r\n    int port_tcpip = 0;\r\n    auto creds = BuildServerCredentialsFromSSLConfigFile(ssl_config);\r\n\r\n    Status status;\r\n    status = CheckServerAddress(socket_address, server_tag);\r\n    if (status != SUCCESS) {\r\n      return status;\r\n    }\r\n    builder.AddListeningPort(socket_address, creds, &port_tcpip);\r\n    status = RegisterService(&builder);\r\n    if (status != SUCCESS) return status;\r\n    cq_ = builder.AddCompletionQueue();\r\n    server_ = builder.BuildAndStart();\r\n    if (!server_) {\r\n      
return INFER_STATUS_LOG_ERROR(FAILED) << \"Serving Error: \" << server_tag\r\n                                            << \" server start failed, create server failed, address \" << socket_address;\r\n    }\r\n    auto grpc_server_run = [this]() { HandleRequests(); };\r\n    grpc_thread_ = std::thread(grpc_server_run);\r\n    in_running_ = true;\r\n    MSI_LOG(INFO) << server_tag << \" server start success, listening on \" << socket_address;\r\n    std::cout << \"Serving: \" << server_tag << \" server start success, listening on \" << socket_address << std::endl;\r\n    return SUCCESS;\r\n  }\r\n\r\n  Status CheckServerAddress(const std::string &address, const std::string &server_tag) {\r\n    Status status;\r\n    std::string prefix = \"unix:\";\r\n    if (address.substr(0, prefix.size()) == prefix) {\r\n      if (address.size() > prefix.size()) {\r\n        return SUCCESS;\r\n      } else {\r\n        status = INFER_STATUS_LOG_ERROR(FAILED) << \"Serving Error: Empty grpc server unix domain socket address\";\r\n        return status;\r\n      }\r\n    }\r\n    status = common::CheckAddress(address, server_tag, nullptr, nullptr);\r\n    if (status != SUCCESS) {\r\n      return status;\r\n    }\r\n    return SUCCESS;\r\n  }\r\n\r\n  std::shared_ptr<grpc::ServerCredentials> BuildServerCredentialsFromSSLConfigFile(const SSLConfig &ssl_config) {\r\n    if (!ssl_config.use_ssl) {\r\n      return grpc::InsecureServerCredentials();\r\n    }\r\n    grpc::SslServerCredentialsOptions ssl_ops(ssl_config.verify_client\r\n                                                ? 
GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY\r\n                                                : GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE);\r\n\r\n    if (!ssl_config.custom_ca.empty()) {\r\n      ssl_ops.pem_root_certs = ssl_config.custom_ca;\r\n    }\r\n    grpc::SslServerCredentialsOptions::PemKeyCertPair keycert = {ssl_config.private_key, ssl_config.certificate};\r\n    ssl_ops.pem_key_cert_pairs.push_back(keycert);\r\n\r\n    return grpc::SslServerCredentials(ssl_ops);\r\n  }\r\n\r\n  Status HandleRequests() {\r\n    void *tag;\r\n    bool ok = false;\r\n    EnqueueRequests();\r\n    while (cq_->Next(&tag, &ok)) {\r\n      ProcessRequest(tag, ok);\r\n    }\r\n    return SUCCESS;\r\n  }\r\n\r\n  void Stop() {\r\n    if (in_running_) {\r\n      if (server_) {\r\n        server_->Shutdown();\r\n      }\r\n      // Always shutdown the completion queue after the server.\r\n      if (cq_) {\r\n        cq_->Shutdown();\r\n      }\r\n      grpc_thread_.join();\r\n    }\r\n    in_running_ = false;\r\n  }\r\n\r\n  Status RegisterService(grpc::ServerBuilder *builder) {\r\n    builder->RegisterService(&svc_);\r\n    return SUCCESS;\r\n  }\r\n\r\n  void ProcessRequest(void *tag, bool rpc_ok) {\r\n    auto rq = static_cast<GrpcAsyncServiceContextBase *>(tag);\r\n    if (rq->HasFinish() || !rpc_ok) {  // !rpc_ok: cancel get request when shutting down.\r\n      delete rq;\r\n    } else {\r\n      rq->NewAndHandleRequest();\r\n      rq->SetFinish();  // will delete next time\r\n    }\r\n  }\r\n\r\n protected:\r\n  std::unique_ptr<grpc::ServerCompletionQueue> cq_;\r\n  std::unique_ptr<grpc::Server> server_;\r\n\r\n  AsyncService svc_;\r\n\r\n  bool in_running_ = false;\r\n  std::thread grpc_thread_;\r\n};\r\n}  // namespace mindspore::serving\r\n\r\n#endif  // MINDSPORE_SERVING_GRPC_ASYNC_SERVER_H\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/grpc_client.cc",
    "content": "/**\r\n * Copyright 2021 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"common/grpc_client.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {\r\nstd::unique_ptr<MSPredictClient> client_;\r\nstd::unique_ptr<MSDistributedClient> distributed_client_;\r\n}  // namespace serving\r\n}  // namespace mindspore\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/grpc_client.h",
    "content": "/**\r\n * Copyright 2021 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MINDSPORE_SERVING_MASTER_GRPC_CLIENT_H\r\n#define MINDSPORE_SERVING_MASTER_GRPC_CLIENT_H\r\n\r\n#include <grpcpp/grpcpp.h>\r\n#include <grpcpp/health_check_service_interface.h>\r\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\r\n#include <memory>\r\n#include <functional>\r\n#include <thread>\r\n#include <string>\r\n#include <utility>\r\n#include \"common/serving_common.h\"\r\n#include \"proto/ms_service.pb.h\"\r\n#include \"proto/ms_service.grpc.pb.h\"\r\n#include \"proto/ms_master.pb.h\"\r\n#include \"proto/ms_master.grpc.pb.h\"\r\n#include \"proto/ms_worker.grpc.pb.h\"\r\n#include \"proto/ms_agent.pb.h\"\r\n#include \"proto/ms_agent.grpc.pb.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {\r\nusing PredictOnFinish = std::function<void()>;\r\n\r\nusing AsyncPredictCallback = std::function<void(Status status)>;\r\n\r\ntemplate <typename Request, typename Reply, typename MSStub>\r\nclass MSServiceClient {\r\n public:\r\n  MSServiceClient() = default;\r\n  ~MSServiceClient() {\r\n    if (in_running_) {\r\n      cq_.Shutdown();\r\n      if (client_thread_.joinable()) {\r\n        try {\r\n          client_thread_.join();\r\n        } catch (const std::system_error &) {\r\n        } catch (...) 
{\r\n        }\r\n      }\r\n    }\r\n    in_running_ = false;\r\n  }\r\n\r\n  void Start() {\r\n    client_thread_ = std::thread(&MSServiceClient::AsyncCompleteRpc, this);\r\n    in_running_ = true;\r\n  }\r\n\r\n  void AsyncCompleteRpc() {\r\n    void *got_tag;\r\n    bool ok = false;\r\n\r\n    while (cq_.Next(&got_tag, &ok)) {\r\n      AsyncClientCall *call = static_cast<AsyncClientCall *>(got_tag);\r\n      if (call->status.ok()) {\r\n        call->callback(SUCCESS);\r\n      } else {\r\n        MSI_LOG_ERROR << \"RPC failed: \" << call->status.error_code() << \", \" << call->status.error_message()\r\n                      << \", target address: \" << call->target_address;\r\n        call->callback(Status(WORKER_UNAVAILABLE, call->status.error_message()));\r\n      }\r\n      delete call;\r\n    }\r\n  }\r\n\r\n  void PredictAsync(const Request &request, Reply *reply, MSStub *stub, const AsyncPredictCallback &callback,\r\n                    const std::string &target_address) {\r\n    AsyncClientCall *call = new AsyncClientCall;\r\n    call->reply = reply;\r\n    call->callback = callback;\r\n    call->target_address = target_address;\r\n    call->response_reader = stub->PrepareAsyncPredict(&call->context, request, &cq_);\r\n    call->response_reader->StartCall();\r\n    call->response_reader->Finish(call->reply, &call->status, call);\r\n  }\r\n\r\n private:\r\n  struct AsyncClientCall {\r\n    grpc::ClientContext context;\r\n    grpc::Status status;\r\n    Reply *reply;\r\n    std::string target_address;\r\n    AsyncPredictCallback callback;\r\n    std::shared_ptr<grpc::ClientAsyncResponseReader<Reply>> response_reader;\r\n  };\r\n\r\n  grpc::CompletionQueue cq_;\r\n  std::thread client_thread_;\r\n  bool in_running_ = false;\r\n};\r\n\r\nusing MSPredictClient = MSServiceClient<proto::PredictRequest, proto::PredictReply, proto::MSWorker::Stub>;\r\nusing MSDistributedClient =\r\n  MSServiceClient<proto::DistributedPredictRequest, 
proto::DistributedPredictReply, proto::MSAgent::Stub>;\r\nextern std::unique_ptr<MSPredictClient> client_;\r\nextern std::unique_ptr<MSDistributedClient> distributed_client_;\r\n}  // namespace serving\r\n}  // namespace mindspore\r\n\r\n#endif  // MINDSPORE_SERVING_MASTER_GRPC_CLIENT_H\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/grpc_server.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"common/grpc_server.h\"\n\nnamespace mindspore::serving {\nStatus GrpcServer::Start(const std::shared_ptr<grpc::Service> &service, const std::string &server_address,\n                         int max_msg_mb_size, const std::string &server_tag) {\n  service_ = service;\n  if (in_running_) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: \" << server_tag << \" server is already running\";\n  }\n  // Set the port is not reuseable\n  auto option = grpc::MakeChannelArgumentOption(GRPC_ARG_ALLOW_REUSEPORT, 0);\n  grpc::ServerBuilder serverBuilder;\n  (void)serverBuilder.SetOption(std::move(option));\n  if (max_msg_mb_size > 0) {\n    constexpr int mbytes_to_bytes = static_cast<int>(1u << 20);\n    (void)serverBuilder.SetMaxSendMessageSize(max_msg_mb_size * mbytes_to_bytes);\n    (void)serverBuilder.SetMaxReceiveMessageSize(max_msg_mb_size * mbytes_to_bytes);\n  }\n  (void)serverBuilder.AddListeningPort(server_address, grpc::InsecureServerCredentials());\n  (void)serverBuilder.RegisterService(service.get());\n  server_ = serverBuilder.BuildAndStart();\n  if (server_ == nullptr) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Serving Error: \" << server_tag\n                                          << \" server start failed, create server failed, address \" << server_address;\n  }\n\n  auto 
grpc_server_run = [this, server_address, server_tag]() {\n    MSI_LOG(INFO) << server_tag << \" server start success, listening on \" << server_address;\n    server_->Wait();\n  };\n\n  grpc_thread_ = std::thread(grpc_server_run);\n  in_running_ = true;\n  return SUCCESS;\n}\n\nvoid GrpcServer::Stop() {\n  if (in_running_) {\n    server_->Shutdown();\n    grpc_thread_.join();\n    server_ = nullptr;\n  }\n  in_running_ = false;\n}\n\nstd::shared_ptr<grpc::Channel> GrpcServer::CreateChannel(const std::string &target_str) {\n  grpc::ChannelArguments channel_args;\n  constexpr int mbytes_to_bytes = static_cast<int>(1u << 20);\n  channel_args.SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, gRpcMaxMBMsgSize * mbytes_to_bytes);\n  std::shared_ptr<grpc::Channel> channel =\n    grpc::CreateCustomChannel(target_str, grpc::InsecureChannelCredentials(), channel_args);\n  return channel;\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/grpc_server.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_GRPC_SERVER_H\n#define MINDSPORE_SERVING_GRPC_SERVER_H\n\n#include <grpcpp/grpcpp.h>\n#include <grpcpp/health_check_service_interface.h>\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\n#include <memory>\n#include <utility>\n#include <string>\n#include <future>\n#include \"common/serving_common.h\"\n\nnamespace mindspore::serving {\nconstexpr int gRpcDefaultMsgMBSize = 100;\nconstexpr int gRpcMaxMBMsgSize = 512;  // max 512 MB\n\nclass GrpcServer {\n public:\n  GrpcServer() = default;\n  ~GrpcServer() noexcept { Stop(); }\n\n  Status Start(const std::shared_ptr<grpc::Service> &service, const std::string &server_address, int max_msg_size,\n               const std::string &server_tag);\n  void Stop();\n  static std::shared_ptr<grpc::Channel> CreateChannel(const std::string &target_str);\n\n private:\n  std::unique_ptr<grpc::Server> server_;\n  std::thread grpc_thread_;\n  bool in_running_ = false;\n  std::shared_ptr<grpc::Service> service_;\n};\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_GRPC_SERVER_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/heart_beat.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"common/heart_beat.h\"\nnamespace mindspore::serving {}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/heart_beat.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_HEART_BEAT_H\n#define MINDSPORE_SERVING_HEART_BEAT_H\n\n#include <grpcpp/grpcpp.h>\n#include <grpcpp/health_check_service_interface.h>\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\n#include <vector>\n#include <unordered_map>\n#include <memory>\n#include <string>\n#include <condition_variable>\n#include <thread>\n#include <functional>\n#include <chrono>\n#include <utility>\n#include \"common/serving_common.h\"\n#include \"common/grpc_server.h\"\n#include \"proto/ms_service.pb.h\"\n#include \"proto/ms_service.grpc.pb.h\"\nnamespace mindspore::serving {\nusing TimerCallback = std::function<void()>;\n\nclass Timer {\n public:\n  Timer() {}\n  ~Timer() {\n    is_stoped_.store(true);\n    cv_.notify_all();\n    if (thread_.joinable()) {\n      try {\n        thread_.join();\n      } catch (const std::system_error &) {\n      } catch (...) 
{\n      }\n    }\n  }\n\n  void StartTimer(int64_t millisecond, TimerCallback callback) {\n    auto timer_run = [this, millisecond, callback]() {\n      while (!is_stoped_.load()) {\n        std::unique_lock<std::mutex> lk(cv_m_);\n        if (cv_.wait_for(lk, std::chrono::milliseconds(millisecond)) == std::cv_status::timeout) {\n          callback();\n        }\n      }\n    };\n    thread_ = std::thread(timer_run);\n  }\n  void StopTimer() {\n    is_stoped_.store(true);\n    cv_.notify_all();\n  }\n\n private:\n  std::mutex cv_m_;\n  std::thread thread_;\n  std::condition_variable cv_;\n  std::atomic<bool> is_stoped_ = false;\n};\n\ntemplate <class SendStub, class RecvStub>\nclass Watcher {\n public:\n  explicit Watcher(const std::string host_address) { host_address_ = host_address; }\n  ~Watcher() {\n    if (ping_running_) {\n      ping_cq_.Shutdown();\n      if (ping_thread_.joinable()) {\n        try {\n          ping_thread_.join();\n        } catch (const std::system_error &) {\n        } catch (...) {\n        }\n      }\n    }\n    ping_running_ = false;\n    if (pong_running_) {\n      pong_cq_.Shutdown();\n      if (pong_thread_.joinable()) {\n        try {\n          pong_thread_.join();\n        } catch (const std::system_error &) {\n        } catch (...) 
{\n        }\n      }\n    }\n    pong_running_ = false;\n  }\n  void StartWatch(const std::string &address) {\n    if (ping_running_ == false) {\n      ping_thread_ = std::thread(&Watcher::AsyncPingRpc, this);\n      ping_running_ = true;\n    }\n    auto it = watchee_map_.find(address);\n    if (it != watchee_map_.end()) {\n      MSI_LOG(INFO) << \"watchee exist: \" << address;\n      it->second.timeouts_ = 0;\n      it->second.timer_ = std::make_shared<Timer>();\n      // add timer\n      it->second.timer_->StartTimer(max_time_out_ / max_ping_times_,\n                                    std::bind(&Watcher::RecvPongTimeOut, this, address));\n    } else {\n      WatcheeContext context;\n      auto channel = GrpcServer::CreateChannel(address);\n      context.stub_ = SendStub::NewStub(channel);\n      context.timer_ = std::make_shared<Timer>();\n      // add timer\n      context.timer_->StartTimer(max_time_out_ / max_ping_times_, std::bind(&Watcher::RecvPongTimeOut, this, address));\n      watchee_map_.insert(make_pair(address, context));\n    }\n    MSI_LOG(INFO) << \"Begin to send ping to \" << address;\n    SendPing(address);\n  }\n  void StopWatch(const std::string &address) {\n    // clear map and timer\n    auto it = watchee_map_.find(address);\n    if (it == watchee_map_.end()) {\n      MSI_LOG(INFO) << \"watchee not exist: \" << address;\n      return;\n    }\n    it->second.timer_->StopTimer();\n    watchee_map_.erase(address);\n  }\n\n  void SendPing(const std::string &address) {\n    auto it = watchee_map_.find(address);\n    if (it == watchee_map_.end()) {\n      MSI_LOG(INFO) << \"watchee not exist: \" << address;\n      return;\n    }\n    it->second.timeouts_ += 1;\n    // send async message\n    PingAsync(address);\n  }\n\n  void RecvPing(const std::string &address) {\n    std::unique_lock<std::mutex> lock{m_lock_};\n    if (pong_running_ == false) {\n      pong_thread_ = std::thread(&Watcher::AsyncPongRpc, this);\n      pong_running_ = true;\n    
}\n    // recv message\n    auto it = watcher_map_.find(address);\n    if (it != watcher_map_.end()) {\n      it->second.timer_->StopTimer();\n      it->second.timer_ = std::make_shared<Timer>();\n      // add timer\n      it->second.timer_->StartTimer(max_time_out_, std::bind(&Watcher::RecvPingTimeOut, this, address));\n    } else {\n      WatcherContext context;\n      auto channel = GrpcServer::CreateChannel(address);\n      context.stub_ = RecvStub::NewStub(channel);\n      context.timer_ = std::make_shared<Timer>();\n      // add timer\n      context.timer_->StartTimer(max_time_out_, std::bind(&Watcher::RecvPingTimeOut, this, address));\n      watcher_map_.insert(make_pair(address, context));\n      MSI_LOG(INFO) << \"Begin to send pong to \" << address;\n    }\n    // send async message\n    PongAsync(address);\n  }\n\n  void RecvPong(const std::string &address) {\n    std::unique_lock<std::mutex> lock{m_lock_};\n    // recv message\n    auto it = watchee_map_.find(address);\n    if (it != watchee_map_.end()) {\n      it->second.timeouts_ = 0;\n    } else {\n      MSI_LOG(INFO) << \"Recv Pong after timeout or stop\";\n    }\n  }\n\n  void RecvPongTimeOut(const std::string &address) {\n    std::unique_lock<std::mutex> lock{m_lock_};\n    auto it = watchee_map_.find(address);\n    if (it != watchee_map_.end()) {\n      if (it->second.timeouts_ >= max_ping_times_) {\n        // add exit handle\n        MSI_LOG(ERROR) << \"Recv Pong Time Out from \" << address << \", host address is \" << host_address_;\n        it->second.timer_->StopTimer();\n        // need erase map\n        return;\n      }\n      SendPing(address);\n    } else {\n      MSI_LOG(INFO) << \"Recv Pong Time Out after timeout or stop\";\n    }\n  }\n\n  void RecvPingTimeOut(const std::string &address) {\n    std::unique_lock<std::mutex> lock{m_lock_};\n    auto it = watcher_map_.find(address);\n    if (it != watcher_map_.end()) {\n      MSI_LOG(ERROR) << \"Recv Ping Time Out from \" << address << 
\", host address is \" << host_address_;\n      // add exit handle\n      it->second.timer_->StopTimer();\n      // need erase map\n    } else {\n      MSI_LOG(INFO) << \"Recv Ping Time Out after timeout or stop\";\n    }\n  }\n  void PingAsync(const std::string &address) {\n    auto it = watchee_map_.find(address);\n    if (it != watchee_map_.end()) {\n      proto::PingRequest request;\n      request.set_address(host_address_);\n      AsyncPingCall *call = new AsyncPingCall;\n      call->response_reader = it->second.stub_->PrepareAsyncPing(&call->context, request, &ping_cq_);\n      call->response_reader->StartCall();\n      call->response_reader->Finish(&call->reply, &call->status, call);\n    }\n  }\n\n  void PongAsync(const std::string &address) {\n    auto it = watcher_map_.find(address);\n    if (it != watcher_map_.end()) {\n      proto::PongRequest request;\n      request.set_address(host_address_);\n      AsyncPongCall *call = new AsyncPongCall;\n      call->response_reader = it->second.stub_->PrepareAsyncPong(&call->context, request, &pong_cq_);\n      call->response_reader->StartCall();\n      call->response_reader->Finish(&call->reply, &call->status, call);\n    }\n  }\n  void AsyncPingRpc() {\n    void *got_tag;\n    bool ok = false;\n    while (ping_cq_.Next(&got_tag, &ok)) {\n      AsyncPingCall *call = static_cast<AsyncPingCall *>(got_tag);\n      if (!call->status.ok()) {\n        MSI_LOG_DEBUG << \"RPC failed: \" << call->status.error_code() << \", \" << call->status.error_message();\n      }\n      delete call;\n    }\n  }\n  void AsyncPongRpc() {\n    void *got_tag;\n    bool ok = false;\n    while (pong_cq_.Next(&got_tag, &ok)) {\n      AsyncPongCall *call = static_cast<AsyncPongCall *>(got_tag);\n      if (!call->status.ok()) {\n        MSI_LOG_DEBUG << \"RPC failed: \" << call->status.error_code() << \", \" << call->status.error_message();\n      }\n      delete call;\n    }\n  }\n\n private:\n  struct WatcheeContext {\n    uint64_t timeouts_ 
= 0;\n    std::shared_ptr<Timer> timer_ = nullptr;\n    std::shared_ptr<typename SendStub::Stub> stub_ = nullptr;\n  };\n  struct WatcherContext {\n    uint64_t timeouts_ = 0;\n    std::shared_ptr<Timer> timer_ = nullptr;\n    std::shared_ptr<typename RecvStub::Stub> stub_ = nullptr;\n  };\n  struct AsyncPingCall {\n    grpc::ClientContext context;\n    grpc::Status status;\n    proto::PingReply reply;\n    std::shared_ptr<grpc::ClientAsyncResponseReader<proto::PingReply>> response_reader;\n  };\n  struct AsyncPongCall {\n    grpc::ClientContext context;\n    grpc::Status status;\n    proto::PongReply reply;\n    std::shared_ptr<grpc::ClientAsyncResponseReader<proto::PongReply>> response_reader;\n  };\n  std::string host_address_;\n  uint64_t max_ping_times_ = 20;\n  uint64_t max_time_out_ = 20000;  // 20s\n  std::unordered_map<std::string, WatcheeContext> watchee_map_;\n  std::unordered_map<std::string, WatcherContext> watcher_map_;\n  std::mutex m_lock_;\n  grpc::CompletionQueue ping_cq_;\n  std::thread ping_thread_;\n  bool ping_running_ = false;\n  grpc::CompletionQueue pong_cq_;\n  std::thread pong_thread_;\n  bool pong_running_ = false;\n};\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_HEART_BEAT_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/instance.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_INSTANCE_H\n#define MINDSPORE_SERVING_INSTANCE_H\n\n#include <map>\n#include <memory>\n#include \"common/serving_common.h\"\n#include \"common/servable.h\"\n#include \"common/instance_data.h\"\n\nnamespace mindspore::serving {\nstruct Instance {\n  InstanceData data;  // for inputs of function, predict, output\n\n  const MethodSignature *method_def = nullptr;\n  uint64_t stage_index = 0;\n  uint64_t stage_max = 0;\n  std::map<size_t, InstanceData> stage_data_list;  // input: 0, stage: 1-n\n\n  uint64_t user_id = 0;\n  Status error_msg = SUCCESS;\n};\n\nusing InstancePtr = std::shared_ptr<Instance>;\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_INSTANCE_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/instance_data.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_INSTANCE_DATA_H\n#define MINDSPORE_SERVING_INSTANCE_DATA_H\n\n#include <vector>\n#include \"common/serving_common.h\"\n\nnamespace mindspore::serving {\nusing InstanceData = std::vector<TensorBasePtr>;\n\nstruct ResultInstance {\n  InstanceData data;\n  Status error_msg = SUCCESS;\n};\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_INSTANCE_DATA_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/log.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"common/log.h\"\n#include <sys/time.h>\n#include <securec.h>\n#include <thread>\n#include <iomanip>\n#define google mindspore_serving_private\n#include \"glog/logging.h\"\n#include \"common/utils.h\"\n\nnamespace mindspore {\nnamespace serving {\nint g_ms_serving_log_level = static_cast<int>(LOG_WARNING);\n\nstatic std::string GetTimeString() {\n#if defined(_WIN32) || defined(_WIN64)\n  time_t time_seconds = time(0);\n  struct tm now_time;\n  localtime_s(&now_time, &time_seconds);\n  constexpr int base_year = 1900;\n  std::stringstream ss;\n  ss << now_time.tm_year + base_year << \"-\" << now_time.tm_mon + 1 << \"-\" << now_time.tm_mday << \" \" << now_time.tm_hour\n     << \":\" << now_time.tm_min << \":\" << now_time.tm_sec;\n  return ss.str();\n#else\n  constexpr auto BUFLEN = 80;\n  char buf[BUFLEN] = {'\\0'};\n  struct timeval cur_time;\n  (void)gettimeofday(&cur_time, nullptr);\n\n  struct tm now;\n  constexpr int width = 3;\n  constexpr int64_t time_convert_unit = 1000;\n  (void)localtime_r(&cur_time.tv_sec, &now);\n  (void)strftime(buf, BUFLEN, \"%Y-%m-%d-%H:%M:%S\", &now);  // format date and time\n  std::stringstream ss;\n  ss << \".\" << std::setfill('0') << std::setw(width) << cur_time.tv_usec / time_convert_unit << \".\" << std::setfill('0')\n     << std::setw(width) << cur_time.tv_usec % time_convert_unit;\n  
return std::string(buf) + ss.str();\n#endif\n}\n\nstatic std::string GetProcName() {\n#if defined(__APPLE__) || defined(__FreeBSD__)\n  const std::string appname = getprogname();\n#elif defined(_GNU_SOURCE)\n  const std::string appname = program_invocation_name;\n#else\n  const std::string appname = \"?\";\n#endif\n  // some times, the appname is an absolute path, its too long\n  std::string app_name(appname);\n  std::size_t pos = app_name.rfind(\"/\");\n  if (pos == std::string::npos) {\n    return app_name;\n  }\n  if (pos + 1 >= app_name.size()) {\n    return app_name;\n  }\n  return app_name.substr(pos + 1);\n}\n\nstatic std::string GetLogLevel(MsLogLevel level) {\n  switch (level) {\n    case LOG_DEBUG:\n      return \"DEBUG\";\n    case LOG_INFO:\n      return \"INFO\";\n    case LOG_WARNING:\n      return \"WARNING\";\n    case LOG_EXCEPTION:\n      return \"EXCEPTION\";\n    case LOG_ERROR:\n    default:\n      return \"ERROR\";\n  }\n}\n\n// convert MsLogLevel to corresponding glog level\nstatic int GetGlogLevel(MsLogLevel level) {\n  switch (level) {\n    case LOG_DEBUG:\n    case LOG_INFO:\n      return google::GLOG_INFO;\n    case LOG_WARNING:\n      return google::GLOG_WARNING;\n    case LOG_ERROR:\n    case LOG_EXCEPTION:\n    default:\n      return google::GLOG_ERROR;\n  }\n}\n\n// get threshold level\nstatic int GetThresholdLevel(const std::string &threshold) {\n  if (threshold.empty()) {\n    return google::GLOG_WARNING;\n  } else if (threshold == \"DEBUG\" || threshold == \"INFO\") {\n    return google::GLOG_INFO;\n  } else if (threshold == \"WARNING\") {\n    return google::GLOG_WARNING;\n  } else if (threshold == \"ERROR\" || threshold == \"CRITICAL\") {\n    return google::GLOG_ERROR;\n  } else {\n    return google::GLOG_WARNING;\n  }\n}\n\nvoid LogWriter::OutputLog(const std::string &msg_str) const {\n  if (static_cast<int>(log_level_) < g_ms_serving_log_level) {\n    return;\n  }\n  auto submodule_name = \"SERVING\";\n  
google::LogMessage(\"\", 0, GetGlogLevel(log_level_)).stream()\n    << \"[\" << GetLogLevel(log_level_) << \"] \" << submodule_name << \"(\" << getpid() << \",\" << std::hex\n    << std::this_thread::get_id() << std::dec << \",\" << GetProcName() << \"):\" << GetTimeString() << \" \"\n    << \"[\" << file_ << \":\" << line_ << \"] \" << func_ << \"] \" << msg_str << std::endl;\n}\n\nstatic int GetGlobalLogLevel() { return FLAGS_v; }\n\nenum class LogConfigToken : size_t {\n  INVALID,      // indicate invalid token\n  LEFT_BRACE,   // '{'\n  RIGHT_BRACE,  // '}'\n  VARIABLE,     // '[A-Za-z][A-Za-z0-9_]*'\n  NUMBER,       // [0-9]+\n  COMMA,        // ','\n  COLON,        // ':'\n  EOS,          // End Of String, '\\0'\n  NUM_LOG_CFG_TOKENS\n};\n\nstatic const char *g_tok_names[static_cast<size_t>(LogConfigToken::NUM_LOG_CFG_TOKENS)] = {\n  \"invalid\",        // indicate invalid token\n  \"{\",              // '{'\n  \"}\",              // '}'\n  \"variable\",       // '[A-Za-z][A-Za-z0-9_]*'\n  \"number\",         // [0-9]+\n  \",\",              // ','\n  \":\",              // ':'\n  \"end-of-string\",  // End Of String, '\\0'\n};\n\nstatic inline bool IsAlpha(char ch) { return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z'); }\n\nstatic inline bool IsDigit(char ch) { return ch >= '0' && ch <= '9'; }\n\nclass LogConfigLexer {\n public:\n  explicit LogConfigLexer(const std::string &text) : buffer_(text) { cur_idx_ = 0; }\n  ~LogConfigLexer() = default;\n\n  // skip white space, and return the first char after white space\n  char SkipWhiteSpace() {\n    while (cur_idx_ < buffer_.size()) {\n      char ch = buffer_[cur_idx_];\n      if (ch == ' ' || ch == '\\t') {\n        ++cur_idx_;\n        continue;\n      }\n      return ch;\n    }\n    return '\\0';\n  }\n\n  LogConfigToken GetNext(std::string *const ptr) {\n    char ch = SkipWhiteSpace();\n    // clang-format off\n    static const std::map<char, LogConfigToken> single_char_map = {\n      {'{', 
LogConfigToken::LEFT_BRACE},\n      {'}', LogConfigToken::RIGHT_BRACE},\n      {',', LogConfigToken::COMMA},\n      {':', LogConfigToken::COLON},\n      {'\\0', LogConfigToken::EOS},\n    };\n    // clang-format on\n\n    auto iter = single_char_map.find(ch);\n    if (iter != single_char_map.end()) {\n      if (ptr != nullptr) {\n        *ptr = std::string() + ch;\n      }\n      ++cur_idx_;\n      return iter->second;\n    } else if (IsAlpha(ch)) {\n      std::ostringstream oss;\n      do {\n        oss << ch;\n        ch = buffer_[++cur_idx_];\n      } while (cur_idx_ < buffer_.size() && (IsAlpha(ch) || IsDigit(ch) || ch == '_'));\n      if (ptr != nullptr) {\n        *ptr = std::string(oss.str());\n      }\n      return LogConfigToken::VARIABLE;\n    } else if (IsDigit(ch)) {\n      std::ostringstream oss;\n      do {\n        oss << ch;\n        ch = buffer_[++cur_idx_];\n      } while (cur_idx_ < buffer_.size() && IsDigit(ch));\n      if (ptr != nullptr) {\n        *ptr = std::string(oss.str());\n      }\n      return LogConfigToken::NUMBER;\n    }\n    return LogConfigToken::INVALID;\n  }\n\n private:\n  std::string buffer_;\n  size_t cur_idx_;\n};\n\nclass LogConfigParser {\n public:\n  explicit LogConfigParser(const std::string &cfg) : lexer(cfg) {}\n  ~LogConfigParser() = default;\n\n  bool Expect(LogConfigToken expected, LogConfigToken tok) const {\n    if (expected != tok) {\n      MSI_LOG(WARNING) << \"Parse submodule log configuration text error, expect `\"\n                       << g_tok_names[static_cast<size_t>(expected)] << \"`, but got `\"\n                       << g_tok_names[static_cast<size_t>(tok)] << \"`. The whole configuration will be ignored.\";\n      return false;\n    }\n    return true;\n  }\n\n  // The text of config MS_SUBMODULE_LOG_v is in the form {submodule1:log_level1,submodule2:log_level2,...}.\n  // Valid values of log levels are: 0 - debug, 1 - info, 2 - warning, 3 - error\n  // e.g. 
MS_SUBMODULE_LOG_v={PARSER:0, ANALYZER:2, PIPELINE:1}\n  std::map<std::string, std::string> Parse() {\n    std::map<std::string, std::string> log_levels;\n\n    bool flag_error = false;\n    std::string text;\n    auto tok = lexer.GetNext(&text);\n    // empty string\n    if (tok == LogConfigToken::EOS) {\n      return log_levels;\n    }\n\n    if (!Expect(LogConfigToken::LEFT_BRACE, tok)) {\n      return log_levels;\n    }\n\n    do {\n      std::string key, val;\n      tok = lexer.GetNext(&key);\n      if (!Expect(LogConfigToken::VARIABLE, tok)) {\n        flag_error = true;\n        break;\n      }\n\n      tok = lexer.GetNext(&text);\n      if (!Expect(LogConfigToken::COLON, tok)) {\n        flag_error = true;\n        break;\n      }\n\n      tok = lexer.GetNext(&val);\n      if (!Expect(LogConfigToken::NUMBER, tok)) {\n        flag_error = true;\n        break;\n      }\n\n      log_levels[key] = val;\n      tok = lexer.GetNext(&text);\n    } while (tok == LogConfigToken::COMMA);\n\n    if (!flag_error && !Expect(LogConfigToken::RIGHT_BRACE, tok)) {\n      flag_error = true;\n    }\n\n    if (flag_error) {\n      log_levels.clear();\n    }\n    return log_levels;\n  }\n\n private:\n  LogConfigLexer lexer;\n};\n\nbool ParseLogLevel(const std::string &str_level, MsLogLevel *ptr_level) {\n  if (str_level.size() == 1) {\n    int ch = str_level.c_str()[0];\n    ch = ch - '0';  // subtract ASCII code of '0', which is 48\n    if (ch >= static_cast<int>(LOG_DEBUG) && ch <= static_cast<int>(LOG_ERROR)) {\n      if (ptr_level != nullptr) {\n        *ptr_level = static_cast<MsLogLevel>(ch);\n      }\n      return true;\n    }\n  }\n  return false;\n}\n\nvoid InitSubModulesLogLevel() {\n  // initialize submodule's log level using global\n  auto global_log_level = GetGlobalLogLevel();\n  g_ms_serving_log_level = global_log_level;\n\n  // set submodule's log level\n  auto submodule = common::GetEnv(\"MS_SUBMODULE_LOG_v\");\n  MSI_LOG(DEBUG) << \"MS_SUBMODULE_LOG_v=`\" << 
submodule << \"`\";\n  LogConfigParser parser(submodule);\n  auto configs = parser.Parse();\n  for (const auto &cfg : configs) {\n    if (cfg.first == \"SERVING\") {\n      MsLogLevel submodule_log_level;\n      if (!ParseLogLevel(cfg.second, &submodule_log_level)) {\n        MSI_LOG(WARNING) << \"Illegal log level value \" << cfg.second << \" for \" << cfg.first << \", ignore it.\";\n        continue;\n      }\n      g_ms_serving_log_level = static_cast<int>(submodule_log_level);\n    }\n  }\n}\n\nvoid common_log_init(void) {\n  // do not use glog predefined log prefix\n  FLAGS_log_prefix = false;\n  // disable log buffer, real-time output\n  FLAGS_logbufsecs = 0;\n  // set default log level to WARNING\n  if (common::GetEnv(\"GLOG_v\").empty()) {\n    FLAGS_v = static_cast<int>(mindspore::serving::LOG_WARNING);\n  }\n\n  // set default log file mode to 0640\n  if (common::GetEnv(\"GLOG_logfile_mode\").empty()) {\n    FLAGS_logfile_mode = 0640;\n  }\n  std::string logtostderr = common::GetEnv(\"GLOG_logtostderr\");\n  // default print log to screen\n  if (logtostderr.empty()) {\n    FLAGS_logtostderr = true;\n  } else if (logtostderr == \"0\" && common::GetEnv(\"GLOG_log_dir\").empty()) {\n    FLAGS_logtostderr = true;\n    MSI_LOG(WARNING) << \"`GLOG_log_dir` is not set, output log to screen.\";\n  }\n\n  // default GLOG_stderrthreshold level to WARNING\n  auto threshold = common::GetEnv(\"GLOG_stderrthreshold\");\n  FLAGS_stderrthreshold = GetThresholdLevel(threshold);\n\n  mindspore::serving::InitSubModulesLogLevel();\n}\n}  // namespace serving\n}  // namespace mindspore\n\nextern \"C\" {\n#if defined(_WIN32) || defined(_WIN64)\n__attribute__((constructor)) void mindspore_serving_log_init(void) {\n#else\nvoid mindspore_serving_log_init(void) {\n#endif\n  static bool is_glog_inited = false;\n  if (!is_glog_inited) {\n#if !defined(_WIN32) && !defined(_WIN64)\n    google::InitGoogleLogging(\"mindspore_serving\");\n#endif\n    is_glog_inited = true;\n  }\n  
mindspore::serving::common_log_init();\n}\n}\n#undef google\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/log.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_LOG_H\n#define MINDSPORE_SERVING_LOG_H\n\n#include <iostream>\n#include <vector>\n#include <unordered_map>\n#include <map>\n#include <sstream>\n#include <memory>\n#include <string>\n#include <utility>\n\nnamespace mindspore::serving {\n#define MS_API __attribute__((visibility(\"default\")))\n\n#define SERVING_LOG_HDR_FILE_REL_PATH \"mindspore_serving/ccsrc/common/log.h\"\n\n// Get start index of file relative path in __FILE__\nstatic constexpr size_t GetRelPathPos() noexcept {\n  return sizeof(__FILE__) > sizeof(SERVING_LOG_HDR_FILE_REL_PATH)\n           ? sizeof(__FILE__) - sizeof(SERVING_LOG_HDR_FILE_REL_PATH)\n           : 0;\n}\n\n#define SERVING_FILE_NAME                                                        \\\n  (sizeof(__FILE__) > mindspore::serving::GetRelPathPos()                        \\\n     ? static_cast<const char *>(__FILE__) + mindspore::serving::GetRelPathPos() \\\n     : static_cast<const char *>(__FILE__))\n\nclass AsStringHelper {\n public:\n  template <typename T>\n  static std::string AsString(const T &val) noexcept {\n    std::stringstream ss;\n    ss << val;\n    return ss.str();\n  }\n  static std::string AsString(const bool &val) noexcept { return val ? 
\"true\" : \"false\"; }\n\n  template <typename T>\n  static std::string AsString(const std::vector<T> &val) noexcept {\n    std::stringstream ss;\n    ss << \"[\";\n    for (size_t i = 0; i < val.size(); i++) {\n      ss << AsString(val[i]);\n      if (i + 1 < val.size()) {\n        ss << \", \";\n      }\n    }\n    ss << \"]\";\n    return ss.str();\n  }\n\n  template <typename K, typename V>\n  static std::string AsString(const std::unordered_map<K, V> &val) noexcept {\n    return AsStringMap(val);\n  }\n  template <typename K, typename V>\n  static std::string AsString(const std::map<K, V> &val) noexcept {\n    return AsStringMap(val);\n  }\n  template <typename K, typename V>\n  static std::string AsString(const std::vector<std::pair<K, V>> &val) noexcept {\n    return AsStringMap(val);\n  }\n\n private:\n  template <typename T>\n  static std::string AsStringMap(const T &val) noexcept {\n    std::stringstream ss;\n    ss << \"{\";\n    size_t index = 0;\n    for (auto &item : val) {\n      ss << AsString(item.first) << \": \" << AsString(item.second);\n      if (index + 1 < val.size()) {\n        ss << \", \";\n      }\n      index += 1;\n    }\n    ss << \"}\";\n    return ss.str();\n  }\n};\n\nclass LogStream {\n public:\n  LogStream() { sstream_ = std::make_shared<std::stringstream>(); }\n  ~LogStream() = default;\n\n  template <typename T>\n  LogStream &operator<<(const T &val) noexcept {\n    (*sstream_) << val;\n    return *this;\n  }\n\n  LogStream &operator<<(const bool &val) noexcept {\n    (*sstream_) << (val ? 
\"true\" : \"false\");\n    return *this;\n  }\n\n  template <typename T>\n  LogStream &operator<<(const std::vector<T> &val) noexcept {\n    (*sstream_) << \"[\";\n    for (size_t i = 0; i < val.size(); i++) {\n      (*this) << val[i];\n      if (i + 1 < val.size()) {\n        (*sstream_) << \", \";\n      }\n    }\n    (*sstream_) << \"]\";\n    return *this;\n  }\n\n  template <typename K, typename V>\n  LogStream &operator<<(const std::unordered_map<K, V> &val) noexcept {\n    return OutputMap(val);\n  }\n\n  template <typename K, typename V>\n  LogStream &operator<<(const std::map<K, V> &val) noexcept {\n    return OutputMap(val);\n  }\n\n  LogStream &operator<<(std::ostream &func(std::ostream &os)) noexcept {\n    (*sstream_) << func;\n    return *this;\n  }\n\n  friend class LogWriter;\n  friend class Status;\n\n private:\n  std::shared_ptr<std::stringstream> sstream_;\n  template <typename T>\n  LogStream &OutputMap(const T &val) noexcept {\n    (*sstream_) << \"{\";\n    size_t index = 0;\n    for (auto &item : val) {\n      (*this) << item.first << \": \" << item.second;\n      if (index + 1 < val.size()) {\n        (*sstream_) << \", \";\n      }\n      index += 1;\n    }\n    (*sstream_) << \"}\";\n    return *this;\n  }\n};\n\nenum MsLogLevel {\n  LOG_DEBUG,\n  LOG_INFO,\n  LOG_WARNING,\n  LOG_ERROR,\n  LOG_EXCEPTION,\n};\n\nclass MS_API LogWriter {\n public:\n  LogWriter(const char *file, int line, const char *func, MsLogLevel log_level)\n      : file_(file), line_(line), func_(func), log_level_(log_level) {}\n  ~LogWriter() = default;\n\n  std::string operator<(const LogStream &stream) const noexcept __attribute__((visibility(\"default\"))) {\n    std::ostringstream msg;\n    msg << stream.sstream_->rdbuf();\n    auto msg_str = GetOutputMsg(msg);\n    OutputLog(msg_str);\n    return msg_str;\n  }\n\n  void operator^(const LogStream &stream) const __attribute__((noreturn, visibility(\"default\"))) {\n    std::ostringstream msg;\n    msg << 
stream.sstream_->rdbuf();\n    auto msg_str = GetOutputMsg(msg);\n    OutputLog(msg_str);\n    throw std::runtime_error(msg_str);\n  }\n\n  std::string GetOutputMsg(const std::ostringstream &msg) const {\n    std::string msg_str = msg.str();\n    constexpr int max_log_size = 384;\n    constexpr int msg_log_start_size = 192;\n    if (msg_str.length() > max_log_size) {\n      msg_str = msg_str.substr(0, msg_log_start_size) + \"...\" + msg_str.substr(msg_str.length() - msg_log_start_size);\n    }\n    return msg_str;\n  }\n\n private:\n  void OutputLog(const std::string &msg_str) const;\n\n  const char *file_;\n  int line_;\n  const char *func_;\n  MsLogLevel log_level_;\n};\n\nextern int g_ms_serving_log_level MS_API;\n\n#define MSILOG_IF(level, condition)                                                       \\\n  !(condition) ? std::string()                                                            \\\n               : mindspore::serving::LogWriter(SERVING_FILE_NAME, __LINE__, __FUNCTION__, \\\n                                               mindspore::serving::LOG_##level) < mindspore::serving::LogStream()\n\n#define MSILOG_NOIF(level)                                                                                    \\\n  mindspore::serving::LogWriter(SERVING_FILE_NAME, __LINE__, __FUNCTION__, mindspore::serving::LOG_##level) < \\\n    mindspore::serving::LogStream()\n\ninline bool IS_OUTPUT_ON(enum MsLogLevel level) { return static_cast<int>(level) >= g_ms_serving_log_level; }\n\n#define MSILOG_THROW                                                                                            \\\n  mindspore::serving::LogWriter(SERVING_FILE_NAME, __LINE__, __FUNCTION__, mindspore::serving::LOG_EXCEPTION) ^ \\\n    mindspore::serving::LogStream()\n\n#define MSI_LOG(level) MSI_LOG_##level\n\n#define MSI_LOG_DEBUG MSILOG_IF(DEBUG, mindspore::serving::IS_OUTPUT_ON(mindspore::serving::LOG_DEBUG))\n#define MSI_LOG_INFO MSILOG_IF(INFO, 
mindspore::serving::IS_OUTPUT_ON(mindspore::serving::LOG_INFO))\n#define MSI_LOG_WARNING MSILOG_IF(WARNING, mindspore::serving::IS_OUTPUT_ON(mindspore::serving::LOG_WARNING))\n#define MSI_LOG_ERROR MSILOG_IF(ERROR, mindspore::serving::IS_OUTPUT_ON(mindspore::serving::LOG_ERROR))\n\n#define MSI_LOG_EXCEPTION MSILOG_THROW\n\n#define MSI_EXCEPTION_IF_NULL(ptr)                                   \\\n  do {                                                               \\\n    if ((ptr) == nullptr) {                                          \\\n      MSI_LOG_EXCEPTION << \": The pointer[\" << #ptr << \"] is null.\"; \\\n    }                                                                \\\n  } while (0)\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_LOG_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/proto_tensor.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"common/proto_tensor.h\"\n#include <unordered_map>\n#include <vector>\n#include <string>\n#include <algorithm>\n#include <map>\n#include \"common/buffer_tensor.h\"\n#include \"common/servable.h\"\n#include \"master/dispacther.h\"\n#include \"common/shared_memory.h\"\n\nusing std::string;\nusing std::unordered_map;\nusing std::vector;\n\nnamespace mindspore::serving {\nconst size_t kMaxShapeElementCount = INT32_MAX;\n\nProtoTensor::ProtoTensor(proto::Tensor *other) : tensor_(other) {}\n\nProtoTensor::~ProtoTensor() {}\n\nDataType ProtoTensor::data_type() const {\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  return TransDataType2Inference(tensor_->dtype());\n}\n\nvoid ProtoTensor::set_data_type(DataType data_type) {\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  tensor_->set_dtype(TransDataType2Proto(data_type));\n}\n\nstd::vector<int64_t> ProtoTensor::shape() const {\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  std::vector<int64_t> result;\n  auto dims = tensor_->shape().dims();\n  std::transform(dims.begin(), dims.end(), std::back_inserter(result), [](const int64_t dim) { return dim; });\n  return result;\n}\n\nvoid ProtoTensor::set_shape(const std::vector<int64_t> &shape) {\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  auto tensor_shape = tensor_->mutable_shape();\n  tensor_shape->Clear();\n  size_t element_count = 1;\n  for (auto dim : shape) {\n    
if (dim < 0 || (dim > 0 && element_count > kMaxShapeElementCount / dim)) {\n      MSI_LOG_ERROR << \"failed to set shape, invalid dim num \" << dim;\n      tensor_shape->Clear();\n      return;\n    }\n    element_count *= dim;\n    tensor_shape->add_dims(dim);\n  }\n}\n\nbool ProtoTensor::resize_data(size_t data_len) {\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  if (tensor_->has_shm_data()) {\n    if (data_len == tensor_->shm_data().data_size()) {\n      return true;\n    }\n    MSI_LOG_EXCEPTION << \"Cannot resize shared memory data size from \" << tensor_->shm_data().data_size() << \" to \"\n                      << data_len;\n  }\n  string *buffer = tensor_->mutable_data();\n  if (buffer == nullptr) {\n    MSI_LOG_ERROR << \"invalid buffer data\";\n    return false;\n  }\n  buffer->resize(data_len);\n  return true;\n}\n\nsize_t ProtoTensor::data_size() const {\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  if (tensor_->has_shm_data()) {\n    return tensor_->shm_data().data_size();\n  }\n  return tensor_->data().size();\n}\n\nuint8_t *ProtoTensor::mutable_data() {\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  if (data_size() == 0) {\n    return nullptr;\n  }\n  if (tensor_->has_shm_data()) {\n    auto status = AttachSharedMemory();\n    if (status != SUCCESS) {\n      return nullptr;\n    }\n    return shm_attach_.offset_address;\n  }\n  return reinterpret_cast<uint8_t *>(tensor_->mutable_data()->data());\n}\n\nconst uint8_t *ProtoTensor::data() const {\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  if (data_size() == 0) {\n    return nullptr;\n  }\n  if (tensor_->has_shm_data()) {\n    auto status = AttachSharedMemory();\n    if (status != SUCCESS) {\n      return nullptr;\n    }\n    return shm_attach_.offset_address;\n  }\n  return reinterpret_cast<const uint8_t *>(tensor_->data().data());\n}\n\nvoid ProtoTensor::clear_bytes_data() {\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  return tensor_->mutable_bytes_val()->Clear();\n}\n\nvoid ProtoTensor::add_bytes_data(const uint8_t *data, size_t 
bytes_len) {\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  tensor_->add_bytes_val(data, bytes_len);\n}\n\nsize_t ProtoTensor::bytes_data_size() const {\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  return tensor_->bytes_val().size();\n}\n\nvoid ProtoTensor::get_bytes_data(size_t index, const uint8_t **data, size_t *bytes_len) const {\n  MSI_EXCEPTION_IF_NULL(data);\n  MSI_EXCEPTION_IF_NULL(bytes_len);\n  MSI_EXCEPTION_IF_NULL(tensor_);\n  if (index >= static_cast<size_t>(tensor_->bytes_val().size())) {\n    MSI_LOG_EXCEPTION << \"visit invalid index \" << index << \" total size \" << tensor_->bytes_val().size();\n  }\n  auto &bytes = tensor_->bytes_val(index);\n  *data = reinterpret_cast<const uint8_t *>(bytes.data());\n  *bytes_len = bytes.size();\n}\n\nproto::DataType ProtoTensor::TransDataType2Proto(DataType data_type) {\n  const std::unordered_map<DataType, proto::DataType> id2type_map{\n    {serving::kMSI_Unknown, proto::MS_UNKNOWN}, {serving::kMSI_Bool, proto::MS_BOOL},\n    {serving::kMSI_Float64, proto::MS_FLOAT64}, {serving::kMSI_Int8, proto::MS_INT8},\n    {serving::kMSI_Uint8, proto::MS_UINT8},     {serving::kMSI_Int16, proto::MS_INT16},\n    {serving::kMSI_Uint16, proto::MS_UINT16},   {serving::kMSI_Int32, proto::MS_INT32},\n    {serving::kMSI_Uint32, proto::MS_UINT32},   {serving::kMSI_Int64, proto::MS_INT64},\n    {serving::kMSI_Uint64, proto::MS_UINT64},   {serving::kMSI_Float16, proto::MS_FLOAT16},\n    {serving::kMSI_Float32, proto::MS_FLOAT32}, {serving::kMSI_String, proto::MS_STRING},\n    {serving::kMSI_Bytes, proto::MS_BYTES},\n  };\n  auto it = id2type_map.find(data_type);\n  if (it == id2type_map.end()) {\n    MSI_LOG_WARNING << \"failed to set data type, undefined data type \" << data_type;\n    return proto::MS_UNKNOWN;\n  } else {\n    return it->second;\n  }\n}\n\nDataType ProtoTensor::TransDataType2Inference(proto::DataType data_type) {\n  const std::unordered_map<proto::DataType, DataType> type2id_map{\n    {proto::MS_UNKNOWN, kMSI_Unknown}, 
{proto::MS_BOOL, kMSI_Bool},       {proto::MS_INT8, kMSI_Int8},\n    {proto::MS_UINT8, kMSI_Uint8},     {proto::MS_INT16, kMSI_Int16},     {proto::MS_UINT16, kMSI_Uint16},\n    {proto::MS_INT32, kMSI_Int32},     {proto::MS_UINT32, kMSI_Uint32},   {proto::MS_INT64, kMSI_Int64},\n    {proto::MS_UINT64, kMSI_Uint64},   {proto::MS_FLOAT16, kMSI_Float16}, {proto::MS_FLOAT32, kMSI_Float32},\n    {proto::MS_FLOAT64, kMSI_Float64}, {proto::MS_STRING, kMSI_String},   {proto::MS_BYTES, kMSI_Bytes},\n  };\n  auto it = type2id_map.find(data_type);\n  if (it == type2id_map.end()) {\n    MSI_LOG_WARNING << \"failed to get data type, undefined data type \" << data_type;\n    return kMSI_Unknown;\n  } else {\n    return it->second;\n  }\n}\n\nvoid ProtoTensor::SetSharedMemory(const proto::ShmTensorData &shm_data) { *tensor_->mutable_shm_data() = shm_data; }\n\nStatus ProtoTensor::AttachSharedMemory() const {\n  if (has_attached_shm_) {\n    return SUCCESS;\n  }\n  if (tensor_ == nullptr) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"The proto tensor object cannot be nullptr\";\n  }\n  if (!tensor_->has_shm_data()) {\n    return SUCCESS;\n  }\n  const proto::ShmTensorData &shm_data = tensor_->shm_data();\n  auto status = SharedMemoryManager::Instance().Attach(shm_data.memory_key(), shm_data.bytes_size(),\n                                                       shm_data.data_offset(), shm_data.data_size(), &shm_attach_);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Attach shared memory failed, memory key: \" << shm_data.memory_key()\n                  << \", bytes size: \" << shm_data.bytes_size() << \", data offset: \" << shm_data.data_offset()\n                  << \", data size: \" << shm_data.data_size();\n    return status;\n  }\n  has_attached_shm_ = true;\n  return SUCCESS;\n}\n\nvoid GrpcTensorHelper::GetRequestSpec(const proto::PredictRequest &request, RequestSpec *request_spec) {\n  MSI_EXCEPTION_IF_NULL(request_spec);\n  request_spec->servable_name = 
request.servable_spec().name();\n  request_spec->method_name = request.servable_spec().method_name();\n  request_spec->version_number = request.servable_spec().version_number();\n}\n\nvoid GrpcTensorHelper::ConvertProtoWorkerSpec(const proto::RegisterRequest &proto_request, WorkerRegSpec *worker_spec) {\n  MSI_EXCEPTION_IF_NULL(worker_spec);\n  auto &proto_worker_spec = proto_request.worker_spec();\n  worker_spec->worker_address = proto_worker_spec.address();\n  worker_spec->worker_pid = proto_worker_spec.worker_pid();\n\n  auto &proto_spec = proto_worker_spec.servable_spec();\n  auto &servable_spec = worker_spec->servable_spec;\n  servable_spec.servable_name = proto_spec.name();\n  servable_spec.version_number = proto_spec.version_number();\n  servable_spec.batch_size = proto_spec.batch_size();\n  servable_spec.own_device = proto_spec.own_device();\n  for (const auto &proto_method : proto_spec.methods()) {\n    ServableMethodInfo method_info;\n    method_info.name = proto_method.name();\n    method_info.only_model_stage = proto_method.only_model_stage();\n    for (auto &name : proto_method.input_names()) {\n      method_info.input_names.push_back(name);\n    }\n    servable_spec.methods.push_back(method_info);\n  }\n  ConvertProtoModelInfos(proto_spec.model_infos(), &servable_spec.models);\n}\n\nvoid GrpcTensorHelper::ConvertWorkerSpec(const WorkerRegSpec &worker_spec, proto::RegisterRequest *proto_request) {\n  auto proto_worker_spec = proto_request->mutable_worker_spec();\n  proto_worker_spec->set_address(worker_spec.worker_address);\n  proto_worker_spec->set_worker_pid(worker_spec.worker_pid);\n\n  auto proto_spec = proto_worker_spec->mutable_servable_spec();\n  const auto &spec = worker_spec.servable_spec;\n  proto_spec->set_name(spec.servable_name);\n  proto_spec->set_version_number(spec.version_number);\n  proto_spec->set_batch_size(spec.batch_size);\n  proto_spec->set_own_device(spec.own_device);\n  for (auto &method : spec.methods) {\n    auto proto_method 
= proto_spec->add_methods();\n    proto_method->set_name(method.name);\n    proto_method->set_only_model_stage(method.only_model_stage);\n    for (auto &name : method.input_names) {\n      proto_method->add_input_names(name);\n    }\n  }\n  ConvertModelInfos(spec.models, proto_spec->mutable_model_infos());\n}\n\nvoid GrpcTensorHelper::ConvertProtoModelInfos(const proto::ModelInfos &proto_model_infos,\n                                              std::map<std::string, ModelInfo> *model_infos) {\n  MSI_EXCEPTION_IF_NULL(model_infos);\n  model_infos->clear();\n  auto convert_tensor_info = [](const proto::TensorInfo &proto_tensor_info) -> TensorInfo {\n    TensorInfo tensor_info;\n    tensor_info.is_no_batch_dim = proto_tensor_info.is_no_batch_dim();\n    tensor_info.size = proto_tensor_info.size();\n    tensor_info.data_type = ProtoTensor::TransDataType2Inference(proto_tensor_info.dtype());\n    auto &proto_shape = proto_tensor_info.shape().dims();\n    std::copy(proto_shape.begin(), proto_shape.end(), std::back_inserter(tensor_info.shape));\n    return tensor_info;\n  };\n  for (const auto &proto_model_it : proto_model_infos.model_infos()) {\n    auto &model_key = proto_model_it.first;\n    auto &proto_model = proto_model_it.second;\n    ModelInfo &model_info = (*model_infos)[model_key];\n    model_info.batch_size = proto_model.batch_size();\n    for (auto &proto_subgraph : proto_model.subgraph_infos()) {\n      ModelSubgraphInfo subgraph_info;\n      for (auto &input_tensor : proto_subgraph.inputs()) {\n        subgraph_info.input_infos.push_back(convert_tensor_info(input_tensor));\n      }\n      for (auto &output_tensor : proto_subgraph.outputs()) {\n        subgraph_info.output_infos.push_back(convert_tensor_info(output_tensor));\n      }\n      model_info.sub_graph_infos.push_back(subgraph_info);\n    }\n  }\n}\n\nvoid GrpcTensorHelper::ConvertModelInfos(const std::map<std::string, ModelInfo> &model_infos,\n                                         
proto::ModelInfos *proto_model_infos) {\n  MSI_EXCEPTION_IF_NULL(proto_model_infos);\n  proto_model_infos->Clear();\n  auto convert_tensor_info = [](const TensorInfo &tensor_info, proto::TensorInfo *proto_tensor_info) {\n    proto_tensor_info->set_is_no_batch_dim(tensor_info.is_no_batch_dim);\n    proto_tensor_info->set_size(tensor_info.size);\n    proto_tensor_info->set_dtype(ProtoTensor::TransDataType2Proto(tensor_info.data_type));\n    auto proto_shape = proto_tensor_info->mutable_shape()->mutable_dims();\n    for (auto &dim : tensor_info.shape) {\n      proto_shape->Add(dim);\n    }\n  };\n  auto &proto_models_items = *(proto_model_infos->mutable_model_infos());\n  for (const auto &model_it : model_infos) {\n    auto &model_key = model_it.first;\n    auto &model_info = model_it.second;\n    auto &proto_model = proto_models_items[model_key];\n    proto_model.set_batch_size(model_info.batch_size);\n    for (auto &subgraph_info : model_info.sub_graph_infos) {\n      auto proto_subgraph = proto_model.add_subgraph_infos();\n      for (auto &input_tensor : subgraph_info.input_infos) {\n        convert_tensor_info(input_tensor, proto_subgraph->add_inputs());\n      }\n      for (auto &output_tensor : subgraph_info.output_infos) {\n        convert_tensor_info(output_tensor, proto_subgraph->add_outputs());\n      }\n    }\n  }\n}\n\nStatus GrpcTensorHelper::CreateInstanceFromRequest(const MethodSignature &method, const proto::PredictRequest &request,\n                                                   vector<InstanceData> *results) {\n  MSI_EXCEPTION_IF_NULL(results);\n  results->clear();\n\n  Status status;\n  if (request.instances_size() == 0) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n           << \"Instances count of request cannot be 0, servable: \" << method.servable_name\n           << \", method: \" << method.method_name;\n  }\n  status = CreateInstanceFromRequestInstances(request, method, results);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << 
\"Create instances from request instances failed\";\n    return status;\n  }\n  return SUCCESS;\n}\n\nvoid GrpcTensorHelper::CreateReplyFromInstances(const proto::PredictRequest &request, const MethodSignature &method,\n                                                const vector<InstancePtr> &instances, proto::PredictReply *reply) {\n  auto status = CreateReplyFromInstancesInner(request, method, instances, reply);\n  if (status != SUCCESS) {\n    CreateReplyFromErrorMsg(status, reply);\n  }\n}\n\nStatus GrpcTensorHelper::CreateInstanceFromPredictReply(const RequestSpec &request_spec,\n                                                        const proto::PredictReply &reply,\n                                                        std::vector<proto::ErrorMsg> *error,\n                                                        std::vector<const proto::Instance *> *results) {\n  MSI_EXCEPTION_IF_NULL(error);\n  MSI_EXCEPTION_IF_NULL(results);\n  results->clear();\n  error->clear();\n  if (reply.instances_size() == 0 && reply.error_msg_size() == 0) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n           << \"The instance or error count of reply cannot be 0, servable: \" << request_spec.servable_name\n           << \", method: \" << request_spec.method_name;\n  }\n  std::copy(reply.error_msg().begin(), reply.error_msg().end(), std::back_inserter(*error));\n  for (auto &item : reply.instances()) {\n    // cppcheck-suppress useStlAlgorithm\n    results->push_back(&item);\n  }\n  return SUCCESS;\n}\n\nStatus GrpcTensorHelper::CreatePredictReplyFromInstances(const proto::PredictRequest &request,\n                                                         const std::vector<proto::ErrorMsg> &errors,\n                                                         const std::vector<const proto::Instance *> &instances,\n                                                         proto::PredictReply *reply) {\n  MSI_EXCEPTION_IF_NULL(reply);\n  for (auto &instance : instances) {\n    
auto proto_instance = reply->add_instances();\n    if (instance) {\n      *proto_instance->mutable_items() = instance->items();\n    }\n  }\n  bool all_ok = true;\n  bool all_same = true;\n  for (auto &error : errors) {\n    if (error.error_code() != 0) {\n      all_ok = false;\n    }\n    if (error.error_code() != errors[0].error_code() || error.error_msg() != errors[0].error_msg()) {\n      all_same = false;\n    }\n  }\n  if (!all_ok) {\n    if (all_same) {\n      reply->clear_instances();\n      auto proto_error = reply->add_error_msg();\n      proto_error->set_error_msg(errors[0].error_msg());\n      proto_error->set_error_code(errors[0].error_code());\n    } else {\n      for (auto &error : errors) {\n        auto proto_error = reply->add_error_msg();\n        proto_error->set_error_msg(error.error_msg());\n        proto_error->set_error_code(error.error_code());\n      }\n    }\n  }\n  return SUCCESS;\n}\n\nStatus GrpcTensorHelper::CreatePredictRequestFromInstances(const RequestSpec &request_spec,\n                                                           const std::vector<const proto::Instance *> &instances,\n                                                           proto::PredictRequest *request) {\n  MSI_EXCEPTION_IF_NULL(request);\n  auto proto_spec = request->mutable_servable_spec();\n  proto_spec->set_name(request_spec.servable_name);\n  proto_spec->set_method_name(request_spec.method_name);\n  proto_spec->set_version_number(request_spec.version_number);\n  for (auto &instance : instances) {\n    auto proto_instance = request->add_instances();\n    *proto_instance = *instance;\n  }\n  return SUCCESS;\n}\n\nStatus GrpcTensorHelper::CreateReplyFromInstancesInner(const proto::PredictRequest &request,\n                                                       const MethodSignature &method,\n                                                       const std::vector<InstancePtr> &instances,\n                                                       
proto::PredictReply *reply) {\n  MSI_EXCEPTION_IF_NULL(reply);\n  if (instances.empty()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"Result instances count invalid, cannot be 0\";\n  }\n  if (instances.size() != static_cast<size_t>(request.instances_size())) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n           << \"Result instances number \" << instances.size() << \" is inconsistent with request instances number \"\n           << request.instances_size();\n  }\n  Status status;\n  size_t err_cnt = 0;\n  for (auto &instance : instances) {\n    if (instance->error_msg != SUCCESS) {\n      err_cnt++;\n    } else if (instance->data.size() != method.outputs.size()) {\n      return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n             << \"Result data tensor size \" << instance->data.size() << \" not equal outputs size \"\n             << method.outputs.size() << \" defined in method signature\";\n    }\n  }\n  if (err_cnt > 0) {\n    for (auto &instance : instances) {\n      auto proto_err_msg = reply->add_error_msg();\n      proto_err_msg->set_error_code(instance->error_msg.StatusCode());\n      if (instance->error_msg == INVALID_INPUTS) {\n        proto_err_msg->set_error_msg(instance->error_msg.StatusMessage());\n      } else if (instance->error_msg != SUCCESS) {\n        proto_err_msg->set_error_msg(instance->error_msg.StatusMessage());\n      }\n    }\n  }\n  // create instance reply, same with request\n  for (size_t index = 0; index < instances.size(); index++) {\n    auto proto_instance = reply->add_instances();\n    auto &instance = instances[index];\n    if (instance->data.empty()) {\n      continue;\n    }\n    auto &request_output_buffers = request.instances(index).output_buffers();\n    auto proto_items = proto_instance->mutable_items();\n    for (size_t i = 0; i < method.outputs.size(); i++) {\n      auto &output_tensor = instance->data[i];\n      auto &output_name = method.outputs[i];\n\n      auto &proto_tensor = 
(*proto_items)[method.outputs[i]];\n      ProtoTensor result_tensor(&proto_tensor);\n\n      auto it = request_output_buffers.find(output_name);\n      if (it != request_output_buffers.end()) {\n        if (output_tensor->is_bytes_val_data()) {\n          return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n                 << \"The output shared memory cannot be specified in the request\"\n                 << \" when the data type of output \" << output_name << \" is \" << output_tensor->data_type()\n                 << \", output name: \" << output_name;\n        }\n        auto &shm_data = it->second;\n        if (shm_data.data_size() != output_tensor->data_size()) {\n          return INFER_STATUS_LOG_ERROR(FAILED)\n                 << \"The data size \" << shm_data.data_size() << \" of output shared memory \"\n                 << \" is inconsistent with the data size \" << output_tensor->data_size()\n                 << \" of result, output name: \" << output_name;\n        }\n        result_tensor.SetSharedMemory(shm_data);\n        status = result_tensor.AttachSharedMemory();\n        if (status != SUCCESS) {\n          return INFER_STATUS_LOG_ERROR(FAILED)\n                 << \"Attach output shared memory failed, memory key: \" << shm_data.memory_key()\n                 << \", bytes size: \" << shm_data.bytes_size() << \", data offset: \" << shm_data.data_offset()\n                 << \", data size: \" << shm_data.data_size() << \", output name: \" << output_name;\n        }\n      }\n      result_tensor.assign(*output_tensor);\n    }\n  }\n  return SUCCESS;\n}\n\nStatus GrpcTensorHelper::CreateInstanceFromRequestInstances(const proto::PredictRequest &request,\n                                                            const MethodSignature &method,\n                                                            std::vector<InstanceData> *results) {\n  MSI_EXCEPTION_IF_NULL(results);\n  auto servable_name = request.servable_spec().name();\n  auto method_name = 
request.servable_spec().method_name();\n  Status status;\n  auto &input_names = method.inputs;\n  auto &output_names = method.outputs;\n  for (auto &proto_instance : request.instances()) {\n    InstanceData instance_data;\n    for (const auto &input_name : input_names) {\n      auto it = proto_instance.items().find(input_name);\n      if (it == proto_instance.items().end()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n               << \"Cannot find input \" << input_name << \" in instance input , servable \" << servable_name << \", method \"\n               << method_name;\n      }\n      auto &tensor_proto = it->second;\n      status = CheckRequestTensor(tensor_proto);\n      if (status != SUCCESS) {\n        auto status2 = INFER_STATUS(INVALID_INPUTS) << \"Instances input \" << input_name << \" check failed\";\n        MSI_LOG_ERROR << status2.StatusMessage();\n        return Status(INVALID_INPUTS, status2.StatusMessage() + \", detail: \" + status.StatusMessage());\n      }\n      auto add_tensor = std::make_shared<ProtoTensor>(const_cast<proto::Tensor *>(&tensor_proto));\n      if (tensor_proto.has_shm_data()) {\n        status = add_tensor->AttachSharedMemory();\n        if (status != SUCCESS) {\n          auto &shm_data = tensor_proto.shm_data();\n          MSI_LOG_ERROR << \"Attach input shared memory failed, memory key: \" << shm_data.memory_key()\n                        << \", bytes size: \" << shm_data.bytes_size() << \", data offset: \" << shm_data.data_offset()\n                        << \", data size: \" << shm_data.data_size() << \", input name: \" << input_name;\n          return status;\n        }\n      }\n      instance_data.push_back(add_tensor);\n    }\n    auto &output_buffers = proto_instance.output_buffers();\n    if (!output_buffers.empty()) {\n      for (auto &buffer : output_buffers) {\n        auto it = std::find(output_names.begin(), output_names.end(), buffer.first);\n        if (it == output_names.end()) {\n          
return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n                 << \"The name \" << buffer.first << \" of the output buffers cannot be found in the output names \"\n                 << output_names << \" of the method, servable \" << servable_name << \", method \" << method_name;\n        }\n        auto &shm_data = buffer.second;\n        SharedMemoryAttachItem item;\n        status = SharedMemoryManager::Instance().Attach(shm_data.memory_key(), shm_data.bytes_size(),\n                                                        shm_data.data_offset(), shm_data.data_size(), &item);\n        if (status != SUCCESS) {\n          MSI_LOG_ERROR << \"Attach output shared memory failed, memory key: \" << shm_data.memory_key()\n                        << \", bytes size: \" << shm_data.bytes_size() << \", data offset: \" << shm_data.data_offset()\n                        << \", data size: \" << shm_data.data_size() << \", output name: \" << buffer.first;\n          return status;\n        }\n      }\n    }\n    results->push_back(instance_data);\n  }\n  return SUCCESS;\n}\n\nStatus GrpcTensorHelper::CheckRequestInstances(const proto::PredictRequest &request,\n                                               const std::vector<std::string> &input_names) {\n  auto servable_name = request.servable_spec().name();\n  auto method_name = request.servable_spec().method_name();\n  Status status;\n  for (auto &proto_instance : request.instances()) {\n    for (const auto &input_name : input_names) {\n      auto it = proto_instance.items().find(input_name);\n      if (it == proto_instance.items().end()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n               << \"Cannot find input \" << input_name << \" in instance input , servable \" << servable_name << \", method \"\n               << method_name;\n      }\n      status = CheckRequestTensor(it->second);\n      if (status != SUCCESS) {\n        auto status2 = INFER_STATUS(INVALID_INPUTS) << \"Instances input \" << input_name 
<< \" check failed\";\n        MSI_LOG_ERROR << status2.StatusMessage();\n        return Status(INVALID_INPUTS, status2.StatusMessage() + \", detail: \" + status.StatusMessage());\n      }\n    }\n  }\n  return SUCCESS;\n}\n\nvoid GrpcTensorHelper::CopyFromAgentSpec(const proto::AgentSpec &specs, WorkerAgentSpec *worker_specs) {\n  worker_specs->rank_id = specs.rank_id();\n  worker_specs->batch_size = specs.batch_size();\n  for (auto &in : specs.inputs()) {\n    TensorInfo info;\n    info.data_type = ProtoTensor::TransDataType2Inference(in.dtype());\n    info.size = in.size();\n    info.is_no_batch_dim = in.is_no_batch_dim();\n    for (auto &dim : in.shape().dims()) {\n      info.shape.push_back(dim);\n    }\n    worker_specs->input_infos.push_back(info);\n  }\n  for (auto &out : specs.outputs()) {\n    TensorInfo info;\n    info.data_type = ProtoTensor::TransDataType2Inference(out.dtype());\n    info.size = out.size();\n    info.is_no_batch_dim = out.is_no_batch_dim();\n    for (auto &dim : out.shape().dims()) {\n      info.shape.push_back(dim);\n    }\n    worker_specs->output_infos.push_back(info);\n  }\n}\n\nvoid GrpcTensorHelper::CopyFromWorkerAgentSpec(const std::vector<WorkerAgentSpec> &worker_specs,\n                                               proto::AgentRegisterRequest *request) {\n  for (size_t i = 0; i < worker_specs.size(); i++) {\n    auto &spec = worker_specs[i];\n    auto worker_spec = request->add_agent_spec();\n    worker_spec->set_rank_id(spec.rank_id);\n    worker_spec->set_batch_size(spec.batch_size);\n    for (auto &method : spec.input_infos) {\n      auto proto_method = worker_spec->add_inputs();\n      proto_method->set_dtype(ProtoTensor::TransDataType2Proto(method.data_type));\n      proto_method->set_size(method.size);\n      proto_method->set_is_no_batch_dim(method.is_no_batch_dim);\n      auto proto_shape = proto_method->mutable_shape();\n      for (auto &dim : method.shape) {\n        proto_shape->add_dims(dim);\n      }\n    }\n    
for (auto &method : spec.output_infos) {\n      auto proto_method = worker_spec->add_outputs();\n      proto_method->set_dtype(ProtoTensor::TransDataType2Proto(method.data_type));\n      proto_method->set_size(method.size);\n      proto_method->set_is_no_batch_dim(method.is_no_batch_dim);\n      auto proto_shape = proto_method->mutable_shape();\n      for (auto &dim : method.shape) {\n        proto_shape->add_dims(dim);\n      }\n    }\n  }\n}\n\nStatus GrpcTensorHelper::CheckRequestTensor(const proto::Tensor &tensor) {\n  Status status;\n  ProtoTensor tensor_input(const_cast<proto::Tensor *>(&tensor));\n  auto shape = tensor_input.shape();\n  if (tensor.dtype() == proto::MS_BYTES || tensor.dtype() == proto::MS_STRING) {\n    if (tensor.bytes_val_size() != 1) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << \"Instance tensor check failed: bytes or string type shape batch size can only be 1\";\n    }\n    if (!(shape.size() == 1 && shape[0] == 1) && !shape.empty()) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"Instance tensor check failed: bytes or string type input \"\n                                                    << \" shape can only be (1,) or empty, but given shape is \" << shape;\n    }\n  } else {\n    bool zero_dim = false;\n    for (auto &shape_item : tensor.shape().dims()) {\n      if (shape_item < 0 || zero_dim) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"Tensor check failed: input \"\n                                                      << \" shape \" << shape << \" invalid\";\n      }\n      if (shape_item == 0) {\n        zero_dim = true;\n      }\n    }\n    auto item_size = tensor_input.itemsize();\n    if (item_size == 0) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << \"Tensor check failed: input data type \" << tensor.dtype() << \" invalid\";\n    }\n    size_t element_num = tensor_input.element_cnt();\n    auto expect_data_size = element_num * item_size;\n    if 
(tensor.tensor_data_case() == proto::Tensor::TensorDataCase::kShmData) {\n      if (expect_data_size != tensor.shm_data().data_size()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n               << \"Tensor check failed: input shared memory data size \" << tensor.shm_data().data_size()\n               << \" not equal to expected size \" << expect_data_size;\n      }\n    } else {\n      if (expect_data_size != tensor.data().size()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n               << \"Tensor check failed: input data size \" << tensor.data().size() << \" invalid\";\n      }\n    }\n  }\n  return SUCCESS;\n}\n\nvoid GrpcTensorHelper::CreateReplyFromErrorMsg(const Status &error_msg, proto::PredictReply *reply) {\n  MSI_EXCEPTION_IF_NULL(reply);\n  if (error_msg == SUCCESS) {\n    return;\n  }\n  reply->clear_error_msg();\n  reply->clear_instances();\n  auto proto_error_msg = reply->add_error_msg();\n  proto_error_msg->set_error_code(error_msg.StatusCode());\n  std::string error_msg_str = error_msg.StatusMessage();\n  if (error_msg_str.empty()) {\n    proto_error_msg->set_error_msg(\"Predict failed\");\n  } else {\n    proto_error_msg->set_error_msg(error_msg_str);\n  }\n}\n\nserving::LogStream &operator<<(serving::LogStream &stream, proto::DataType data_type) {\n  const std::map<proto::DataType, std::string> type_name_map{\n    {proto::MS_UNKNOWN, \"proto::MS_UNKNOWN\"}, {proto::MS_BOOL, \"proto::kMSI_Bool\"},\n    {proto::MS_INT8, \"proto::MS_INT8\"},       {proto::MS_UINT8, \"proto::MS_UINT8\"},\n    {proto::MS_INT16, \"proto::MS_INT16\"},     {proto::MS_UINT16, \"proto::MS_UINT16\"},\n    {proto::MS_INT32, \"proto::MS_INT32\"},     {proto::MS_UINT32, \"proto::MS_UINT32\"},\n    {proto::MS_INT64, \"proto::MS_INT64\"},     {proto::MS_UINT64, \"proto::MS_UINT64\"},\n    {proto::MS_FLOAT16, \"proto::MS_FLOAT16\"}, {proto::MS_FLOAT32, \"proto::MS_FLOAT32\"},\n    {proto::MS_FLOAT64, \"proto::MS_FLOAT64\"}, {proto::MS_STRING, 
\"proto::MS_STRING\"},\n    {proto::MS_BYTES, \"proto::MS_BYTES\"},\n  };\n  auto it = type_name_map.find(data_type);\n  if (it != type_name_map.end()) {\n    stream << it->second;\n  } else {\n    stream << \"proto::MS_UNKNOWN\";\n  }\n  return stream;\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/proto_tensor.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_PROTO_TENSOR_H_\n#define MINDSPORE_SERVING_PROTO_TENSOR_H_\n\n#include <string>\n#include <utility>\n#include <vector>\n#include <memory>\n#include <map>\n#include \"common/serving_common.h\"\n#include \"proto/ms_service.pb.h\"\n#include \"proto/ms_master.pb.h\"\n#include \"proto/ms_distributed.pb.h\"\n#include \"common/instance.h\"\n#include \"common/servable.h\"\n#include \"common/shared_memory.h\"\n\nnamespace mindspore::serving {\nclass ProtoTensor : public TensorBase {\n public:\n  // the other's lifetime must longer than this object\n  explicit ProtoTensor(proto::Tensor *other);\n  ~ProtoTensor();\n\n  DataType data_type() const override;\n  void set_data_type(DataType type) override;\n  std::vector<int64_t> shape() const override;\n  void set_shape(const std::vector<int64_t> &shape) override;\n  const uint8_t *data() const override;\n  size_t data_size() const override;\n  bool resize_data(size_t data_len) override;\n  uint8_t *mutable_data() override;\n\n  void clear_bytes_data() override;\n  void add_bytes_data(const uint8_t *data, size_t bytes_len) override;\n  size_t bytes_data_size() const override;\n  void get_bytes_data(size_t index, const uint8_t **data, size_t *bytes_len) const override;\n\n  static proto::DataType TransDataType2Proto(DataType data_type);\n  static DataType 
TransDataType2Inference(proto::DataType data_type);\n\n  void SetSharedMemory(const proto::ShmTensorData &shm_data_proto);\n  Status AttachSharedMemory() const;\n\n private:\n  // if tensor_ is reference from other ms_serving::Tensor, the other's lifetime must\n  // longer than this object\n  proto::Tensor *tensor_;\n  mutable bool has_attached_shm_ = false;\n  mutable SharedMemoryAttachItem shm_attach_;\n};\n\nclass MS_API GrpcTensorHelper {\n public:\n  static void GetRequestSpec(const proto::PredictRequest &request, RequestSpec *request_spec);\n  static void ConvertProtoWorkerSpec(const proto::RegisterRequest &proto_request, WorkerRegSpec *worker_spec);\n  static void ConvertWorkerSpec(const WorkerRegSpec &worker_spec, proto::RegisterRequest *proto_request);\n  static void ConvertProtoModelInfos(const proto::ModelInfos &proto_model_infos,\n                                     std::map<std::string, ModelInfo> *model_infos);\n  static void ConvertModelInfos(const std::map<std::string, ModelInfo> &model_infos,\n                                proto::ModelInfos *proto_model_infos);\n  static Status CreateInstanceFromRequest(const MethodSignature &method, const proto::PredictRequest &request,\n                                          std::vector<InstanceData> *results);\n  static void CreateReplyFromInstances(const proto::PredictRequest &request, const MethodSignature &method,\n                                       const std::vector<InstancePtr> &instances, proto::PredictReply *reply);\n  static void CreateReplyFromErrorMsg(const Status &error_msg, proto::PredictReply *reply);\n  static void CopyFromAgentSpec(const proto::AgentSpec &request, WorkerAgentSpec *worker_specs);\n  static void CopyFromWorkerAgentSpec(const std::vector<WorkerAgentSpec> &worker_specs,\n                                      proto::AgentRegisterRequest *request);\n  static Status CreatePredictRequestFromInstances(const RequestSpec &request_spec,\n                                              
    const std::vector<const proto::Instance *> &instances,\n                                                  proto::PredictRequest *request);\n  static Status CreatePredictReplyFromInstances(const proto::PredictRequest &request,\n                                                const std::vector<proto::ErrorMsg> &errors,\n                                                const std::vector<const proto::Instance *> &instances,\n                                                proto::PredictReply *reply);\n  static Status CreateInstanceFromPredictReply(const RequestSpec &request_spec, const proto::PredictReply &reply,\n                                               std::vector<proto::ErrorMsg> *error,\n                                               std::vector<const proto::Instance *> *results);\n\n  static Status CheckRequestInstances(const proto::PredictRequest &request,\n                                      const std::vector<std::string> &input_names);\n\n private:\n  static Status CreateInstanceFromRequestInstances(const proto::PredictRequest &request, const MethodSignature &method,\n                                                   std::vector<InstanceData> *results);\n  static Status CheckRequestTensor(const proto::Tensor &tensor);\n  static Status CreateReplyFromInstancesInner(const proto::PredictRequest &request, const MethodSignature &method,\n                                              const std::vector<InstancePtr> &instances, proto::PredictReply *reply);\n};\n\nextern MS_API LogStream &operator<<(serving::LogStream &stream, proto::DataType data_type);\n}  // namespace mindspore::serving\n#endif  // MINDSPORE_SERVING_PROTO_TENSOR_H_\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/servable.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"common/servable.h\"\n#include <set>\n#include <sstream>\n#include \"worker/stage_function.h\"\n\nnamespace mindspore::serving {\nvoid LocalModelMeta::SetModelFormat(const std::string &format) {\n  if (format == \"om\") {\n    model_format = kOM;\n  } else if (format == \"mindir\") {\n    model_format = kMindIR;\n  } else if (format == \"mindir_opt\" || format == \"mindir_lite\") {\n    model_format = kMindIR_Lite;\n  } else {\n    MSI_LOG_ERROR << \"Invalid model format \" << format;\n  }\n}\n\nstd::string ServableLoadSpec::Repr() const {\n  std::string version;\n  if (version_number > 0) {\n    version = \" version(\" + std::to_string(version_number) + \") \";\n  }\n  return \"servable(\" + servable_name + \") \" + version;\n}\n\nstd::string WorkerRegSpec::Repr() const {\n  std::stringstream str_stream;\n  str_stream << \"{worker_pid:\" << worker_pid << \", address:\" + worker_address\n             << \", servable:\" << servable_spec.servable_name + \", version:\" << servable_spec.version_number << \"}\";\n  return str_stream.str();\n}\n\nstd::string RequestSpec::Repr() const {\n  std::string version;\n  if (version_number > 0) {\n    version = \" version(\" + std::to_string(version_number) + \") \";\n  }\n  return \"servable(\" + servable_name + \") \" + \"method(\" + method_name + \") \" + version;\n}\n\nvoid 
MethodSignature::AddStageFunction(const std::string &func_name,\n                                       const std::vector<std::pair<size_t, uint64_t>> &stage_inputs,\n                                       uint64_t batch_size, const std::string &tag) {\n  MethodStage stage;\n  stage.method_name = method_name;\n  stage.stage_index = stage_index;\n  stage.stage_key = func_name;\n\n  if (PyStageFunctionStorage::Instance()->HasPyFunction(func_name)) {\n    stage.stage_type = kMethodStageTypePyFunction;\n  } else {\n    auto func = CppStageFunctionStorage::Instance().GetFunction(func_name);\n    if (!func) {\n      MSI_LOG_EXCEPTION << \"Function '\" << func_name << \"' is not defined\";\n    }\n    stage.stage_type = kMethodStageTypeCppFunction;\n  }\n  stage.stage_inputs = stage_inputs;\n  stage.batch_size = batch_size;\n  if (tag.empty()) {\n    stage.tag = \"Function '\" + func_name + \"'\";\n  } else {\n    stage.tag = tag;\n  }\n  stage_map[stage_index] = stage;\n  stage_index += 1;\n}\n\nvoid MethodSignature::AddStageModel(const std::string &model_key,\n                                    const std::vector<std::pair<size_t, uint64_t>> &stage_inputs, uint64_t subgraph,\n                                    const std::string &tag) {\n  MethodStage stage;\n  stage.method_name = method_name;\n  stage.stage_index = stage_index;\n  stage.stage_key = model_key;\n  stage.stage_type = kMethodStageTypeModel;\n  stage.stage_inputs = stage_inputs;\n  stage.subgraph = subgraph;\n  if (tag.empty()) {\n    stage.tag = \"Model '\" + model_key + \"'\";\n  } else {\n    stage.tag = tag;\n  }\n  stage_map[stage_index] = stage;\n  stage_index += 1;\n}\n\nvoid MethodSignature::SetReturn(const std::vector<std::pair<size_t, uint64_t>> &return_inputs) {\n  MethodStage stage;\n  stage.method_name = method_name;\n  stage.stage_index = stage_index;\n  stage.stage_key = \"return\";\n  stage.stage_type = kMethodStageTypeReturn;\n  stage.stage_inputs = return_inputs;\n  stage_map[stage_index] 
= stage;\n}\n\nsize_t MethodSignature::GetStageMax() const { return stage_index; }\n\nconst MethodSignature *ServableSignature::GetMethodDeclare(const std::string &method_name) const {\n  auto item =\n    find_if(methods.begin(), methods.end(), [&](const MethodSignature &v) { return v.method_name == method_name; });\n  if (item == methods.end()) {\n    return nullptr;\n  }\n  return &(*item);\n}\n\nconst ModelMeta *ServableSignature::GetModelDeclare(const std::string &model_key) const {\n  auto item = find_if(model_metas.begin(), model_metas.end(),\n                      [&](const ModelMeta &v) { return v.common_meta.model_key == model_key; });\n  if (item == model_metas.end()) {\n    return nullptr;\n  }\n  return &(*item);\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/servable.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_SERVABLE_H\n#define MINDSPORE_SERVING_SERVABLE_H\n\n#include <utility>\n#include <unordered_map>\n#include <string>\n#include <vector>\n#include <memory>\n#include <map>\n#include \"common/serving_common.h\"\n#include \"worker/inference/inference.h\"\n\nnamespace mindspore::serving {\nenum MethodStageType {\n  kMethodStageTypeNone = 0,\n  kMethodStageTypePyFunction,\n  kMethodStageTypeCppFunction,\n  kMethodStageTypeModel,\n  kMethodStageTypeReturn,\n};\n\nstruct MethodStage {\n  std::string method_name;\n  uint64_t stage_index = 0;\n  std::string stage_key;  // function name, model name\n  std::string tag;\n  MethodStageType stage_type;\n  uint64_t subgraph = 0;                                  // when model\n  std::vector<std::pair<size_t, uint64_t>> stage_inputs;  // first: input- 0, stage- 1~n, second: output index\n  // will be updated when model loaded\n  uint64_t batch_size = 0;\n};\n\nstatic const uint64_t kStageStartIndex = 1;\nstruct MS_API MethodSignature {\n  std::string servable_name;\n  std::string method_name;\n  std::vector<std::string> inputs;\n  std::vector<std::string> outputs;\n\n  std::map<size_t, MethodStage> stage_map;  // stage_index, MethodStage\n\n  void AddStageFunction(const std::string &func_name, const std::vector<std::pair<size_t, uint64_t>> &stage_inputs,\n                   
     uint64_t batch_size = 0, const std::string &tag = \"\");\n  void AddStageModel(const std::string &model_key, const std::vector<std::pair<size_t, uint64_t>> &stage_inputs,\n                     uint64_t subgraph = 0, const std::string &tag = \"\");\n  void SetReturn(const std::vector<std::pair<size_t, uint64_t>> &return_inputs);\n  // the max stage is return, when reach max stage, all stage works done\n  size_t GetStageMax() const;\n\n private:\n  // stage index begin with 1, 0 reserve for input, include function, model, return stage\n  size_t stage_index = kStageStartIndex;\n};\n\nstruct ServableLoadSpec {\n  std::string servable_directory;\n  std::string servable_name;\n  uint64_t version_number = 0;\n  std::string Repr() const;\n};\n\nstruct ServableMethodInfo {\n  std::string name;\n  std::vector<std::string> input_names;\n  bool only_model_stage = false;\n};\n\nstruct ModelSubgraphInfo {\n  std::vector<TensorInfo> input_infos;\n  std::vector<TensorInfo> output_infos;\n};\n\nstruct ModelInfo {\n  std::vector<ModelSubgraphInfo> sub_graph_infos;\n  uint64_t batch_size = 0;\n};\n\nstruct ServableRegSpec {\n  std::string servable_name;\n  uint64_t version_number = 0;\n  uint64_t batch_size = 0;\n  bool own_device = true;\n  std::vector<ServableMethodInfo> methods;\n  std::map<std::string, ModelInfo> models;\n};\n\nstruct WorkerRegSpec {\n  uint64_t worker_pid = 0;\n  std::string worker_address;\n  ServableRegSpec servable_spec;\n  std::string Repr() const;\n};\n\nstruct RequestSpec {\n  std::string servable_name;\n  std::string method_name;\n  uint64_t version_number = 0;  // not specified\n  std::string Repr() const;\n};\n\nenum ServableType {\n  kServableTypeUnknown = 0,\n  kServableTypeLocal = 1,\n  kServableTypeDistributed = 2,\n};\n\nstruct CommonModelMeta {\n  std::string servable_name;\n  // used to identify model, for local model: \";\".join(model_files), for distributed model: servable name\n  std::string model_key;\n  bool with_batch_dim = true;  // 
whether there is batch dim in model's inputs/outputs\n  std::vector<int> without_batch_dim_inputs;\n  std::map<uint64_t, size_t> inputs_count;\n  std::map<uint64_t, size_t> outputs_count;\n};\n\nstruct MS_API LocalModelMeta {\n  std::vector<std::string> model_files;              // file names\n  ModelType model_format = ModelType::kUnknownType;  // OM, MindIR, MindIR_Lite\n  ModelContext model_context;\n  std::string config_file;\n  void SetModelFormat(const std::string &format);\n};\n\nstruct DistributedModelMeta {\n  size_t rank_size = 0;\n  size_t stage_size = 0;\n  bool enable_pipeline_infer = false;\n};\n\nstruct MS_API ModelMeta {\n  CommonModelMeta common_meta;\n  LocalModelMeta local_meta;\n  DistributedModelMeta distributed_meta;\n};\n\nstruct MS_API ServableSignature {\n  ServableType servable_type = kServableTypeUnknown;\n  std::string servable_name;\n  std::vector<ModelMeta> model_metas;\n  std::vector<MethodSignature> methods;\n  const MethodSignature *GetMethodDeclare(const std::string &method_name) const;\n  const ModelMeta *GetModelDeclare(const std::string &model_key) const;\n};\n\nstruct WorkerAgentSpec {\n  std::string agent_address;\n  uint32_t rank_id = 0;\n  std::vector<TensorInfo> input_infos;\n  std::vector<TensorInfo> output_infos;\n  uint32_t batch_size = 0;\n  uint64_t subgraph = 0;\n};\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_SERVABLE_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/serving_common.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_SERVING_COMMON_H\n#define MINDSPORE_SERVING_SERVING_COMMON_H\n\n#include <securec.h>\n\n#include \"common/status.h\"\n#include \"common/log.h\"\n#include \"common/tensor.h\"\n#include \"common/utils.h\"\n\n#endif  // MINDSPORE_SERVING_SERVING_COMMON_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/shared_memory.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <sys/mman.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <unistd.h>\n#include \"common/shared_memory.h\"\n\nnamespace mindspore {\nnamespace serving {\nSharedMemoryAllocator &SharedMemoryAllocator::Instance() {\n  static SharedMemoryAllocator instance = SharedMemoryAllocator();\n  return instance;\n}\n\nSharedMemoryAllocator::SharedMemoryAllocator() = default;\nSharedMemoryAllocator::~SharedMemoryAllocator() noexcept {\n  std::unique_lock<std::mutex> lock(lock_);\n  for (auto &item : memory_map_) {\n    auto &group = item.second;\n    for (auto &shm : group.shm_map) {\n      auto ret = munmap(shm.second.address, shm.second.bytes_size);\n      if (ret == -1) {\n        MSI_LOG_ERROR << \"Failed to munmap, memory key: \" << shm.second.memory_key;\n      }\n      ret = shm_unlink(shm.second.memory_key.c_str());\n      if (ret == -1) {\n        MSI_LOG_ERROR << \"Failed to shm_unlink \" << shm.second.memory_key << \", errno: \" << errno;\n      }\n    }\n  }\n  memory_map_.clear();\n}\n\nStatus SharedMemoryAllocator::AddShmMemoryBuffer(SharedMemoryGroup *shm_group) {\n  auto item_size = shm_group->item_size;\n  auto item_count = shm_group->item_count;\n  auto memory_key = shm_group->memory_key_prefix + \"_\" + std::to_string(shm_group->shm_map.size());\n  // maximum 4GB memory\n  if (item_size == 0 || item_count == 0 
|| UINT32_MAX / item_size < item_count) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Invalid item size or item count, item size: \" << item_size\n                                          << \", item count :\" << item_count << \", memory key: \" << memory_key;\n  }\n  constexpr uint32_t align_size = 8;\n  auto align_item_size = (item_size + align_size - 1) / align_size * align_size;\n  auto shm_fd = shm_open(memory_key.c_str(), O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);\n  if (shm_fd == -1) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Failed to shm_open \" << memory_key << \" , errno: \" << errno;\n  }\n\n  uint64_t memory_size = align_item_size * item_count;\n  auto ret = ftruncate(shm_fd, static_cast<int64_t>(memory_size));\n  if (ret == -1) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Failed to ftruncate \" << memory_key << \", errno: \" << errno << \", memory size: \" << memory_size;\n  }\n  auto address = mmap(nullptr, memory_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);\n  if (address == MAP_FAILED) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Failed to mmap \" << memory_key << \", errno: \" << errno << \", memory size: \" << memory_size;\n  }\n  ret = close(shm_fd);\n  if (ret == -1) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Failed to close \" << memory_key << \", errno: \" << errno;\n  }\n  SharedMemory &shm = shm_group->shm_map[memory_key];\n  shm.memory_key = memory_key;\n  shm.address = reinterpret_cast<uint8_t *>(address);\n  shm.bytes_size = memory_size;\n  uint64_t offset = 0;\n  for (uint64_t i = 0; i < item_count; i++) {\n    (void)shm.free_queue.emplace(offset);\n    offset += align_item_size;\n  }\n  shm_group->free_count += item_count;\n\n  MSI_LOG_INFO << \"New shared memory success, memory key: \" << memory_key << \", bytes size: \" << memory_size\n               << \", item count: \" << item_count;\n  return SUCCESS;\n}\n\nStatus SharedMemoryAllocator::NewMemoryBuffer(const std::string 
&memory_key_prefix, uint64_t item_size,\n                                              uint64_t item_count) {\n  std::unique_lock<std::mutex> lock(lock_);\n  if (memory_map_.find(memory_key_prefix) != memory_map_.end()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Shared memory has already been inited\";\n  }\n  auto &group = memory_map_[memory_key_prefix];\n  group.memory_key_prefix = memory_key_prefix;\n  group.item_size = item_size;\n  group.item_count = item_count;\n  group.free_count = 0;\n  auto status = AddShmMemoryBuffer(&group);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Alloc shared memory failed, memory key prefix: \" << memory_key_prefix;\n    return status;\n  }\n  return SUCCESS;\n}\n\nStatus SharedMemoryAllocator::AllocMemoryItem(const std::string &memory_key_prefix, SharedMemoryItem *shm_item) {\n  std::unique_lock<std::mutex> lock(lock_);\n  auto it = memory_map_.find(memory_key_prefix);\n  if (it == memory_map_.end()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Cannot find shared memory \" << memory_key_prefix;\n  }\n  auto &group = it->second;\n  if (group.free_count == 0) {\n    auto status = AddShmMemoryBuffer(&group);\n    if (status != SUCCESS) {\n      MSI_LOG_ERROR << \"Alloc shared memory failed, memory key prefix: \" << memory_key_prefix;\n      return SUCCESS;\n    }\n  }\n  for (auto &item : group.shm_map) {\n    auto &shm = item.second;\n    if (!shm.free_queue.empty()) {\n      shm_item->memory_key_prefix = memory_key_prefix;\n      shm_item->memory_key = shm.memory_key;\n      shm_item->bytes_size = shm.bytes_size;\n      shm_item->offset = *shm.free_queue.begin();\n      shm_item->offset_address = shm.address + shm_item->offset;\n      shm_item->size = group.item_size;\n      (void)shm.free_queue.erase(shm_item->offset);\n      group.free_count -= 1;\n      return SUCCESS;\n    }\n  }\n  MSI_LOG_EXCEPTION << \"There is no free shared memory\";\n}\n\nvoid SharedMemoryAllocator::ReleaseMemoryItem(const 
SharedMemoryItem &shm_item) {\n  std::unique_lock<std::mutex> lock(lock_);\n  auto it = memory_map_.find(shm_item.memory_key_prefix);\n  if (it == memory_map_.end()) {\n    MSI_LOG_WARNING << \"Cannot find shared memory prefix \" << shm_item.memory_key_prefix;\n    return;\n  }\n  auto shm_it = it->second.shm_map.find(shm_item.memory_key);\n  if (shm_it == it->second.shm_map.end()) {\n    MSI_LOG_WARNING << \"Cannot find shared memory \" << shm_item.memory_key;\n    return;\n  }\n  if (shm_it->second.free_queue.count(shm_item.offset) > 0) {\n    MSI_LOG_EXCEPTION << \"Shared memory \" << shm_item.memory_key\n                      << \" has already been in free set, offset: \" << shm_item.offset;\n  }\n  (void)shm_it->second.free_queue.emplace(shm_item.offset);\n  it->second.free_count += 1;\n}\n\nShmTensor::ShmTensor(DataType type, const std::vector<int64_t> &shape, const SharedMemoryItem &shm_item)\n    : BufferTensor(type, shape, shm_item.offset_address, shm_item.size, false), shm_info_(shm_item) {}\n\nShmTensor::~ShmTensor() noexcept { SharedMemoryAllocator::Instance().ReleaseMemoryItem(shm_info_); }\n\nSharedMemoryManager &SharedMemoryManager::Instance() {\n  static SharedMemoryManager instance = SharedMemoryManager();\n  return instance;\n}\n\nSharedMemoryManager::SharedMemoryManager() {}\nSharedMemoryManager::~SharedMemoryManager() noexcept {\n  std::unique_lock<std::mutex> lock(lock_);\n  for (auto &item : attached_shm_list_) {\n    auto ret = munmap(item.address, item.bytes_size);\n    if (ret == -1) {\n      MSI_LOG_ERROR << \"Failed to munmap, memory key: \" << item.memory_key;\n    }\n  }\n  attached_shm_list_.clear();\n}\n\nStatus SharedMemoryManager::Attach(const std::string &memory_key, uint64_t bytes_size, uint64_t data_offset,\n                                   uint64_t data_size, SharedMemoryAttachItem *shm_info) {\n  if (data_size > bytes_size || data_offset > bytes_size - data_size) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << 
\"Invalid memory size info, memory key: \" << memory_key << \", bytes size: \" << bytes_size\n           << \", data offset: \" << data_offset << \", data size: \" << data_size;\n  }\n  SharedMemoryAttach attach_mem;\n  auto status = Attach(memory_key, bytes_size, &attach_mem);\n  if (status != SUCCESS) {\n    return status;\n  }\n  shm_info->memory_key = attach_mem.memory_key;\n  shm_info->offset_address = attach_mem.address + data_offset;\n  shm_info->offset = data_offset;\n  shm_info->size = data_size;\n  return SUCCESS;\n}\n\nStatus SharedMemoryManager::Detach(const std::string &memory_key) {\n  std::unique_lock<std::mutex> lock(lock_);\n  auto it = std::find_if(attached_shm_list_.begin(), attached_shm_list_.end(),\n                         [&memory_key](const SharedMemoryAttach &item) { return memory_key == item.memory_key; });\n  if (it == attached_shm_list_.end()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Cannot find shared memory \" << memory_key;\n  }\n  auto ret = munmap(it->address, it->bytes_size);\n  if (ret == -1) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Failed to munmap, memory key: \" << memory_key;\n  }\n  (void)attached_shm_list_.erase(it);\n  return SUCCESS;\n}\n\nStatus SharedMemoryManager::Attach(const std::string &memory_key, uint64_t bytes_size, SharedMemoryAttach *attach_mem) {\n  std::unique_lock<std::mutex> lock(lock_);\n  for (auto &item : attached_shm_list_) {\n    if (item.memory_key == memory_key) {\n      *attach_mem = item;\n      return SUCCESS;\n    }\n  }\n  auto shm_fd = shm_open(memory_key.c_str(), O_RDWR, S_IRUSR | S_IWUSR);\n  if (shm_fd == -1) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Failed to shm_open \" << memory_key << \" , errno: \" << errno;\n  }\n  auto address = mmap(nullptr, bytes_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);\n  if (address == MAP_FAILED) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Failed to mmap \" << memory_key << \", errno: \" << errno << \", 
memory size: \" << bytes_size;\n  }\n  auto ret = close(shm_fd);\n  if (ret == -1) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Failed to close \" << memory_key << \", errno: \" << errno;\n  }\n  attach_mem->memory_key = memory_key;\n  attach_mem->bytes_size = bytes_size;\n  attach_mem->address = static_cast<uint8_t *>(address);\n  attached_shm_list_.push_back(*attach_mem);\n  return SUCCESS;\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/shared_memory.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_SHARED_MEMORY_H\n#define MINDSPORE_SERVING_SHARED_MEMORY_H\n\n#include <string>\n#include <map>\n#include <vector>\n#include <memory>\n#include <queue>\n#include <set>\n#include <mutex>\n#include \"common/serving_common.h\"\n#include \"common/buffer_tensor.h\"\n\nnamespace mindspore {\nnamespace serving {\nstruct SharedMemoryItem {\n  std::string memory_key_prefix;\n  std::string memory_key;   // for shm_open\n  uint64_t bytes_size = 0;  // for shm_open\n  uint8_t *offset_address = nullptr;\n  uint64_t offset = 0;\n  uint64_t size = 0;\n};\n\nstruct SharedMemory {\n  std::string memory_key;\n  uint64_t bytes_size = 0;\n  uint8_t *address = nullptr;\n  std::set<uint64_t> free_queue;\n};\n\nstruct SharedMemoryGroup {\n  std::map<std::string, SharedMemory> shm_map;\n  std::string memory_key_prefix;\n  uint64_t item_size = 0;\n  uint64_t item_count = 0;\n  uint64_t free_count = 0;\n};\n\nclass SharedMemoryAllocator {\n public:\n  static SharedMemoryAllocator &Instance();\n  SharedMemoryAllocator();\n  ~SharedMemoryAllocator() noexcept;\n  Status NewMemoryBuffer(const std::string &memory_key_prefix, uint64_t item_size, uint64_t init_item_count);\n  Status AllocMemoryItem(const std::string &memory_key_prefix, SharedMemoryItem *shm_item);\n  void ReleaseMemoryItem(const SharedMemoryItem &shm_item);\n\n private:\n 
 std::map<std::string, SharedMemoryGroup> memory_map_;\n  std::mutex lock_;\n  Status AddShmMemoryBuffer(SharedMemoryGroup *shm_group);\n};\n\nclass ShmTensor : public BufferTensor {\n public:\n  ShmTensor(DataType type, const std::vector<int64_t> &shape, const SharedMemoryItem &shm_item);\n  ~ShmTensor() noexcept;\n\n private:\n  SharedMemoryItem shm_info_;\n};\n\nstruct SharedMemoryAttach {\n  std::string memory_key;\n  uint64_t bytes_size = 0;\n  uint8_t *address = nullptr;\n};\n\nstruct SharedMemoryAttachItem {\n  std::string memory_key;  // for shm_open\n  uint8_t *offset_address = nullptr;\n  uint64_t offset = 0;\n  uint64_t size = 0;\n};\n\nclass SharedMemoryManager {\n public:\n  static SharedMemoryManager &Instance();\n  SharedMemoryManager();\n  ~SharedMemoryManager() noexcept;\n  Status Attach(const std::string &memory_key, uint64_t bytes_size, uint64_t data_offset, uint64_t data_size,\n                SharedMemoryAttachItem *shm_info);\n  Status Detach(const std::string &memory_key);\n\n private:\n  std::vector<SharedMemoryAttach> attached_shm_list_;\n  std::mutex lock_;\n  Status Attach(const std::string &memory_key, uint64_t bytes_size, SharedMemoryAttach *attach_mem);\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_SHARED_MEMORY_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/ssl_config.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_SSL_CONFIG_H\n#define MINDSPORE_SERVING_SSL_CONFIG_H\n\n#include <string>\n\nnamespace mindspore::serving {\nstruct SSLConfig {\n  std::string certificate;\n  std::string private_key;\n  std::string custom_ca;\n  bool verify_client{false};\n  bool use_ssl{false};\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_SSL_CONFIG_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/status.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_STATUS_H\n#define MINDSPORE_SERVING_STATUS_H\n\n#include <chrono>\n#include <string>\n#include <sstream>\n\n#include \"common/log.h\"\n\nnamespace mindspore::serving {\nenum StatusCode {\n  SUCCESS = 0,\n  FAILED,\n  INVALID_INPUTS,\n  SYSTEM_ERROR,\n  WORKER_UNAVAILABLE,\n  SERVABLE_UNAVAILABLE,\n};\n\nclass Status {\n public:\n  Status() : status_code_(FAILED) {}\n  Status(enum StatusCode status_code, const std::string &status_msg = \"\")  // NOLINT(runtime/explicit)\n      : status_code_(status_code), status_msg_(status_msg) {}\n  bool IsSuccess() const { return status_code_ == SUCCESS; }\n  enum StatusCode StatusCode() const { return status_code_; }\n  std::string StatusMessage() const { return status_msg_; }\n  bool operator==(const Status &other) const { return status_code_ == other.status_code_; }\n  bool operator==(enum StatusCode other_code) const { return status_code_ == other_code; }\n  bool operator!=(const Status &other) const { return status_code_ != other.status_code_; }\n  bool operator!=(enum StatusCode other_code) const { return status_code_ != other_code; }\n  operator bool() const = delete;\n  Status &operator<(const LogStream &stream) noexcept __attribute__((visibility(\"default\"))) {\n    status_msg_ = stream.sstream_->str();\n    return *this;\n  }\n  Status &operator=(const 
std::string &msg) noexcept __attribute__((visibility(\"default\"))) {\n    status_msg_ = msg;\n    return *this;\n  }\n\n private:\n  enum StatusCode status_code_;\n  std::string status_msg_;\n};\n\n#define MSI_TIME_STAMP_START(name) auto time_start_##name = std::chrono::steady_clock::now();\n#define MSI_TIME_STAMP_END(name)                                                                             \\\n  {                                                                                                          \\\n    auto time_end_##name = std::chrono::steady_clock::now();                                                 \\\n    auto time_cost = std::chrono::duration<double, std::milli>(time_end_##name - time_start_##name).count(); \\\n    MSI_LOG_INFO << #name \" Time Cost # \" << time_cost << \" ms ---------------------\";                       \\\n  }\n\n#define MSI_TIME_STAMP_END_EXTRA(name, extra)                                                                \\\n  {                                                                                                          \\\n    auto time_end_##name = std::chrono::steady_clock::now();                                                 \\\n    auto time_cost = std::chrono::duration<double, std::milli>(time_end_##name - time_start_##name).count(); \\\n    MSI_LOG_INFO << extra << \" \" << #name \" Time Cost # \" << time_cost << \" ms ---------------------\";       \\\n  }\n\n#define INFER_STATUS(code) mindspore::serving::Status(code) < mindspore::serving::LogStream()\n\n#define INFER_STATUS_LOG_ERROR(code) mindspore::serving::Status(code) = MSILOG_NOIF(ERROR)\n#define INFER_STATUS_LOG_WARNING(code) mindspore::serving::Status(code) = MSILOG_NOIF(WARNING)\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_STATUS_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/tensor.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"common/tensor.h\"\n#include <securec.h>\n#include <functional>\n#include <utility>\n#include \"common/log.h\"\n\nnamespace mindspore::serving {\nTensor::Tensor() = default;\n\nTensor::Tensor(DataType type, const std::vector<int64_t> &shape, const void *data, size_t data_len)\n    : type_(type), shape_(shape) {\n  (void)set_data(data, data_len);\n}\n\nconst uint8_t *Tensor::data() const {\n  if (data_size() == 0) {\n    return nullptr;\n  }\n  return data_.data();\n}\n\nsize_t Tensor::data_size() const { return data_.size(); }\n\nbool Tensor::resize_data(size_t data_len) {\n  data_.resize(data_len);\n  return true;\n}\n\nuint8_t *Tensor::mutable_data() {\n  if (data_size() == 0) {\n    return nullptr;\n  }\n  return data_.data();\n}\n\n// For kMSI_String and kMSI_Bytes\nvoid Tensor::clear_bytes_data() { bytes_.clear(); }\n\nvoid Tensor::add_bytes_data(const uint8_t *data, size_t bytes_len) {\n  std::vector<uint8_t> bytes(bytes_len);\n  (void)memcpy_s(bytes.data(), bytes.size(), data, bytes_len);\n  bytes_.push_back(std::move(bytes));\n}\n\nsize_t Tensor::bytes_data_size() const { return bytes_.size(); }\n\nvoid Tensor::get_bytes_data(size_t index, const uint8_t **data, size_t *bytes_len) const {\n  MSI_EXCEPTION_IF_NULL(data);\n  MSI_EXCEPTION_IF_NULL(bytes_len);\n  *bytes_len = bytes_[index].size();\n  if (*bytes_len == 0) 
{\n    *data = nullptr;\n  } else {\n    *data = bytes_[index].data();\n  }\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/tensor.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_TENSOR_H\n#define MINDSPORE_SERVING_TENSOR_H\n\n#include <vector>\n#include \"common/tensor_base.h\"\n\nnamespace mindspore::serving {\nclass MS_API Tensor : public TensorBase {\n public:\n  Tensor();\n  Tensor(DataType type, const std::vector<int64_t> &shape, const void *data, size_t data_len);\n  ~Tensor() = default;\n\n  void set_data_type(DataType type) override { type_ = type; }\n  DataType data_type() const override { return type_; }\n\n  void set_shape(const std::vector<int64_t> &shape) override { shape_ = shape; }\n  std::vector<int64_t> shape() const override { return shape_; }\n\n  const uint8_t *data() const override;\n  size_t data_size() const override;\n\n  bool resize_data(size_t data_len) override;\n  uint8_t *mutable_data() override;\n\n  // For kMSI_String and kMSI_Bytes\n  void clear_bytes_data() override;\n  void add_bytes_data(const uint8_t *data, size_t bytes_len) override;\n  size_t bytes_data_size() const override;\n  void get_bytes_data(size_t index, const uint8_t **data, size_t *bytes_len) const override;\n\n private:\n  DataType type_ = kMSI_Unknown;\n  std::vector<int64_t> shape_;\n  std::vector<uint8_t> data_;\n  // For kMSI_String and kMSI_Bytes\n  std::vector<std::vector<uint8_t>> bytes_;\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_TENSOR_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/tensor_base.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"common/tensor_base.h\"\n#include <securec.h>\n#include <string>\n#include \"common/log.h\"\n\n#define TENSOR_MAX_ELEMENT_COUNT UINT32_MAX\n\nnamespace mindspore::serving {\nTensorBase::TensorBase() = default;\nTensorBase::~TensorBase() = default;\n\nbool TensorBase::set_data(const void *data, size_t data_len) {\n  if (data_size() != data_len) {\n    (void)resize_data(data_len);\n    if (data_len == 0) {\n      MSI_LOG_INFO << \"set data to data len 0\";\n      return true;\n    }\n  }\n  if (mutable_data() == nullptr) {\n    MSI_LOG_ERROR << \"set data failed, data len \" << data_len;\n    return false;\n  }\n  if (data_size() != data_len) {\n    MSI_LOG_ERROR << \"set data failed, tensor current data size \" << data_size() << \" not match data len \" << data_len;\n    return false;\n  }\n  (void)memcpy_s(mutable_data(), data_size(), data, data_len);\n  return true;\n}\n\nsize_t TensorBase::itemsize() const { return GetTypeSize(data_type()); }\n\nsize_t TensorBase::element_cnt() const {\n  size_t element_num = 1;\n  for (auto dim : shape()) {\n    if (dim <= 0 || TENSOR_MAX_ELEMENT_COUNT / static_cast<size_t>(dim) < element_num) {\n      return 0;\n    }\n    element_num *= static_cast<size_t>(dim);\n  }\n  return element_num;\n}\n\nsize_t TensorBase::GetTypeSize(DataType type) {\n  const std::map<DataType, size_t> 
type_size_map{\n    {kMSI_Bool, sizeof(bool)},       {kMSI_Float64, sizeof(double)},   {kMSI_Int8, sizeof(int8_t)},\n    {kMSI_Uint8, sizeof(uint8_t)},   {kMSI_Int16, sizeof(int16_t)},    {kMSI_Uint16, sizeof(uint16_t)},\n    {kMSI_Int32, sizeof(int32_t)},   {kMSI_Uint32, sizeof(uint32_t)},  {kMSI_Int64, sizeof(int64_t)},\n    {kMSI_Uint64, sizeof(uint64_t)}, {kMSI_Float16, sizeof(uint16_t)}, {kMSI_Float32, sizeof(float)},\n  };\n  auto it = type_size_map.find(type);\n  if (it != type_size_map.end()) {\n    return it->second;\n  }\n  return 0;\n}\n\nvoid TensorBase::assign(const TensorBase &other) {\n  if (is_bytes_val_data()) {\n    clear_bytes_data();\n  }\n  set_shape(other.shape());\n  set_data_type(other.data_type());\n  if (other.is_bytes_val_data()) {\n    for (size_t i = 0; i < other.bytes_data_size(); i++) {\n      const uint8_t *data;\n      size_t data_len;\n      other.get_bytes_data(i, &data, &data_len);\n      add_bytes_data(data, data_len);\n    }\n  } else {\n    (void)set_data(other.data(), other.data_size());\n  }\n}\n\nLogStream &operator<<(LogStream &stream, DataType data_type) {\n  const std::map<DataType, std::string> type_name_map{\n    {kMSI_Unknown, \"kMSI_Unknown\"}, {kMSI_Bool, \"kMSI_Bool\"},       {kMSI_Int8, \"kMSI_Int8\"},\n    {kMSI_Uint8, \"kMSI_Uint8\"},     {kMSI_Int16, \"kMSI_Int16\"},     {kMSI_Uint16, \"kMSI_Uint16\"},\n    {kMSI_Int32, \"kMSI_Int32\"},     {kMSI_Uint32, \"kMSI_Uint32\"},   {kMSI_Int64, \"kMSI_Int64\"},\n    {kMSI_Uint64, \"kMSI_Uint64\"},   {kMSI_Float16, \"kMSI_Float16\"}, {kMSI_Float32, \"kMSI_Float32\"},\n    {kMSI_Float64, \"kMSI_Float64\"}, {kMSI_Bytes, \"kMSI_Bytes\"},     {kMSI_String, \"kMSI_String\"},\n  };\n  auto it = type_name_map.find(data_type);\n  if (it != type_name_map.end()) {\n    stream << it->second;\n  } else {\n    stream << \"kMSI_Unknown\";\n  }\n  return stream;\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/tensor_base.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_TENSOR_BASE_H\n#define MINDSPORE_SERVING_TENSOR_BASE_H\n\n#include <utility>\n#include <vector>\n#include <memory>\n#include <numeric>\n#include <map>\n#include <functional>\n#include \"common/log.h\"\n#include \"common/status.h\"\n\nnamespace mindspore {\nnamespace serving {\nenum DataType {\n  kMSI_Unknown = 0,\n  kMSI_Bool = 1,\n  kMSI_Int8 = 2,\n  kMSI_Int16 = 3,\n  kMSI_Int32 = 4,\n  kMSI_Int64 = 5,\n  kMSI_Uint8 = 6,\n  kMSI_Uint16 = 7,\n  kMSI_Uint32 = 8,\n  kMSI_Uint64 = 9,\n  kMSI_Float16 = 10,\n  kMSI_Float32 = 11,\n  kMSI_Float64 = 12,\n  kMSI_String = 13,  // for model STRING input\n  kMSI_Bytes = 14,   // for image etc.\n};\n\nclass TensorBase;\nusing TensorBasePtr = std::shared_ptr<TensorBase>;\n\nclass MS_API TensorBase : public std::enable_shared_from_this<TensorBase> {\n public:\n  TensorBase();\n  virtual ~TensorBase();\n\n  // For all data type\n  virtual std::vector<int64_t> shape() const = 0;\n  virtual void set_shape(const std::vector<int64_t> &shape) = 0;\n  virtual DataType data_type() const = 0;\n  virtual void set_data_type(DataType type) = 0;\n\n  // All the following interfaces are not for  kMSI_String and kMSI_Bytes\n  virtual const uint8_t *data() const = 0;\n  virtual size_t data_size() const = 0;\n  virtual bool resize_data(size_t data_len) = 0;\n  virtual uint8_t 
*mutable_data() = 0;\n\n  // Byte size of a single element.\n  size_t itemsize() const;\n  // Total number of elements.\n  size_t element_cnt() const;\n  // resize and copy data\n  bool set_data(const void *data, size_t data_len);\n  static size_t GetTypeSize(DataType type);\n\n  // For kMSI_String and kMSI_Bytes\n  virtual void clear_bytes_data() = 0;\n  virtual void add_bytes_data(const uint8_t *data, size_t bytes_len) = 0;\n  virtual size_t bytes_data_size() const = 0;\n  virtual void get_bytes_data(size_t index, const uint8_t **data, size_t *bytes_len) const = 0;\n\n  // TensorBase(const TensorBase& other) = delete;\n  // TensorBase& operator=(const TensorBase& other) = delete;\n  void assign(const TensorBase &other);\n  bool is_bytes_val_data() const { return data_type() == kMSI_Bytes || data_type() == kMSI_String; }\n};\n\nclass RequestBase {\n public:\n  RequestBase() = default;\n  virtual ~RequestBase() = default;\n  virtual size_t size() const = 0;\n  virtual const TensorBase *operator[](size_t index) const = 0;\n};\n\nclass ReplyBase {\n public:\n  ReplyBase() = default;\n  virtual ~ReplyBase() = default;\n  virtual size_t size() const = 0;\n  virtual TensorBase *operator[](size_t index) = 0;\n  virtual const TensorBase *operator[](size_t index) const = 0;\n  virtual TensorBase *add() = 0;\n  virtual void clear() = 0;\n};\n\nextern MS_API LogStream &operator<<(LogStream &stream, DataType data_type);\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_TENSOR_BASE_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/thread_pool.cc",
    "content": "/**\n * Copyright 2019-2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"common/thread_pool.h\"\n#include <atomic>\n#include <functional>\n#include <queue>\n#include <stdexcept>\n#include <utility>\n#include <vector>\n\nnamespace mindspore::serving {\nThreadPool::ThreadPool(uint32_t size) : is_stoped_(false), idle_thrd_num_(size < 1 ? 1 : size) {\n  for (uint32_t i = 0; i < idle_thrd_num_; ++i) {\n    (void)pool_.emplace_back(ThreadFunc, this);\n  }\n}\n\nThreadPool::~ThreadPool() {\n  {\n    std::unique_lock<std::mutex> lock{m_lock_};\n    is_stoped_.store(true);\n    cond_var_.notify_all();\n  }\n\n  for (std::thread &thd : pool_) {\n    if (thd.joinable()) {\n      try {\n        thd.join();\n      } catch (const std::system_error &) {\n      } catch (...) 
{\n      }\n    }\n  }\n}\n\nvoid ThreadPool::ThreadFunc(ThreadPool *thread_pool) {\n  if (thread_pool == nullptr) {\n    return;\n  }\n  while (!thread_pool->is_stoped_) {\n    std::function<void()> task;\n    {\n      std::unique_lock<std::mutex> lock{thread_pool->m_lock_};\n      thread_pool->cond_var_.wait(\n        lock, [thread_pool] { return thread_pool->is_stoped_.load() || !thread_pool->tasks_.empty(); });\n      if (thread_pool->is_stoped_ && thread_pool->tasks_.empty()) {\n        return;\n      }\n      task = std::move(thread_pool->tasks_.front());\n      thread_pool->tasks_.pop();\n    }\n    thread_pool->idle_thrd_num_ -= 1;\n    task();\n    thread_pool->idle_thrd_num_ += 1;\n  }\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/thread_pool.h",
    "content": "/**\n * Copyright 2019-2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_THREAD_POOL_H_\n#define MINDSPORE_SERVING_THREAD_POOL_H_\n\n#include <atomic>\n#include <condition_variable>\n#include <functional>\n#include <future>\n#include <memory>\n#include <queue>\n#include <stdexcept>\n#include <thread>\n#include <utility>\n#include <vector>\n\nnamespace mindspore::serving {\nusing ThreadTask = std::function<void()>;\n\nclass ThreadPool {\n public:\n  explicit ThreadPool(uint32_t size = 4);\n\n  ~ThreadPool();\n\n  template <class Func, class... Args>\n  auto commit(Func &&func, Args &&... 
args) -> std::future<decltype(func(args...))> {\n    using retType = decltype(func(args...));\n    std::future<retType> fail_future;\n    if (is_stoped_.load()) {\n      return fail_future;\n    }\n\n    auto bindFunc = std::bind(std::forward<Func>(func), std::forward<Args>(args)...);\n    auto task = std::make_shared<std::packaged_task<retType()>>(bindFunc);\n    if (task == nullptr) {\n      return fail_future;\n    }\n    std::future<retType> future = task->get_future();\n    {\n      std::lock_guard<std::mutex> lock{m_lock_};\n      (void)tasks_.emplace([task]() { (*task)(); });\n    }\n    cond_var_.notify_one();\n    return future;\n  }\n\n  static void ThreadFunc(ThreadPool *thread_pool);\n\n private:\n  std::vector<std::thread> pool_;\n  std::queue<ThreadTask> tasks_;\n  std::mutex m_lock_;\n  std::condition_variable cond_var_;\n  std::atomic<bool> is_stoped_;\n  std::atomic<uint32_t> idle_thrd_num_;\n};\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_THREAD_POOL_H_\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/utils.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"common/utils.h\"\n#include <unistd.h>\n#include <string>\n#include <algorithm>\n\nnamespace mindspore::serving::common {\nStatus CheckAddress(const std::string &address, const std::string &server_tag, std::string *ip, uint16_t *port) {\n  Status status;\n  auto position = address.find_last_of(':');\n  if (position == std::string::npos) {\n    status = INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Serving Error: The format of the \" << server_tag << \" address '\" << address << \"' is illegal\";\n    return status;\n  }\n  if (position == 0 || position == address.size() - 1) {\n    status = INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Serving Error: Missing ip or port of the \" << server_tag << \" address '\" << address << \"'\";\n    return status;\n  }\n  if (ip != nullptr) {\n    *ip = address.substr(0, position);\n  }\n  try {\n    auto port_number = std::stoi(address.substr(position + 1, address.size()));\n    constexpr int port_min = 1;\n    constexpr int port_max = 65535;\n    if (port_number < port_min || port_number > port_max) {\n      status = INFER_STATUS_LOG_ERROR(FAILED) << \"Serving Error: The port of the \" << server_tag << \" address '\"\n                                              << address << \"' is out of legal range [1 ~ 65535]\";\n      return status;\n    }\n    if (port != nullptr) {\n    
  *port = static_cast<uint16_t>(port_number);\n    }\n  } catch (const std::invalid_argument &) {\n    status = INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Serving Error: The type of \" << server_tag << \" address '\" << address << \"' port is not a number\";\n    return status;\n  } catch (const std::out_of_range &) {\n    status = INFER_STATUS_LOG_ERROR(FAILED) << \"Serving Error: The port of the \" << server_tag << \" address '\"\n                                            << address << \"' is out of legal range [1 ~ 65535]\";\n    return status;\n  }\n  return SUCCESS;\n}\n\nbool DirOrFileExist(const std::string &file_path) {\n  int ret = access(file_path.c_str(), 0);\n  return (ret == -1) ? false : true;\n}\n}  // namespace mindspore::serving::common\n"
  },
  {
    "path": "mindspore_serving/ccsrc/common/utils.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_COMMON_UTILS_H\n#define MINDSPORE_SERVING_COMMON_UTILS_H\n\n#include <string>\n#include \"common/status.h\"\n\nnamespace mindspore::serving::common {\n\nstatic inline std::string GetEnv(const std::string &env_var) {\n  const char *value = ::getenv(env_var.c_str());\n  if (value == nullptr) {\n    return std::string();\n  }\n  return std::string(value);\n}\n\nStatus CheckAddress(const std::string &address, const std::string &server_tag, std::string *ip, uint16_t *port);\n\nbool DirOrFileExist(const std::string &file_path);\n\n}  // namespace mindspore::serving::common\n\n#endif  // MINDSPORE_SERVING_COMMON_UTILS_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/dispacther.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"master/dispacther.h\"\n\n#include <utility>\n#include \"common/proto_tensor.h\"\n#include \"master/master_context.h\"\n#include \"master/notify_worker/grpc_notify.h\"\n\nnamespace mindspore::serving {\nDispatcher::Dispatcher() {}\n\nDispatcher::~Dispatcher() { Clear(); }\n\nstd::shared_ptr<ServableEndPoint> Dispatcher::GetWorkerEndpoint(const RequestSpec &request_spec) const {\n  Status status;\n  if (request_spec.version_number > 0) {\n    auto item = find_if(servable_list_.begin(), servable_list_.end(), [&](const std::shared_ptr<ServableEndPoint> &v) {\n      return v->GetServableName() == request_spec.servable_name && v->GetVersionNumber() == request_spec.version_number;\n    });\n    if (item != servable_list_.end()) {\n      return *item;\n    }\n    return nullptr;\n  }\n  uint64_t max_version_number = 0;\n  std::shared_ptr<ServableEndPoint> endpoint = nullptr;\n  for (const auto &item : servable_list_) {\n    if (item->GetServableName() == request_spec.servable_name && max_version_number < item->GetVersionNumber()) {\n      endpoint = item;\n      max_version_number = item->GetVersionNumber();\n    }\n  }\n  return endpoint;\n}\n\nStatus Dispatcher::JudgeInferNum() {\n  auto max_enqueued_requests = MasterContext::Instance()->GetMaxEnqueuedRequests();\n  if (enqueued_requests_ >= max_enqueued_requests) {\n    
return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Serving Error: enqueued requests count exceeds the limit \" << max_enqueued_requests;\n  }\n  return SUCCESS;\n}\n\nvoid Dispatcher::DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                               const PredictOnFinish &on_finish) {\n  MSI_EXCEPTION_IF_NULL(reply);\n  (*reply->mutable_servable_spec()) = request.servable_spec();\n  Status status = JudgeInferNum();\n  if (status != SUCCESS) {\n    GrpcTensorHelper::CreateReplyFromErrorMsg(status, reply);\n    on_finish();\n    return;\n  }\n  try {\n    auto callback = [this, on_finish]() {\n      on_finish();\n      this->enqueued_requests_--;\n    };\n    enqueued_requests_++;\n    status = DispatchAsyncInner(request, reply, callback);\n  } catch (const std::bad_alloc &ex) {\n    MSI_LOG(ERROR) << \"Serving Error: malloc memory failed\";\n  } catch (const std::runtime_error &ex) {\n    MSI_LOG(ERROR) << \"Serving Error: runtime error occurred: \" << ex.what();\n  } catch (const std::exception &ex) {\n    MSI_LOG(ERROR) << \"Serving Error: exception occurred: \" << ex.what();\n  } catch (...) 
{\n    MSI_LOG(ERROR) << \"Serving Error: exception occurred\";\n  }\n  if (status != SUCCESS) {\n    GrpcTensorHelper::CreateReplyFromErrorMsg(status, reply);\n    on_finish();\n    enqueued_requests_--;\n  }\n}\n\nStatus Dispatcher::DispatchAsyncInner(const proto::PredictRequest &request, proto::PredictReply *reply,\n                                      const PredictOnFinish &on_finish) {\n  MSI_EXCEPTION_IF_NULL(reply);\n  std::shared_lock<std::shared_mutex> lock(servable_shared_lock_);\n  RequestSpec request_spec;\n  GrpcTensorHelper::GetRequestSpec(request, &request_spec);\n  auto endpoint = GetWorkerEndpoint(request_spec);\n  if (endpoint == nullptr) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"Request \" << request_spec.Repr() << \", servable is not available\";\n  }\n  auto methods = endpoint->GetMethods();\n  bool find_method = std::any_of(methods.begin(), methods.end(), [&](const ServableMethodInfo &method) {\n    return method.name == request_spec.method_name;\n  });\n  if (!find_method) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"Request \" << request_spec.Repr() << \", method is not available\";\n  }\n  return endpoint->DispatchAsync(request, reply, on_finish);\n}\n\nStatus Dispatcher::UnregisterServableCommon(const std::string &worker_address) {\n  std::unique_lock<std::shared_mutex> lock(servable_shared_lock_);\n  std::shared_ptr<WorkerContext> worker_context = nullptr;\n  for (auto &item : worker_list_) {\n    if (item->GetWorkerAddress() == worker_address) {\n      worker_context = item;\n      break;\n    }\n  }\n  if (worker_context == nullptr) {\n    MSI_LOG_ERROR << \"Cannot find worker context of address \" << worker_address;\n    return FAILED;\n  }\n  auto servable_spec = worker_context->GetWorkerSpec().servable_spec;\n  std::shared_ptr<ServableEndPoint> endpoint = nullptr;\n  for (auto &item : servable_list_) {\n    if (item->GetServableName() == servable_spec.servable_name &&\n        item->GetVersionNumber() == 
servable_spec.version_number) {\n      endpoint = item;\n      break;\n    }\n  }\n  if (endpoint) {\n    endpoint->UnregisterWorker(worker_address);\n  }\n  worker_context->OnExit();\n  MSI_LOG_INFO << \"Unregister worker exit success, worker pid: \" << worker_context->GetWorkerPid()\n               << \", worker address: \" << worker_context->GetWorkerAddress();\n  return SUCCESS;\n}\n\nStatus Dispatcher::RegisterServable(const proto::RegisterRequest &request, proto::RegisterReply *) {\n  WorkerRegSpec worker_spec;\n  GrpcTensorHelper::ConvertProtoWorkerSpec(request, &worker_spec);\n  auto create_notify_worker = [](const WorkerRegSpec &worker_spec) {\n    std::shared_ptr<BaseNotifyWorker> notify_worker = std::make_shared<GrpcNotifyWorker>(worker_spec.worker_address);\n    return notify_worker;\n  };\n  return RegisterServableCommon(worker_spec, create_notify_worker);\n}\n\nStatus Dispatcher::NotifyWorkerExit(const proto::ExitRequest &request, proto::ExitReply *) {\n  return UnregisterServableCommon(request.address());\n}\n\nvoid Dispatcher::UnregisterWorkerContext(WorkerContext *worker_context) {\n  MSI_EXCEPTION_IF_NULL(worker_context);\n  std::unique_lock<std::shared_mutex> lock(servable_shared_lock_);\n  auto worker_spec = worker_context->GetWorkerSpec();\n  auto &servable_spec = worker_spec.servable_spec;\n  std::shared_ptr<ServableEndPoint> endpoint = nullptr;\n  for (auto &item : servable_list_) {\n    if (item->GetServableName() == servable_spec.servable_name &&\n        item->GetVersionNumber() == servable_spec.version_number) {\n      endpoint = item;\n      break;\n    }\n  }\n  if (endpoint) {\n    endpoint->UnregisterWorker(worker_context->GetWorkerAddress());\n  }\n}\n\nStatus Dispatcher::NotifyWorkerNotAlive(WorkerContext *worker_context) {\n  MSI_EXCEPTION_IF_NULL(worker_context);\n  UnregisterWorkerContext(worker_context);\n  worker_context->OnNotAlive();\n  return SUCCESS;\n}\n\nStatus Dispatcher::NotifyWorkerNotAvailable(WorkerContext 
*worker_context) {\n  MSI_EXCEPTION_IF_NULL(worker_context);\n  UnregisterWorkerContext(worker_context);\n  worker_context->OnNotAvailable();\n  return SUCCESS;\n}\n\nvoid Dispatcher::GetModelInfo(const proto::GetModelInfoRequest *request, proto::GetModelInfoReply *reply) {\n  auto &servable_name = request->servable_name();\n  auto version_number = request->version_number();\n  for (auto &worker : worker_list_) {\n    auto worker_spec = worker->GetWorkerSpec();\n    if (worker_spec.servable_spec.servable_name == servable_name &&\n        worker_spec.servable_spec.version_number == version_number && worker_spec.servable_spec.own_device) {\n      reply->set_servable_name(servable_name);\n      reply->set_version_number(version_number);\n      GrpcTensorHelper::ConvertModelInfos(worker_spec.servable_spec.models, reply->mutable_model_infos());\n      return;\n    }\n  }\n  auto status = INFER_STATUS_LOG_ERROR(FAILED)\n                << \"Servable '\" << servable_name << \"' has models declared by declare_model, but parameter 'device_ids'\"\n                << \" of ServableStartConfig is not set in Serving startup script when the device target is not CPU\";\n  auto error_msg = reply->mutable_error_msg();\n  error_msg->set_error_code(FAILED);\n  error_msg->set_error_msg(status.StatusMessage());\n}\n\nbool Dispatcher::OnlyModelStage(const std::string &servable_name) {\n  for (auto &worker : worker_list_) {\n    auto worker_spec = worker->GetWorkerSpec();\n    if (worker_spec.servable_spec.servable_name != servable_name) {\n      continue;\n    }\n    for (auto &method : worker_spec.servable_spec.methods) {\n      // cppcheck-suppress useStlAlgorithm\n      if (!method.only_model_stage) {\n        return false;\n      }\n    }\n    return true;\n  }\n  return false;\n}\n\nvoid Dispatcher::Clear() {\n  std::unique_lock<std::shared_mutex> lock(servable_shared_lock_);\n\n  for (auto &endpoint : servable_list_) {\n    endpoint->Clear();\n  }\n  for (auto &worker : 
worker_list_) {\n    worker->Clear();\n  }\n  servable_list_.clear();\n  worker_list_.clear();\n}\n\nStatus Dispatcher::RegisterServableCommon(const WorkerRegSpec &worker_spec, CreateNotifyWorkerFunc func) {\n  MSI_EXCEPTION_IF_NULL(func);\n  std::unique_lock<std::shared_mutex> lock(servable_shared_lock_);\n  std::shared_ptr<WorkerContext> worker_context = nullptr;\n  for (auto &item : worker_list_) {\n    if (item->GetWorkerPid() == worker_spec.worker_pid) {\n      worker_context = item;\n      break;\n    }\n  }\n  bool ready = true;\n  if (worker_context == nullptr) {\n    worker_context = std::make_shared<WorkerContext>();\n    worker_context->UpdateWorkerPid(worker_spec.worker_pid);\n    worker_list_.push_back(worker_context);\n    ready = false;\n  }\n  worker_context->OnWorkerRegRequest(worker_spec, func(worker_spec));\n  if (ready) {\n    auto status = RegisterWorkerContext(worker_context);\n    if (status != SUCCESS) {\n      MSI_LOG_ERROR << \"Registered worker failed\";\n      worker_context->OnStartError(\"Registered worker failed\");\n    }\n  }\n  return SUCCESS;\n}\n\nStatus Dispatcher::NotifyWorkerFailed(const proto::NotifyFailedRequest *request, proto::NotifyFailedReply *reply) {\n  auto worker_pid = request->worker_pid();\n  auto error_msg = request->error_msg();\n  MSI_LOG_ERROR << \"Worker notify failed, worker pid: \" << worker_pid << \", error reported: <\" << error_msg << \">\";\n  std::unique_lock<std::shared_mutex> lock(servable_shared_lock_);\n  std::shared_ptr<WorkerContext> worker_context = nullptr;\n  for (auto &item : worker_list_) {\n    if (item->GetWorkerPid() == worker_pid) {\n      worker_context = item;\n      break;\n    }\n  }\n  if (worker_context == nullptr) {\n    worker_context = std::make_shared<WorkerContext>();\n    worker_context->UpdateWorkerPid(worker_pid);\n    worker_list_.push_back(worker_context);\n  }\n  worker_context->OnStartError(error_msg);\n  return SUCCESS;\n}\n\nstd::shared_ptr<WorkerContext> 
Dispatcher::InitWorkerContext(const ServableReprInfo &repr, uint64_t worker_pid) {\n  std::unique_lock<std::shared_mutex> lock(servable_shared_lock_);\n  std::shared_ptr<WorkerContext> worker_context = nullptr;\n  for (auto &item : worker_list_) {\n    if (item->GetWorkerPid() == worker_pid) {\n      worker_context = item;\n      break;\n    }\n  }\n  bool ready = true;\n  if (worker_context == nullptr) {\n    worker_context = std::make_shared<WorkerContext>();\n    worker_context->UpdateWorkerPid(worker_pid);\n    worker_list_.push_back(worker_context);\n    ready = false;\n  }\n  worker_context->InitServableReprInfo(repr);\n  if (ready) {\n    auto status = RegisterWorkerContext(worker_context);\n    if (status != SUCCESS) {\n      MSI_LOG_ERROR << \"Registered worker failed\";\n      worker_context->OnStartError(\"Registered worker failed\");\n    }\n  }\n  return worker_context;\n}\n\nStatus Dispatcher::RegisterWorkerContext(std::shared_ptr<WorkerContext> worker_context) {\n  auto worker_spec = worker_context->GetWorkerSpec();\n  auto &servable_spec = worker_spec.servable_spec;\n  if (servable_spec.servable_name.empty()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Register failed, servable name cannot be empty\";\n  }\n  if (servable_spec.version_number <= 0) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Register failed, servable name \" << servable_spec.servable_name\n                                          << \" version number \" << servable_spec.version_number << \" cannot be 0\";\n  }\n  std::shared_ptr<ServableEndPoint> endpoint = nullptr;\n  for (auto &item : servable_list_) {\n    if (item->GetServableName() == servable_spec.servable_name &&\n        item->GetVersionNumber() == servable_spec.version_number) {\n      endpoint = item;\n      break;\n    }\n  }\n  if (!endpoint) {\n    endpoint = std::make_shared<ServableEndPoint>(worker_context->GetServableReprInfo());\n    servable_list_.push_back(endpoint);\n  }\n  
endpoint->RegisterWorker(servable_spec, worker_context);\n  worker_context->OnReady();\n  return SUCCESS;\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/dispacther.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_MASTER_DISPACTHER_H\n#define MINDSPORE_SERVING_MASTER_DISPACTHER_H\n\n#include <memory>\n#include <string>\n#include <unordered_map>\n#include <vector>\n#include <shared_mutex>\n#include \"proto/ms_worker.grpc.pb.h\"\n#include \"common/serving_common.h\"\n#include \"common/instance.h\"\n#include \"common/servable.h\"\n#include \"master/notify_worker/base_notify.h\"\n#include \"common/grpc_client.h\"\n#include \"master/worker_context.h\"\n#include \"master/servable_endpoint.h\"\n\nnamespace mindspore::serving {\nclass Dispatcher {\n public:\n  Dispatcher();\n  ~Dispatcher();\n  void DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                     const PredictOnFinish &on_finish);\n\n  Status RegisterServable(const proto::RegisterRequest &request, proto::RegisterReply *reply);\n  Status NotifyWorkerExit(const proto::ExitRequest &request, proto::ExitReply *reply);\n  Status NotifyWorkerFailed(const proto::NotifyFailedRequest *request, proto::NotifyFailedReply *reply);\n  Status NotifyWorkerNotAlive(WorkerContext *worker_context);\n  Status NotifyWorkerNotAvailable(WorkerContext *worker_context);\n  void GetModelInfo(const proto::GetModelInfoRequest *request, proto::GetModelInfoReply *reply);\n  void Clear();\n\n  std::shared_ptr<WorkerContext> InitWorkerContext(const 
ServableReprInfo &repr, uint64_t worker_pid);\n  bool OnlyModelStage(const std::string &servable_name);\n\n private:\n  std::vector<std::shared_ptr<ServableEndPoint>> servable_list_;\n  std::vector<std::shared_ptr<WorkerContext>> worker_list_;\n\n  std::shared_mutex servable_shared_lock_;\n  std::atomic_uint32_t enqueued_requests_ = 0;\n\n  Status JudgeInferNum();\n  std::shared_ptr<ServableEndPoint> GetWorkerEndpoint(const RequestSpec &request_spec) const;\n\n  using CreateNotifyWorkerFunc = std::function<std::shared_ptr<BaseNotifyWorker>(const WorkerRegSpec &worker_spec)>;\n\n  Status RegisterServableCommon(const WorkerRegSpec &worker_spec, CreateNotifyWorkerFunc func);\n  Status UnregisterServableCommon(const std::string &worker_address);\n  Status DispatchAsyncInner(const proto::PredictRequest &request, proto::PredictReply *reply,\n                            const PredictOnFinish &on_finish);\n  Status RegisterWorkerContext(std::shared_ptr<WorkerContext> worker_context);\n\n  void UnregisterWorkerContext(WorkerContext *worker_context);\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_MASTER_DISPACTHER_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/grpc/grpc_process.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"master/grpc/grpc_process.h\"\n#include <string>\n#include \"master/dispacther.h\"\n\nnamespace mindspore {\nnamespace serving {\nnamespace {\nstd::string GetProtorWorkerSpecRepr(const proto::WorkerRegSpec &worker_spec) {\n  std::stringstream str;\n  auto &servable_spec = worker_spec.servable_spec();\n  str << \"{name:\" << servable_spec.name() << \", version:\" << servable_spec.version_number() << \", method:[\";\n  for (int k = 0; k < servable_spec.methods_size(); k++) {\n    str << servable_spec.methods(k).name();\n    if (k + 1 < servable_spec.methods_size()) {\n      str << \",\";\n    }\n  }\n  str << \"]}\";\n  return str.str();\n}\n}  // namespace\n\nvoid MSServiceImpl::PredictAsync(const proto::PredictRequest *request, proto::PredictReply *reply,\n                                 PredictOnFinish on_finish) {\n  dispatcher_->DispatchAsync(*request, reply, on_finish);\n}\n\ngrpc::Status MSMasterImpl::Register(const proto::RegisterRequest *request, proto::RegisterReply *reply) {\n  MSI_EXCEPTION_IF_NULL(request);\n  MSI_EXCEPTION_IF_NULL(reply);\n  auto worker_sig = [request]() {\n    std::stringstream str;\n    str << \"worker address: \" << request->worker_spec().address() << \", servable: \";\n    str << GetProtorWorkerSpecRepr(request->worker_spec());\n    return str.str();\n  };\n  Status status(FAILED);\n  
status = dispatcher_->RegisterServable(*request, reply);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Register servable failed, \" << worker_sig();\n    return grpc::Status::OK;\n  }\n  MSI_LOG(INFO) << \"Register success: \" << worker_sig();\n  return grpc::Status::OK;\n}\n\ngrpc::Status MSMasterImpl::Exit(const proto::ExitRequest *request, proto::ExitReply *reply) {\n  MSI_EXCEPTION_IF_NULL(request);\n  MSI_EXCEPTION_IF_NULL(reply);\n  auto worker_sig = [request]() {\n    std::stringstream str;\n    str << \"worker address: \" << request->address();\n    return str.str();\n  };\n\n  MSI_LOG(INFO) << \"Worker Exit, \" << worker_sig();\n  Status status = dispatcher_->NotifyWorkerExit(*request, reply);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"UnRegister servable failed, \" << worker_sig();\n    return grpc::Status::OK;\n  }\n  return grpc::Status::OK;\n}\n\ngrpc::Status MSMasterImpl::NotifyFailed(const proto::NotifyFailedRequest *request, proto::NotifyFailedReply *reply) {\n  dispatcher_->NotifyWorkerFailed(request, reply);\n  return grpc::Status::OK;\n}\n\ngrpc::Status MSMasterImpl::GetModelInfo(const proto::GetModelInfoRequest *request, proto::GetModelInfoReply *reply) {\n  dispatcher_->GetModelInfo(request, reply);\n  return grpc::Status::OK;\n}\n\nvoid MSMasterImpl::PredictAsync(const proto::PredictRequest *request, proto::PredictReply *reply,\n                                const PredictOnFinish &on_finish) {\n  dispatcher_->DispatchAsync(*request, reply, on_finish);\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/grpc/grpc_process.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_MASTER_GRPC_PROCESS_H\n#define MINDSPORE_SERVING_MASTER_GRPC_PROCESS_H\n\n#include <grpcpp/grpcpp.h>\n#include <grpcpp/health_check_service_interface.h>\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\n#include <memory>\n#include <string>\n#include \"common/serving_common.h\"\n#include \"common/heart_beat.h\"\n#include \"proto/ms_service.pb.h\"\n#include \"proto/ms_service.grpc.pb.h\"\n#include \"proto/ms_master.pb.h\"\n#include \"proto/ms_master.grpc.pb.h\"\n#include \"proto/ms_worker.pb.h\"\n#include \"proto/ms_worker.grpc.pb.h\"\n#include \"master/dispacther.h\"\n\nnamespace mindspore {\nnamespace serving {\n// Service Implement\nclass MSServiceImpl {\n public:\n  explicit MSServiceImpl(std::shared_ptr<Dispatcher> dispatcher) : dispatcher_(dispatcher) {}\n  ~MSServiceImpl() = default;\n\n  void PredictAsync(const proto::PredictRequest *request, proto::PredictReply *reply, PredictOnFinish on_finish);\n\n private:\n  std::shared_ptr<Dispatcher> dispatcher_;\n};\n\n// Service Implement\nclass MSMasterImpl {\n public:\n  explicit MSMasterImpl(std::shared_ptr<Dispatcher> dispatcher) : dispatcher_(dispatcher) {}\n  ~MSMasterImpl() = default;\n\n  grpc::Status Register(const proto::RegisterRequest *request, proto::RegisterReply *reply);\n  grpc::Status Exit(const proto::ExitRequest *request, 
proto::ExitReply *reply);\n  grpc::Status NotifyFailed(const proto::NotifyFailedRequest *request, proto::NotifyFailedReply *reply);\n  grpc::Status GetModelInfo(const proto::GetModelInfoRequest *request, proto::GetModelInfoReply *reply);\n  void PredictAsync(const proto::PredictRequest *request, proto::PredictReply *reply, const PredictOnFinish &on_finish);\n\n private:\n  std::shared_ptr<Dispatcher> dispatcher_;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_MASTER_GRPC_PROCESS_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/grpc/grpc_server.cc",
    "content": "/**\r\n * Copyright 2020 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"master/grpc/grpc_server.h\"\r\n#include <string>\r\n#include <memory>\r\n#include \"common/grpc_async_server.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {}  // namespace serving\r\n}  // namespace mindspore\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/grpc/grpc_server.h",
    "content": "/**\r\n * Copyright 2020 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MINDSPORE_SERVING_MASTER_GRPC_SERVER_H\r\n#define MINDSPORE_SERVING_MASTER_GRPC_SERVER_H\r\n\r\n#include <string>\r\n#include <vector>\r\n#include <memory>\r\n#include \"common/serving_common.h\"\r\n#include \"proto/ms_worker.pb.h\"\r\n#include \"proto/ms_worker.grpc.pb.h\"\r\n#include \"common/grpc_async_server.h\"\r\n#include \"master/grpc/grpc_process.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {\r\ntemplate <class Derived>\r\nclass ServiceGrpcContext : public GrpcAsyncServiceContext<MSServiceImpl, proto::MSService::AsyncService, Derived> {\r\n public:\r\n  ServiceGrpcContext(MSServiceImpl *service_impl, proto::MSService::AsyncService *async_service,\r\n                     grpc::ServerCompletionQueue *cq)\r\n      : GrpcAsyncServiceContext<MSServiceImpl, proto::MSService::AsyncService, Derived>(service_impl, async_service,\r\n                                                                                        cq) {}\r\n\r\n  virtual void StartEnqueueRequest() = 0;\r\n  virtual void HandleRequest() = 0;\r\n};\r\n\r\nclass ServicePredictContext : public ServiceGrpcContext<ServicePredictContext> {\r\n public:\r\n  ServicePredictContext(MSServiceImpl *service_impl, proto::MSService::AsyncService *async_service,\r\n                        grpc::ServerCompletionQueue *cq)\r\n      
: ServiceGrpcContext<ServicePredictContext>(service_impl, async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~ServicePredictContext() = default;\r\n\r\n  void StartEnqueueRequest() override { async_service_->RequestPredict(&ctx_, &request_, &responder_, cq_, cq_, this); }\r\n\r\n  void HandleRequest() override {\r\n    MSI_TIME_STAMP_START(RequestHandle)\r\n    auto instance_size = request_.instances_size();\r\n    PredictOnFinish on_finish = [this, time_start_RequestHandle, instance_size]() {\r\n      responder_.Finish(response_, grpc::Status::OK, this);\r\n      MSI_TIME_STAMP_END_EXTRA(RequestHandle, \"Request count \" + std::to_string(instance_size))\r\n    };\r\n    service_impl_->PredictAsync(&request_, &response_, on_finish);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::PredictReply> responder_;\r\n  proto::PredictRequest request_;\r\n  proto::PredictReply response_;\r\n};\r\n\r\nclass ServiceGrpcServer : public GrpcAsyncServer<proto::MSService::AsyncService> {\r\n public:\r\n  explicit ServiceGrpcServer(std::shared_ptr<Dispatcher> dispatcher)\r\n      : GrpcAsyncServer<proto::MSService::AsyncService>(), service_impl_(MSServiceImpl(dispatcher)) {}\r\n  ~ServiceGrpcServer() {}\r\n\r\n  void EnqueueRequests() override { ServicePredictContext::EnqueueRequest(&service_impl_, &svc_, cq_.get()); }\r\n\r\n protected:\r\n  MSServiceImpl service_impl_;\r\n};\r\n\r\n}  // namespace serving\r\n}  // namespace mindspore\r\n\r\n#endif  // MINDSPORE_SERVING_MASTER_GRPC_SERVER_H\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/grpc/master_server.h",
    "content": "/**\r\n * Copyright 2020 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MINDSPORE_SERVING_MASTER_MASTER_SERVER_H\r\n#define MINDSPORE_SERVING_MASTER_MASTER_SERVER_H\r\n\r\n#include <string>\r\n#include <vector>\r\n#include <memory>\r\n#include \"common/serving_common.h\"\r\n#include \"proto/ms_worker.pb.h\"\r\n#include \"proto/ms_worker.grpc.pb.h\"\r\n#include \"common/grpc_async_server.h\"\r\n#include \"master/grpc/grpc_process.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {\r\ntemplate <class Derived>\r\nclass MasterGrpcContext : public GrpcAsyncServiceContext<MSMasterImpl, proto::MSMaster::AsyncService, Derived> {\r\n public:\r\n  MasterGrpcContext(MSMasterImpl *service_impl, proto::MSMaster::AsyncService *async_service,\r\n                    grpc::ServerCompletionQueue *cq)\r\n      : GrpcAsyncServiceContext<MSMasterImpl, proto::MSMaster::AsyncService, Derived>(service_impl, async_service, cq) {\r\n  }\r\n\r\n  virtual void StartEnqueueRequest() = 0;\r\n  virtual void HandleRequest() = 0;\r\n};\r\n\r\nclass MasterRegisterContext : public MasterGrpcContext<MasterRegisterContext> {\r\n public:\r\n  MasterRegisterContext(MSMasterImpl *service_impl, proto::MSMaster::AsyncService *async_service,\r\n                        grpc::ServerCompletionQueue *cq)\r\n      : MasterGrpcContext<MasterRegisterContext>(service_impl, async_service, cq), 
responder_(&ctx_) {}\r\n\r\n  ~MasterRegisterContext() = default;\r\n\r\n  void StartEnqueueRequest() override {\r\n    async_service_->RequestRegister(&ctx_, &request_, &responder_, cq_, cq_, this);\r\n  }\r\n\r\n  void HandleRequest() override {\r\n    grpc::Status status = service_impl_->Register(&request_, &response_);\r\n    responder_.Finish(response_, status, this);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::RegisterReply> responder_;\r\n  proto::RegisterRequest request_;\r\n  proto::RegisterReply response_;\r\n};\r\n\r\nclass MasterExitContext : public MasterGrpcContext<MasterExitContext> {\r\n public:\r\n  MasterExitContext(MSMasterImpl *service_impl, proto::MSMaster::AsyncService *async_service,\r\n                    grpc::ServerCompletionQueue *cq)\r\n      : MasterGrpcContext<MasterExitContext>(service_impl, async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~MasterExitContext() = default;\r\n\r\n  void StartEnqueueRequest() override { async_service_->RequestExit(&ctx_, &request_, &responder_, cq_, cq_, this); }\r\n\r\n  void HandleRequest() override {\r\n    grpc::Status status = service_impl_->Exit(&request_, &response_);\r\n    responder_.Finish(response_, status, this);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::ExitReply> responder_;\r\n  proto::ExitRequest request_;\r\n  proto::ExitReply response_;\r\n};\r\n\r\nclass MasterNotifyFailedContext : public MasterGrpcContext<MasterNotifyFailedContext> {\r\n public:\r\n  MasterNotifyFailedContext(MSMasterImpl *service_impl, proto::MSMaster::AsyncService *async_service,\r\n                            grpc::ServerCompletionQueue *cq)\r\n      : MasterGrpcContext<MasterNotifyFailedContext>(service_impl, async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~MasterNotifyFailedContext() = default;\r\n\r\n  void StartEnqueueRequest() override {\r\n    async_service_->RequestNotifyFailed(&ctx_, &request_, &responder_, cq_, cq_, this);\r\n  }\r\n\r\n  void 
HandleRequest() override {\r\n    grpc::Status status = service_impl_->NotifyFailed(&request_, &response_);\r\n    responder_.Finish(response_, status, this);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::NotifyFailedReply> responder_;\r\n  proto::NotifyFailedRequest request_;\r\n  proto::NotifyFailedReply response_;\r\n};\r\n\r\nclass MasterGetModelInfoContext : public MasterGrpcContext<MasterGetModelInfoContext> {\r\n public:\r\n  MasterGetModelInfoContext(MSMasterImpl *service_impl, proto::MSMaster::AsyncService *async_service,\r\n                            grpc::ServerCompletionQueue *cq)\r\n      : MasterGrpcContext<MasterGetModelInfoContext>(service_impl, async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~MasterGetModelInfoContext() = default;\r\n\r\n  void StartEnqueueRequest() override {\r\n    async_service_->RequestGetModelInfo(&ctx_, &request_, &responder_, cq_, cq_, this);\r\n  }\r\n\r\n  void HandleRequest() override {\r\n    grpc::Status status = service_impl_->GetModelInfo(&request_, &response_);\r\n    responder_.Finish(response_, status, this);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::GetModelInfoReply> responder_;\r\n  proto::GetModelInfoRequest request_;\r\n  proto::GetModelInfoReply response_;\r\n};\r\n\r\nclass MasterPredictContext : public MasterGrpcContext<MasterPredictContext> {\r\n public:\r\n  MasterPredictContext(MSMasterImpl *service_impl, proto::MSMaster::AsyncService *async_service,\r\n                       grpc::ServerCompletionQueue *cq)\r\n      : MasterGrpcContext<MasterPredictContext>(service_impl, async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~MasterPredictContext() = default;\r\n\r\n  void StartEnqueueRequest() override {\r\n    async_service_->RequestCallModel(&ctx_, &request_, &responder_, cq_, cq_, this);\r\n  }\r\n\r\n  void HandleRequest() override {\r\n    PredictOnFinish on_finish = [this]() { responder_.Finish(response_, grpc::Status::OK, this); };\r\n    
service_impl_->PredictAsync(&request_, &response_, on_finish);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::PredictReply> responder_;\r\n  proto::PredictRequest request_;\r\n  proto::PredictReply response_;\r\n};\r\n\r\nclass MasterGrpcServer : public GrpcAsyncServer<proto::MSMaster::AsyncService> {\r\n public:\r\n  explicit MasterGrpcServer(std::shared_ptr<Dispatcher> dispatcher)\r\n      : GrpcAsyncServer<proto::MSMaster::AsyncService>(), service_impl_(MSMasterImpl(dispatcher)) {}\r\n  ~MasterGrpcServer() {}\r\n\r\n  void EnqueueRequests() override {\r\n    MasterRegisterContext::EnqueueRequest(&service_impl_, &svc_, cq_.get());\r\n    MasterExitContext::EnqueueRequest(&service_impl_, &svc_, cq_.get());\r\n    MasterNotifyFailedContext::EnqueueRequest(&service_impl_, &svc_, cq_.get());\r\n    MasterGetModelInfoContext::EnqueueRequest(&service_impl_, &svc_, cq_.get());\r\n    MasterPredictContext::EnqueueRequest(&service_impl_, &svc_, cq_.get());\r\n  }\r\n\r\n protected:\r\n  MSMasterImpl service_impl_;\r\n};\r\n\r\n}  // namespace serving\r\n}  // namespace mindspore\r\n\r\n#endif  // MINDSPORE_SERVING_MASTER_MASTER_SERVER_H\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/master_context.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"master/master_context.h\"\n\nnamespace mindspore::serving {\nstd::shared_ptr<MasterContext> MasterContext::Instance() {\n  static std::shared_ptr<MasterContext> instance = nullptr;\n  if (instance == nullptr) {\n    instance = std::make_shared<MasterContext>();\n  }\n  return instance;\n}\n\nvoid MasterContext::SetMaxEnqueuedRequests(uint32_t max_enqueued_requests) {\n  max_enqueued_requests_ = max_enqueued_requests;\n}\n\nuint32_t MasterContext::GetMaxEnqueuedRequests() const { return max_enqueued_requests_; }\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/master_context.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_MASTER_CONTEXT_H\n#define MINDSPORE_SERVING_MASTER_CONTEXT_H\n\n#include <string>\n#include <memory>\n#include <vector>\n#include \"common/serving_common.h\"\n\nnamespace mindspore::serving {\nclass MS_API MasterContext {\n public:\n  static std::shared_ptr<MasterContext> Instance();\n\n  void SetMaxEnqueuedRequests(uint32_t max_enqueued_requests);\n  uint32_t GetMaxEnqueuedRequests() const;\n\n private:\n  uint32_t max_enqueued_requests_ = 10000;  // default 10000\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_MASTER_CONTEXT_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/model_thread.cc",
    "content": "/**\r\n * Copyright 2021 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"master/model_thread.h\"\r\n#include \"common/proto_tensor.h\"\r\n\r\nnamespace mindspore::serving {\r\nModelThread::ModelThread(const std::string &servable_name, const std::string &method_name, uint64_t version_number,\r\n                         uint64_t batch_size, ServableMethodInfo method_info) {\r\n  spec_.servable_name = servable_name;\r\n  spec_.method_name = method_name;\r\n  spec_.version_number = version_number;\r\n  method_info_ = method_info;\r\n  batch_size_ = batch_size;\r\n}\r\n\r\nvoid ModelThread::Clear() {\r\n  std::unique_lock<std::mutex> lock(lock_);\r\n  InnerClear();\r\n}\r\n\r\nvoid ModelThread::InnerClear() {\r\n  for (auto &job_item : job_) {\r\n    auto reply = job_item.second.reply;\r\n    bool has_reply = false;\r\n    bool has_error = false;\r\n    proto::ErrorMsg detect_error;\r\n    proto::ErrorMsg exit_error;\r\n    RequestSpec request_spec;\r\n    GrpcTensorHelper::GetRequestSpec(*job_item.second.request, &request_spec);\r\n    auto status = INFER_STATUS(INVALID_INPUTS) << \"Request \" << request_spec.Repr() << \", servable is not available\";\r\n    exit_error.set_error_code(status.StatusCode());\r\n    exit_error.set_error_msg(status.StatusMessage());\r\n    for (auto &task_item : job_item.second.task) {\r\n      auto instance = 
reply->add_instances();\r\n      auto error = reply->add_error_msg();\r\n      if (task_item.error.error_code() != 0) {\r\n        *error = task_item.error;\r\n        if (!has_error) {\r\n          has_error = true;\r\n          detect_error = task_item.error;\r\n        }\r\n      } else if (task_item.output != nullptr) {\r\n        *instance = *task_item.output;\r\n        has_reply = true;\r\n      } else {\r\n        *error = exit_error;\r\n      }\r\n    }\r\n    if (!has_error && !has_reply) {\r\n      job_item.second.reply->clear_instances();\r\n      job_item.second.reply->clear_error_msg();\r\n      auto error_msg = job_item.second.reply->add_error_msg();\r\n      *error_msg = exit_error;\r\n    } else if (!has_reply) {\r\n      job_item.second.reply->clear_instances();\r\n      job_item.second.reply->clear_error_msg();\r\n      auto error_msg = job_item.second.reply->add_error_msg();\r\n      *error_msg = detect_error;\r\n    }\r\n    job_item.second.callback();\r\n  }\r\n  job_.clear();\r\n  pid_process_.clear();\r\n  task_wait_queue_ = std::queue<std::pair<uint64_t, uint64_t>>();\r\n  worker_wait_map_.clear();\r\n}\r\n\r\nModelThread::~ModelThread() { Clear(); }\r\n\r\nStatus ModelThread::AddWorker(uint64_t pid, const std::shared_ptr<WorkerContext> &notify) {\r\n  {\r\n    std::unique_lock<std::mutex> lock(lock_);\r\n    auto it = pid_process_.find(pid);\r\n    if (it != pid_process_.end()) {\r\n      MSI_LOG(INFO) << \"pid is existed: \" << pid;\r\n      return FAILED;\r\n    }\r\n    pid_process_.insert(std::make_pair(pid, notify));\r\n    if (single_batch_dispatch_) {\r\n      worker_wait_map_[pid] = static_cast<int64_t>(round_ * batch_size_);\r\n    } else {\r\n      worker_wait_map_[pid] = static_cast<int64_t>(round_);\r\n    }\r\n  }\r\n  SendTasks();\r\n  return SUCCESS;\r\n}\r\n\r\nStatus ModelThread::DelWorker(uint64_t pid) {\r\n  {\r\n    std::unique_lock<std::mutex> lock(lock_);\r\n    auto it = pid_process_.find(pid);\r\n    if (it == 
pid_process_.end()) {\r\n      MSI_LOG(INFO) << \"pid not existed: \" << pid;\r\n      return FAILED;\r\n    }\r\n    (void)pid_process_.erase(it);\r\n    auto worker_it = worker_wait_map_.find(pid);\r\n    if (worker_it == worker_wait_map_.end()) {\r\n      MSI_LOG(INFO) << \"pid not existed in worker wait map: \" << pid;\r\n      return FAILED;\r\n    }\r\n    (void)worker_wait_map_.erase(worker_it);\r\n    for (auto &job_item : job_) {\r\n      auto job_id = job_item.first;\r\n      auto &task_list = job_item.second.task;\r\n      for (size_t i = 0; i < task_list.size(); ++i) {\r\n        if (task_list[i].pid == pid) {\r\n          auto task_id = i;\r\n          task_wait_queue_.push(std::make_pair(job_id, task_id));\r\n        }\r\n      }\r\n    }\r\n    if (pid_process_.empty()) {\r\n      InnerClear();\r\n    }\r\n  }\r\n  SendTasks();\r\n  return SUCCESS;\r\n}\r\n\r\nStatus ModelThread::FindProcessQueue(uint64_t *pid) {\r\n  int64_t max_free_slot = 0;\r\n  uint64_t cur_pid = 0;\r\n  for (auto &item : worker_wait_map_) {\r\n    auto slot = item.second;\r\n    if (slot <= 0 || slot < max_free_slot) {\r\n      continue;\r\n    }\r\n    if (slot > max_free_slot || (cur_pid <= last_worker_pid_ && item.first > last_worker_pid_)) {\r\n      max_free_slot = slot;\r\n      cur_pid = item.first;\r\n    }\r\n  }\r\n  if (cur_pid != 0) {\r\n    worker_wait_map_[cur_pid]--;\r\n    last_worker_pid_ = cur_pid;\r\n    *pid = cur_pid;\r\n    return SUCCESS;\r\n  }\r\n  return FAILED;\r\n}\r\n\r\nStatus ModelThread::PushTasks(const proto::PredictRequest &request, proto::PredictReply *reply,\r\n                              const PredictOnFinish &callback) {\r\n  auto status = GrpcTensorHelper::CheckRequestInstances(request, method_info_.input_names);\r\n  if (status != SUCCESS) {\r\n    MSI_LOG_ERROR << \"Check request failed\";\r\n    return status;\r\n  }\r\n  std::unique_lock<std::mutex> lock(lock_);\r\n  if (pid_process_.empty()) {\r\n    RequestSpec request_spec;\r\n    
GrpcTensorHelper::GetRequestSpec(request, &request_spec);\r\n    return INFER_STATUS_LOG_ERROR(SERVABLE_UNAVAILABLE)\r\n           << \"Request \" << request_spec.Repr() << \", servable is not available\";\r\n  }\r\n  auto it = job_.find(job_id_);\r\n  if (it != job_.end()) {\r\n    MSI_LOG(ERROR) << \"job_id has existed: \" << job_id_;\r\n    return FAILED;\r\n  }\r\n  int instance_size = request.instances_size();\r\n  Job job;\r\n  job.wait_task_num = instance_size;\r\n  job.callback = callback;\r\n  job.request = &request;\r\n  job.reply = reply;\r\n  job.task.resize(instance_size);\r\n  for (int i = 0; i < instance_size; i++) {\r\n    Task &task = job.task[i];\r\n    task.input = &request.instances(i);\r\n    task.pid = 0;\r\n    task_wait_queue_.push(std::make_pair(job_id_, i));\r\n  }\r\n  job_.insert(std::make_pair(job_id_, job));\r\n  job_id_++;\r\n  return SUCCESS;\r\n}\r\n\r\nStatus ModelThread::DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\r\n                                  const PredictOnFinish &callback) {\r\n  auto status = PushTasks(request, reply, callback);\r\n  if (status != SUCCESS) {\r\n    MSI_LOG_ERROR << \"Push tasks into queue failed\";\r\n    return status;\r\n  }\r\n  SendTasks();\r\n  return SUCCESS;\r\n}\r\n\r\nStatus ModelThread::Combine(const std::vector<std::pair<uint64_t, uint64_t>> &ids, uint64_t pid,\r\n                            proto::PredictRequest *msg) {\r\n  std::vector<const proto::Instance *> inputs;\r\n  // ids->inputs\r\n  for (auto it = begin(ids); it != end(ids); it++) {\r\n    uint64_t job_id = it->first;\r\n    uint64_t task_id = it->second;\r\n    job_[job_id].task[task_id].pid = pid;\r\n    inputs.push_back(job_[job_id].task[task_id].input);\r\n  }\r\n  return GrpcTensorHelper::CreatePredictRequestFromInstances(spec_, inputs, msg);\r\n}\r\n\r\nvoid ModelThread::SendTasks() {\r\n  while (true) {\r\n    std::shared_ptr<PredictContext> context;\r\n    std::shared_ptr<WorkerContext> 
worker;\r\n    {  // pop tasks\r\n      std::unique_lock<std::mutex> lock(lock_);\r\n      if (task_wait_queue_.empty()) {\r\n        return;\r\n      }\r\n      uint64_t pid;\r\n      auto status = FindProcessQueue(&pid);\r\n      if (status != SUCCESS) {\r\n        return;\r\n      }\r\n      context = std::make_shared<PredictContext>();\r\n      std::vector<std::pair<uint64_t, uint64_t>> &inputs = context->inputs;\r\n      if (single_batch_dispatch_) {\r\n        inputs.push_back(task_wait_queue_.front());\r\n        task_wait_queue_.pop();\r\n      } else {\r\n        for (uint64_t i = 0; i < batch_size_; i++) {\r\n          if (task_wait_queue_.empty()) {\r\n            break;\r\n          }\r\n          inputs.push_back(task_wait_queue_.front());\r\n          task_wait_queue_.pop();\r\n        }\r\n      }\r\n      context->pid = pid;\r\n      Combine(inputs, pid, &context->request);  // inputs string->InstanceData,task pid status\r\n      worker = pid_process_[pid];\r\n    }\r\n    // send request\r\n    PredictOnFinish callback = [context, worker, this]() {\r\n      bool worker_not_available = false;\r\n      for (auto &error : context->reply.error_msg()) {\r\n        if (error.error_code() == WORKER_UNAVAILABLE) {\r\n          worker_not_available = true;\r\n          break;\r\n        }\r\n      }\r\n      if (worker_not_available) {\r\n        worker->NotifyNotAvailable();\r\n      } else {\r\n        Commit(context);\r\n      }\r\n    };\r\n    auto status = worker->DispatchAsync(context->request, &context->reply, callback);\r\n    if (status != SUCCESS) {\r\n      auto error_msg = context->reply.add_error_msg();\r\n      error_msg->set_error_code(WORKER_UNAVAILABLE);\r\n      error_msg->set_error_msg(status.StatusMessage());\r\n      worker->NotifyNotAvailable();\r\n    }\r\n  }\r\n}\r\n\r\nvoid ModelThread::OnTasksFinished(const std::shared_ptr<PredictContext> &context) {\r\n  std::unique_lock<std::mutex> lock(lock_);\r\n  const auto pid = 
context->pid;\r\n  const auto &inputs = context->inputs;\r\n  if (pid_process_.find(pid) != pid_process_.end()) {\r\n    worker_wait_map_[pid]++;\r\n  }\r\n  std::vector<proto::ErrorMsg> error;\r\n  std::vector<const proto::Instance *> output;\r\n  auto status = GrpcTensorHelper::CreateInstanceFromPredictReply(spec_, context->reply, &error, &output);\r\n  if (status != SUCCESS) {\r\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\r\n             << \"Get reply failed, servable name: \" << spec_.servable_name << \", method name: \" << spec_.method_name\r\n             << \", version number: \" << spec_.version_number;\r\n  }\r\n  if (!output.empty() && output.size() != inputs.size()) {\r\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\r\n             << \"The instance count \" << output.size() << \" of reply is not equal to the count \" << inputs.size()\r\n             << \" of request\";\r\n  }\r\n  if (status != SUCCESS) {\r\n    output.clear();\r\n    error.clear();\r\n    proto::ErrorMsg error_msg;\r\n    error_msg.set_error_code(status.StatusCode());\r\n    error_msg.set_error_msg(status.StatusMessage());\r\n    error.push_back(error_msg);\r\n  }\r\n  for (unsigned int i = 0; i < inputs.size(); i++) {\r\n    uint64_t task_id = inputs[i].second;\r\n    uint64_t job_id = inputs[i].first;\r\n    auto iter2 = job_.find(job_id);\r\n    if (iter2 == job_.end()) {\r\n      MSI_LOG_ERROR << \"job_id not exist: \" << job_id;\r\n      continue;\r\n    }\r\n    auto &job_item = iter2->second;\r\n    // collect result\r\n    auto &task_item = job_item.task[task_id];\r\n    task_item.pid = 0;\r\n    if (i < output.size()) {\r\n      task_item.output = output[i];\r\n    }\r\n    if (error.empty()) {\r\n      task_item.error.set_error_code(0);\r\n    } else if (error.size() == 1) {\r\n      task_item.error = error[0];\r\n    } else {\r\n      task_item.error = error[i];\r\n    }\r\n    job_item.wait_task_num--;\r\n    job_item.reply_context_list.push_back(context);\r\n   
 if (job_item.wait_task_num == 0) {\r\n      // reply job\r\n      std::vector<const proto::Instance *> out;\r\n      std::vector<proto::ErrorMsg> error_reply;\r\n      for (auto &item : job_item.task) {\r\n        out.push_back(item.output);\r\n        error_reply.push_back(item.error);\r\n      }\r\n      GrpcTensorHelper::CreatePredictReplyFromInstances(*job_item.request, error_reply, out, job_item.reply);\r\n      job_item.callback();\r\n      (void)job_.erase(iter2);\r\n    }\r\n  }\r\n}\r\n\r\nvoid ModelThread::Commit(const std::shared_ptr<PredictContext> &context) {\r\n  OnTasksFinished(context);\r\n  SendTasks();\r\n}\r\n}  // namespace mindspore::serving\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/model_thread.h",
    "content": "/**\r\n * Copyright 2021 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MINDSPORE_SERVING_MASTER_MODEL_THREAD_H\r\n#define MINDSPORE_SERVING_MASTER_MODEL_THREAD_H\r\n#include <memory>\r\n#include <unordered_map>\r\n#include <vector>\r\n#include <string>\r\n#include <utility>\r\n#include <mutex>\r\n#include <map>\r\n#include <queue>\r\n#include \"common/serving_common.h\"\r\n#include \"common/instance.h\"\r\n#include \"master/notify_worker/base_notify.h\"\r\n#include \"proto/ms_service.pb.h\"\r\n#include \"proto/ms_service.grpc.pb.h\"\r\n#include \"master/worker_context.h\"\r\n\r\nnamespace mindspore::serving {\r\nstruct Task {\r\n  const proto::Instance *input = nullptr;\r\n  const proto::Instance *output = nullptr;\r\n  proto::ErrorMsg error;\r\n  uint64_t pid = 0;  // 0:not execute or have executed.others: executing\r\n};\r\n\r\nstruct PredictContext {\r\n  proto::PredictRequest request;\r\n  proto::PredictReply reply;\r\n  uint64_t pid;\r\n  std::vector<std::pair<uint64_t, uint64_t>> inputs;\r\n};\r\n\r\nstruct Job {\r\n  std::vector<Task> task;\r\n  uint64_t wait_task_num = 0;\r\n  PredictOnFinish callback;\r\n  const proto::PredictRequest *request = nullptr;\r\n  proto::PredictReply *reply = nullptr;\r\n  std::vector<std::shared_ptr<PredictContext>> reply_context_list;\r\n};\r\n\r\nclass ModelThread {\r\n public:\r\n  ModelThread(const std::string 
&servable_name, const std::string &method_name, uint64_t version_number,\r\n              uint64_t batch_size, ServableMethodInfo method_info);\r\n  ~ModelThread();\r\n  Status DelWorker(uint64_t pid);\r\n  Status AddWorker(uint64_t pid, const std::shared_ptr<WorkerContext> &notify);\r\n  Status DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\r\n                       const PredictOnFinish &callback);\r\n\r\n private:\r\n  std::map<uint64_t, std::shared_ptr<WorkerContext>> pid_process_;\r\n  uint64_t last_worker_pid_ = 0;\r\n  std::map<uint64_t, int64_t> worker_wait_map_;\r\n  std::queue<std::pair<uint64_t, uint64_t>> task_wait_queue_;\r\n  std::map<uint64_t, Job> job_;\r\n  uint64_t job_id_ = 0;\r\n  uint64_t round_ = 3;\r\n  std::mutex lock_;\r\n  RequestSpec spec_;\r\n  ServableMethodInfo method_info_;\r\n  uint64_t batch_size_;\r\n  bool single_batch_dispatch_ = false;\r\n\r\n  void Clear();\r\n  void InnerClear();\r\n  Status FindProcessQueue(uint64_t *pid);\r\n  Status PushTasks(const proto::PredictRequest &request, proto::PredictReply *reply, const PredictOnFinish &callback);\r\n  Status Combine(const std::vector<std::pair<uint64_t, uint64_t>> &ids, uint64_t pid, proto::PredictRequest *msg);\r\n  void OnTasksFinished(const std::shared_ptr<PredictContext> &context);\r\n  void SendTasks();\r\n  void Commit(const std::shared_ptr<PredictContext> &context);\r\n};\r\n\r\n}  // namespace mindspore::serving\r\n\r\n#endif  // MINDSPORE_SERVING_MASTER_MODEL_THREAD_H\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/notify_worker/base_notify.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_MASTER_BASE_NOTIFY_H\n#define MINDSPORE_SERVING_MASTER_BASE_NOTIFY_H\n#include <vector>\n#include <functional>\n#include <future>\n#include \"common/serving_common.h\"\n#include \"common/servable.h\"\n#include \"proto/ms_service.pb.h\"\n#include \"common/grpc_client.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API BaseNotifyWorker {\n public:\n  BaseNotifyWorker() = default;\n  virtual ~BaseNotifyWorker() = default;\n  virtual Status DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                               const PredictOnFinish &on_finish) = 0;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_MASTER_BASE_NOTIFY_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/notify_worker/grpc_notify.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"master/notify_worker/grpc_notify.h\"\n#include <grpcpp/grpcpp.h>\n#include <grpcpp/health_check_service_interface.h>\n#include <thread>\n#include \"common/exit_handle.h\"\n#include \"common/grpc_server.h\"\n#include \"common/proto_tensor.h\"\n\nnamespace mindspore {\nnamespace serving {\nGrpcNotifyWorker::GrpcNotifyWorker(const std::string &worker_address) {\n  worker_address_ = worker_address;\n  std::shared_ptr<grpc::Channel> channel = GrpcServer::CreateChannel(worker_address);\n  stub_ = proto::MSWorker::NewStub(channel);\n}\n\nGrpcNotifyWorker::~GrpcNotifyWorker() = default;\n\nStatus GrpcNotifyWorker::DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                                       const PredictOnFinish &on_finish) {\n  if (!stub_) {\n    return INFER_STATUS_LOG_ERROR(WORKER_UNAVAILABLE)\n           << \"Predict failed, worker gRPC has not been inited or has already exited, worker address \"\n           << worker_address_;\n  }\n  if (!client_) {\n    client_ = std::make_unique<MSPredictClient>();\n    client_->Start();\n  }\n  AsyncPredictCallback callback = [reply, on_finish](Status status) {\n    GrpcTensorHelper::CreateReplyFromErrorMsg(status, reply);\n    on_finish();\n  };\n  client_->PredictAsync(request, reply, stub_.get(), callback, worker_address_);\n  return 
SUCCESS;\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/notify_worker/grpc_notify.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_MASTER_GRPC_NOTIFY_H\n#define MINDSPORE_SERVING_MASTER_GRPC_NOTIFY_H\n#include <vector>\n#include <string>\n#include <memory>\n#include <atomic>\n#include \"master/notify_worker/base_notify.h\"\n#include \"proto/ms_worker.pb.h\"\n#include \"proto/ms_worker.grpc.pb.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API GrpcNotifyWorker : public BaseNotifyWorker {\n public:\n  explicit GrpcNotifyWorker(const std::string &worker_address);\n  ~GrpcNotifyWorker() override;\n\n  Status DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                       const PredictOnFinish &on_finish) override;\n\n private:\n  std::string worker_address_;\n  std::shared_ptr<proto::MSWorker::Stub> stub_ = nullptr;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_MASTER_GRPC_NOTIFY_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/restful/http_handle.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"master/restful/http_handle.h\"\n#include <stdio.h>\n#include <string.h>\n#include <assert.h>\n#include <vector>\n\n#include \"master/restful/http_process.h\"\n#include \"master/server.h\"\n\nnamespace mindspore {\nnamespace serving {\nstatic std::vector<unsigned char> encode_table = {\n  'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n  'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',\n  's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/'};\nstatic std::vector<unsigned char> decode_table = {\n  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 62,\n  255, 255, 255, 63,  52,  53,  54,  55,  56,  57,  58,  59,  60,  61,  255, 255, 255, 255, 255, 255, 255, 0,\n  1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  20,  21,  22,\n  23,  24,  25,  255, 255, 255, 255, 255, 255, 26,  27,  28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,\n  39,  40,  41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,  255, 255, 255, 255, 255};\n\nsize_t 
Base64Encode(const uint8_t *input, size_t length, uint8_t *output) {\n  if (length == 0) {\n    return 0;\n  }\n\n  size_t i, j;\n  for (i = 0, j = 0; i + 3 <= length; i += 3) {\n    output[j++] = encode_table[input[i] >> 2];\n    output[j++] = encode_table[((input[i] << 4) & 0x30) | (input[i + 1] >> 4)];\n    output[j++] = encode_table[((input[i + 1] << 2) & 0x3c) | (input[i + 2] >> 6)];\n    output[j++] = encode_table[input[i + 2] & 0x3f];\n  }\n\n  if (i < length) {\n    uint32_t left_num = length - i;\n    if (left_num == 1) {\n      output[j++] = encode_table[input[i] >> 2];\n      output[j++] = encode_table[(input[i] << 4) & 0x30];\n      output[j++] = '=';\n      output[j++] = '=';\n    } else {\n      output[j++] = encode_table[input[i] >> 2];\n      output[j++] = encode_table[((input[i] << 4) & 0x30) | (input[i + 1] >> 4)];\n      output[j++] = encode_table[(input[i + 1] << 2) & 0x3c];\n      output[j++] = '=';\n    }\n  }\n  return j;\n}\n\nsize_t Base64Decode(const uint8_t *target, size_t target_length, uint8_t *origin) {\n  if (target_length == 0 || target_length % 4 != 0) {\n    return 0;\n  }\n  size_t i, j = 0;\n  uint8_t value[4];\n  for (i = 0; i < target_length; i += 4) {\n    for (size_t k = 0; k < 4; k++) {\n      value[k] = decode_table[target[i + k]];\n    }\n\n    // value[2], value[3]:may be '='\n    if (value[0] >= 64 || value[1] >= 64) {\n      MSI_LOG_EXCEPTION << \"Decode value is not more than max value 64\";\n    }\n\n    origin[j++] = (value[0] << 2) | (value[1] >> 4);\n\n    if (value[2] >= 64) {\n      break;\n    } else if (value[3] >= 64) {\n      origin[j++] = (value[1] << 4) | (value[2] >> 2);\n      break;\n    } else {\n      origin[j++] = (value[1] << 4) | (value[2] >> 2);\n      origin[j++] = (value[2] << 6) | value[3];\n    }\n  }\n  return j;\n}\n\nsize_t GetB64TargetSize(size_t origin_len) {\n  size_t target_size = 0;\n  if (origin_len % 3 == 0) {\n    target_size = (origin_len / 3) * 4;\n  } else {\n    target_size = 
(origin_len / 3 + 1) * 4;\n  }\n  return target_size;\n}\n\nsize_t GetB64OriginSize(size_t target_len, size_t tail_size) {\n  size_t origin_length = 0;\n  if (target_len == 0 || target_len % 4 != 0) {\n    return origin_length;\n  }\n  origin_length = 3 * (target_len / 4) - tail_size;\n  return origin_length;\n}\n\nsize_t GetTailEqualSize(const std::string &str) {\n  size_t length = str.size();\n  if (length % 4 != 0) {\n    return UINT32_MAX;\n  }\n  size_t count = 0;\n  if (length >= 1 && str[length - 1] == '=') {\n    count++;\n  }\n  if (length >= 2 && str[length - 2] == '=') {\n    count++;\n  }\n  return count;\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/restful/http_handle.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_MASTER_HTTP_HANDLE_H\n#define MINDSPORE_SERVING_MASTER_HTTP_HANDLE_H\n\n#include <string>\n#include <memory>\n#include \"common/serving_common.h\"\n#include \"master/restful/restful_request.h\"\n\nusing nlohmann::json;\nnamespace mindspore {\nnamespace serving {\nsize_t Base64Encode(const uint8_t *input, size_t length, uint8_t *output);\nsize_t Base64Decode(const uint8_t *target, size_t target_length, uint8_t *origin);\nsize_t GetB64TargetSize(size_t origin_len);\nsize_t GetB64OriginSize(size_t target_len, size_t tail_size);\nsize_t GetTailEqualSize(const std::string &str);\n\n}  // namespace serving\n}  // namespace mindspore\n#endif  // MINDSPORE_SERVING_MASTER_HTTP_HANDLE_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/restful/http_process.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"master/restful/http_process.h\"\n#include <map>\n#include <vector>\n#include <functional>\n#include <utility>\n#include <iostream>\n#include <sstream>\n#include <algorithm>\n#include \"common/serving_common.h\"\n#include \"master/restful/http_handle.h\"\n#include \"common/float16.h\"\n#include \"master/server.h\"\n\nusing mindspore::serving::proto::Instance;\nusing mindspore::serving::proto::PredictReply;\nusing mindspore::serving::proto::PredictRequest;\n\nnamespace mindspore {\nnamespace serving {\nconst int BUF_MAX = 0x7FFFFFFF;\n\nstatic const std::map<DataType, HTTP_DATA_TYPE> infer_type2_http_type{{DataType::kMSI_Int32, HTTP_DATA_INT},\n                                                                      {DataType::kMSI_Float32, HTTP_DATA_FLOAT}};\n\nstatic const std::map<HTTP_DATA_TYPE, DataType> http_type2_infer_type{{HTTP_DATA_INT, DataType::kMSI_Int32},\n                                                                      {HTTP_DATA_FLOAT, DataType::kMSI_Float32},\n                                                                      {HTTP_DATA_BOOL, DataType::kMSI_Bool},\n                                                                      {HTTP_DATA_STR, DataType::kMSI_String},\n                                                                      {HTTP_DATA_OBJ, DataType::kMSI_Bytes}};\n\nstatic const 
std::map<std::string, DataType> str2_infer_type{\n  {\"int8\", DataType::kMSI_Int8},       {\"int16\", DataType::kMSI_Int16},     {\"int32\", DataType::kMSI_Int32},\n  {\"int64\", DataType::kMSI_Int64},     {\"uint8\", DataType::kMSI_Uint8},     {\"uint16\", DataType::kMSI_Uint16},\n  {\"uint32\", DataType::kMSI_Uint32},   {\"uint64\", DataType::kMSI_Uint64},   {\"fp16\", DataType::kMSI_Float16},\n  {\"fp32\", DataType::kMSI_Float32},    {\"fp64\", DataType::kMSI_Float64},    {\"float16\", DataType::kMSI_Float16},\n  {\"float32\", DataType::kMSI_Float32}, {\"float64\", DataType::kMSI_Float64}, {\"bool\", DataType::kMSI_Bool},\n  {\"str\", DataType::kMSI_String},      {\"bytes\", DataType::kMSI_Bytes}};\n\ntemplate <typename T>\nbool RestfulService::IsString() {\n  return typeid(T).hash_code() == typeid(std::string).hash_code();\n}\n\nstd::string RestfulService::GetString(const uint8_t *ptr, size_t length) {\n  std::string str;\n  for (size_t i = 0; i < length; i++) {\n    str += ptr[i];\n  }\n  return str;\n}\n\nStatus RestfulService::CheckObjTypeMatchShape(DataType data_type, const std::vector<int64_t> &shape) {\n  if (data_type == kMSI_String || data_type == kMSI_Bytes) {\n    size_t elements_nums = std::accumulate(shape.begin(), shape.end(), 1LL, std::multiplies<size_t>());\n    if (elements_nums != 1) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << \"json object, only support scalar when data type is string or bytes, please check 'type' or 'shape'\";\n    }\n  }\n  return SUCCESS;\n}\n\nRequestType RestfulService::GetReqType(const std::string &str) {\n  auto it = std::find(request_type_list_.begin(), request_type_list_.end(), str);\n  if (it == request_type_list_.end()) {\n    return kInvalidType;\n  }\n\n  if (*it == kInstancesRequest) {\n    return kInstanceType;\n  }\n\n  return kInvalidType;\n}\n\nstd::string RestfulService::GetReqTypeStr(RequestType req_type) {\n  switch (req_type) {\n    case kInstanceType:\n      return 
kInstancesRequest;\n    default:\n      break;\n  }\n  return \"\";\n}\n\nStatus RestfulService::CheckObjType(const string &type) {\n  Status status(SUCCESS);\n  auto it = str2_infer_type.find(type);\n  if (it == str2_infer_type.end()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json object, specified type:'\" << type << \"' is illegal\";\n  }\n  return status;\n}\n\nDataType RestfulService::GetObjDataType(const json &js) {\n  DataType type = kMSI_Unknown;\n  if (!js.is_object()) {\n    return type;\n  }\n\n  auto it1 = js.find(kType);\n  if (it1 == js.end()) {\n    type = kMSI_Bytes;\n  } else {\n    auto type_str = it1.value();\n    auto it2 = str2_infer_type.find(type_str);\n    if (it2 != str2_infer_type.end()) {\n      type = it2->second;\n    }\n  }\n\n  return type;\n}\n\nstd::string RestfulService::GetStringByDataType(DataType type) {\n  for (const auto &item : str2_infer_type) {\n    // cppcheck-suppress useStlAlgorithm\n    if (item.second == type) {\n      return item.first;\n    }\n  }\n  return \"\";\n}\n\nbool RestfulService::JsonMatchDataType(const json &js, DataType type) {\n  bool flag = false;\n  if (js.is_number_integer()) {\n    if (type >= kMSI_Int8 && type <= kMSI_Uint64) {\n      flag = true;\n    }\n  } else if (js.is_number_float()) {\n    if (type >= kMSI_Float16 && type <= kMSI_Float64) {\n      flag = true;\n    }\n  } else if (js.is_string()) {\n    if (type == kMSI_String) {\n      flag = true;\n    }\n  } else if (js.is_boolean()) {\n    if (type == kMSI_Bool) {\n      flag = true;\n    }\n  }\n\n  return flag;\n}\n\nstd::vector<int64_t> RestfulService::GetObjShape(const json &js) {\n  std::vector<int64_t> shape;\n  auto it = js.find(kShape);\n  if (it != js.end()) {\n    shape = GetSpecifiedShape(it.value());\n  }\n  return shape;\n}\n\nstd::vector<int64_t> RestfulService::GetArrayShape(const json &json_array) {\n  std::vector<int64_t> json_shape;\n  const json *tmp_json = &json_array;\n  while (tmp_json->is_array()) 
{\n    if (tmp_json->empty()) {\n      break;\n    }\n\n    (void)json_shape.emplace_back(tmp_json->size());\n    tmp_json = &tmp_json->at(0);\n  }\n\n  return json_shape;\n}\n\nstd::vector<int64_t> RestfulService::GetSpecifiedShape(const json &js) {\n  std::vector<int64_t> shape;\n  if (!js.is_array()) {\n    return shape;\n  }\n  if (js.empty()) {\n    return shape;\n  }\n\n  for (size_t i = 0; i < js.size(); i++) {\n    auto &item = js.at(i);\n    if (!item.is_number_unsigned()) {\n      return {};\n    } else {\n      shape.push_back(item.get<uint32_t>());\n    }\n  }\n\n  return shape;\n}\n\nDataType RestfulService::GetArrayDataType(const json &json_array, HTTP_DATA_TYPE *type_format_ptr) {\n  MSI_EXCEPTION_IF_NULL(type_format_ptr);\n  auto &type_format = *type_format_ptr;\n  DataType data_type = kMSI_Unknown;\n  const json *tmp_json = &json_array;\n  while (tmp_json->is_array()) {\n    if (tmp_json->empty()) {\n      return data_type;\n    }\n\n    tmp_json = &tmp_json->at(0);\n  }\n\n  if (tmp_json->is_number_integer()) {\n    type_format = HTTP_DATA_INT;\n    data_type = http_type2_infer_type.at(type_format);\n  } else if (tmp_json->is_number_float()) {\n    type_format = HTTP_DATA_FLOAT;\n    data_type = http_type2_infer_type.at(type_format);\n  } else if (tmp_json->is_boolean()) {\n    type_format = HTTP_DATA_BOOL;\n    data_type = http_type2_infer_type.at(type_format);\n  } else if (tmp_json->is_object()) {\n    type_format = HTTP_DATA_OBJ;\n    data_type = GetObjDataType(*tmp_json);\n  } else if (tmp_json->is_string()) {\n    type_format = HTTP_DATA_STR;\n    data_type = http_type2_infer_type.at(type_format);\n  }\n\n  return data_type;\n}\n\nStatus RestfulService::CheckReqJsonValid(const json &js_msg) {\n  int count = 0;\n  for (auto &item : request_type_list_) {\n    auto it = js_msg.find(item);\n    if (it != js_msg.end()) {\n      count++;\n      auto request_type = GetReqType(item);\n      if (request_type == kInvalidType) {\n        return 
INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"only support instances mode\";\n      }\n\n      request_type_ = request_type;\n    }\n  }\n\n  if (count != 1) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n           << \"key 'instances' expects to exist once, but actually \" << count << \" times\";\n  }\n  return SUCCESS;\n}\n\nStatus RestfulService::GetInstancesType(const json &instances) {\n  Status status{SUCCESS};\n  // Eg:{\"instances\" : 1}\n  if (!(instances.is_array() || instances.is_object())) {\n    instances_type_ = kNokeyWay;\n    return status;\n  }\n\n  // Eg:{\"instances\":{\"A\":1, \"B\":2}}\n  if (instances.is_object()) {\n    instances_type_ = kKeyWay;\n    return status;\n  }\n\n  // array:\n  if (instances.empty()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"instances value is array type, but no value\";\n  }\n  auto first_instance = instances.at(0);\n  if (first_instance.is_object()) {\n    instances_type_ = kKeyWay;\n  } else {\n    instances_type_ = kNokeyWay;\n  }\n\n  return status;\n}\n\nStatus RestfulService::CheckObj(const json &js) {\n  if (!js.is_object()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json is not object\";\n  }\n\n  if (js.empty()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json object, value is empty\";\n  }\n\n  // 1)required:b64 2)optional:type 3)optional:shape\n  if (js.size() > 3) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n           << \"json object, items size is more than 3, only support specified ['b64', 'type', 'shape']\";\n  }\n\n  int b64_count = 0;\n  int shape_count = 0;\n  int type_count = 0;\n  for (auto item = js.begin(); item != js.end(); ++item) {\n    const auto &key = item.key();\n    auto value = item.value();\n    if (key == kB64) {\n      b64_count++;\n    } else if (key == kType) {\n      if (!value.is_string()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json object, key is 'type', value should be string type\";\n      
}\n      auto status = CheckObjType(value);\n      if (status != SUCCESS) {\n        return status;\n      }\n      type_count++;\n    } else if (key == kShape) {\n      if (!value.is_array()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json object, key is 'shape', value should be array type\";\n      }\n      bool zero_dims_before = false;\n      for (auto it = value.begin(); it != value.end(); ++it) {\n        if (zero_dims_before) {\n          return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n                 << \"json object, key is 'shape', invalid shape value \" << value.dump();\n        }\n        if (!(it->is_number_unsigned())) {\n          return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n                 << \"json object, key is 'shape', array value should be unsigned integer\";\n        }\n        auto number = it->get<int32_t>();\n        if (number < 0) {\n          return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n                 << \"json object, key is 'shape', number value should not be negative number, shape value: \"\n                 << value.dump();\n        }\n        if (number == 0) {\n          zero_dims_before = true;\n        }\n      }\n      shape_count++;\n    } else {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << \"json object, key is not ['b64', 'type', 'shape'], fail key:\" << key;\n    }\n  }\n\n  if (b64_count != 1) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json object, 'b64' should be specified only one time\";\n  }\n\n  if (type_count > 1) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json object, 'type' should be specified no more than one time\";\n  }\n\n  if (shape_count > 1) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json object, 'shape' should be specified no more than one time\";\n  }\n\n  return SUCCESS;\n}\n\nStatus RestfulService::ParseItemScalar(const json &value, ProtoTensor *const pb_tensor) {\n  Status status(SUCCESS);\n  
std::vector<int64_t> scalar_shape = {};\n  if (value.is_number_integer()) {\n    DataType type = kMSI_Int32;\n    pb_tensor->set_data_type(type);\n    pb_tensor->set_shape(scalar_shape);\n    pb_tensor->resize_data(pb_tensor->GetTypeSize(type));\n    status = GetScalarByType(type, value, 0, pb_tensor);\n  } else if (value.is_number_float()) {\n    DataType type = kMSI_Float32;\n    pb_tensor->set_data_type(type);\n    pb_tensor->set_shape(scalar_shape);\n    pb_tensor->resize_data(pb_tensor->GetTypeSize(type));\n    status = GetScalarByType(type, value, 0, pb_tensor);\n  } else if (value.is_boolean()) {\n    DataType type = kMSI_Bool;\n    pb_tensor->set_data_type(type);\n    pb_tensor->set_shape(scalar_shape);\n    pb_tensor->resize_data(pb_tensor->GetTypeSize(type));\n    status = GetScalarByType(type, value, 0, pb_tensor);\n  } else if (value.is_string()) {\n    DataType type = kMSI_String;\n    pb_tensor->set_data_type(type);\n    pb_tensor->set_shape(scalar_shape);\n    status = GetScalarByType(type, value, 0, pb_tensor);\n  } else if (value.is_null()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json value is null, it is not supported\";\n  } else if (value.is_discarded()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json value is discarded type, it is not supported\";\n  } else {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json value type is unregistered\";\n  }\n  return status;\n}\n\nStatus RestfulService::ParseItemObject(const json &value, ProtoTensor *const pb_tensor) {\n  auto status = CheckObj(value);\n  if (status != SUCCESS) {\n    return status;\n  }\n\n  DataType type = GetObjDataType(value);\n  if (type == kMSI_Unknown) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json object, type is unknown\";\n  }\n\n  std::vector<int64_t> shape = GetObjShape(value);\n  bool is_tensor = false;\n  if (type != kMSI_String && type != kMSI_Bytes) {\n    is_tensor = true;\n  }\n  if (is_tensor) {\n    size_t 
shape_size = std::accumulate(shape.begin(), shape.end(), 1LL, std::multiplies<size_t>());\n    size_t type_size = pb_tensor->GetTypeSize(type);\n    pb_tensor->resize_data(shape_size * type_size);\n  }\n  status = CheckObjTypeMatchShape(type, shape);\n  if (status != SUCCESS) {\n    return status;\n  }\n  pb_tensor->set_data_type(type);\n  pb_tensor->set_shape(shape);\n  status = GetScalarByType(serving::kMSI_Bytes, value[kB64], 0, pb_tensor);\n  return status;\n}\n\nStatus RestfulService::ParseItemArray(const json &value, ProtoTensor *const pb_tensor) {\n  HTTP_DATA_TYPE type_format = HTTP_DATA_NONE;\n  auto shape = GetArrayShape(value);\n  if (shape.empty()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json array, shape is empty\";\n  }\n  DataType data_type = GetArrayDataType(value, &type_format);\n  if (data_type == kMSI_Unknown) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json array, data type is unknown\";\n  }\n  bool is_tensor = false;\n  if (data_type != kMSI_String && data_type != kMSI_Bytes) {\n    is_tensor = true;\n  }\n  // instances mode:only support one item\n  if (request_type_ == kInstanceType) {\n    if (!is_tensor) {\n      size_t elements_nums = std::accumulate(shape.begin(), shape.end(), 1LL, std::multiplies<size_t>());\n      if (elements_nums != 1) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json array, string or bytes type only support one item\";\n      }\n    }\n  }\n  // set real data type\n  pb_tensor->set_data_type(data_type);\n  pb_tensor->set_shape(shape);\n\n  if (is_tensor) {\n    size_t shape_size = std::accumulate(shape.begin(), shape.end(), 1LL, std::multiplies<size_t>());\n    size_t type_size = pb_tensor->GetTypeSize(data_type);\n    pb_tensor->resize_data(shape_size * type_size);\n  }\n\n  if (type_format == HTTP_DATA_OBJ) {\n    if (data_type != kMSI_Bytes && data_type != kMSI_String) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << \"json array, item is 
object type, object only support string or bytes type\";\n    }\n  }\n  return RecursiveGetArray(value, 0, 0, type_format, pb_tensor);\n}\n\n// 1. parse request common func\nStatus RestfulService::ParseItem(const json &value, ProtoTensor *const pb_tensor) {\n  if (value.is_object()) {\n    return ParseItemObject(value, pb_tensor);\n  } else if (value.is_array()) {\n    return ParseItemArray(value, pb_tensor);\n  } else {\n    return ParseItemScalar(value, pb_tensor);\n  }\n}\n\nStatus RestfulService::RecursiveGetArray(const json &json_data, size_t depth, size_t data_index,\n                                         HTTP_DATA_TYPE type_format, ProtoTensor *const request_tensor) {\n  Status status(SUCCESS);\n  std::vector<int64_t> required_shape = request_tensor->shape();\n  if (depth >= required_shape.size()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n           << \"invalid json array: current depth \" << depth << \" is more than shape dims \" << required_shape.size();\n  }\n  if (!json_data.is_array()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"invalid json array: json type is not array\";\n  }\n  if (json_data.size() != static_cast<size_t>(required_shape[depth])) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n           << \"invalid json array: json size is \" << json_data.size() << \", the dim \" << depth << \" expected to be \"\n           << required_shape[depth];\n  }\n  if (depth + 1 < required_shape.size()) {\n    size_t sub_element_cnt =\n      std::accumulate(required_shape.begin() + depth + 1, required_shape.end(), 1LL, std::multiplies<size_t>());\n    for (size_t k = 0; k < json_data.size(); k++) {\n      status =\n        RecursiveGetArray(json_data[k], depth + 1, data_index + sub_element_cnt * k, type_format, request_tensor);\n      if (status != SUCCESS) {\n        return status;\n      }\n    }\n  } else {\n    status = GetArrayData(json_data, data_index, type_format, request_tensor);\n    if (status != SUCCESS) {\n   
   return status;\n    }\n  }\n  return status;\n}\n\nStatus RestfulService::GetArrayData(const json &js, size_t data_index, HTTP_DATA_TYPE type,\n                                    ProtoTensor *const request_tensor) {\n  Status status(SUCCESS);\n  size_t element_nums = js.size();\n  if (type != HTTP_DATA_OBJ) {\n    for (size_t k = 0; k < element_nums; k++) {\n      auto &json_data = js[k];\n      if (!(json_data.is_number() || json_data.is_boolean() || json_data.is_string())) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json array, data should be number, bool, string or bytes\";\n      }\n      auto flag = JsonMatchDataType(json_data, request_tensor->data_type());\n      if (!flag) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json array, elements type is not equal\";\n      }\n      status = GetScalarByType(request_tensor->data_type(), json_data, data_index + k, request_tensor);\n      if (status != SUCCESS) {\n        return status;\n      }\n    }\n  } else {\n    for (size_t k = 0; k < element_nums; k++) {\n      auto &json_data = js[k];\n      auto value_type = GetObjDataType(json_data);\n      // Array:object only support string or bytes\n      if (value_type != kMSI_String && value_type != kMSI_Bytes) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json array, object type only support string or bytes type\";\n      }\n\n      if (value_type != request_tensor->data_type()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json array, elements type is not equal\";\n      }\n\n      status = GetScalarByType(value_type, json_data[kB64], data_index + k, request_tensor);\n      if (status != SUCCESS) {\n        return status;\n      }\n    }\n  }\n  return status;\n}\n\nStatus RestfulService::GetScalarByType(DataType type, const json &js, size_t index, ProtoTensor *const request_tensor) {\n  Status status(SUCCESS);\n  if (type == kMSI_Unknown) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << 
\"data type is unknown\";\n  }\n  switch (type) {\n    case kMSI_Bool:\n      status = GetScalarData<bool>(js, index, false, request_tensor);\n      break;\n    case kMSI_Int8:\n      status = GetScalarData<int8_t>(js, index, false, request_tensor);\n      break;\n    case kMSI_Int16:\n      status = GetScalarData<int16_t>(js, index, false, request_tensor);\n      break;\n    case kMSI_Int32:\n      status = GetScalarData<int32_t>(js, index, false, request_tensor);\n      break;\n    case kMSI_Int64:\n      status = GetScalarData<int64_t>(js, index, false, request_tensor);\n      break;\n    case kMSI_Uint8:\n      status = GetScalarData<uint8_t>(js, index, false, request_tensor);\n      break;\n    case kMSI_Uint16:\n      status = GetScalarData<uint16_t>(js, index, false, request_tensor);\n      break;\n    case kMSI_Uint32:\n      status = GetScalarData<uint32_t>(js, index, false, request_tensor);\n      break;\n    case kMSI_Uint64:\n      status = GetScalarData<uint64_t>(js, index, false, request_tensor);\n      break;\n    case kMSI_Float16:\n      status = GetScalarData<float>(js, index, false, request_tensor);\n      break;\n    case kMSI_Float32:\n      status = GetScalarData<float>(js, index, false, request_tensor);\n      break;\n    case kMSI_Float64:\n      status = GetScalarData<double>(js, index, false, request_tensor);\n      break;\n    case kMSI_String:\n      status = GetScalarData<std::string>(js, index, false, request_tensor);\n      break;\n    case kMSI_Bytes:\n      status = GetScalarData<std::string>(js, index, true, request_tensor);\n      break;\n    default:\n      auto type_str = GetStringByDataType(type);\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"data type:\" << type_str << \" is not supported\";\n  }\n  return status;\n}\n\ntemplate <typename T>\nStatus RestfulService::GetScalarData(const json &js, size_t index, bool is_bytes, ProtoTensor *const request_tensor) {\n  Status status(SUCCESS);\n  if (IsString<T>()) {\n    
// 1.string\n    if (!js.is_string()) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << \"get scalar data failed, type is string, but json is not string type\";\n    }\n\n    auto value = js.get<std::string>();\n    if (is_bytes) {\n      DataType real_type = request_tensor->data_type();\n      auto tail_equal_size = GetTailEqualSize(value);\n      if (tail_equal_size == UINT32_MAX) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"'\" << value << \"' is illegal b64 encode string\";\n      }\n      auto origin_size = GetB64OriginSize(value.length(), tail_equal_size);\n      std::vector<uint8_t> buffer(origin_size, 0);\n      auto target_size = Base64Decode(reinterpret_cast<uint8_t *>(value.data()), value.length(), buffer.data());\n      if (target_size != origin_size) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"decode base64 failed, size is not matched.\";\n      }\n      if (real_type == kMSI_Bytes || real_type == kMSI_String) {\n        request_tensor->add_bytes_data(buffer.data(), origin_size);\n      } else {\n        auto type_size = request_tensor->GetTypeSize(real_type);\n        auto element_cnt = request_tensor->element_cnt();\n        if (origin_size != type_size * element_cnt) {\n          return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n                 << \"size is not matched, decode base64 size:\" << origin_size\n                 << \"; Given info: type:\" << GetStringByDataType(real_type) << \"; type size:\" << type_size\n                 << \"; element nums:\" << element_cnt;\n        }\n        if (origin_size > 0) {\n          auto data = reinterpret_cast<T *>(request_tensor->mutable_data()) + index;\n          (void)memcpy_s(data, origin_size, buffer.data(), buffer.size());\n        }\n      }\n    } else {\n      request_tensor->add_bytes_data(reinterpret_cast<uint8_t *>(value.data()), value.length());\n    }\n  } else {\n    DataType data_type = request_tensor->data_type();\n    auto flag = 
JsonMatchDataType(js, data_type);\n    if (!flag) {\n      auto type_str = GetStringByDataType(data_type);\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << \"data type and json type is not matched, data type is:\" << type_str;\n    }\n\n    // 2.number\n    if ((js.is_number() || js.is_boolean())) {\n      // 1)common number\n      auto data = reinterpret_cast<T *>(request_tensor->mutable_data()) + index;\n      *data = js.get<T>();\n    }\n  }\n\n  return status;\n}\n// 2.main\nvoid RestfulService::RunRestful(const std::shared_ptr<RestfulRequest> &restful_request) {\n  auto restful_service = std::make_shared<RestfulService>();\n  restful_service->RunRestfulInner(restful_request, restful_service);\n}\n\nvoid RestfulService::RunRestfulInner(const std::shared_ptr<RestfulRequest> &restful_request,\n                                     const std::shared_ptr<RestfulService> &restful_service) {\n  MSI_TIME_STAMP_START(RunRestful)\n  auto status = ParseRequest(restful_request, &request_);\n  if (status != SUCCESS) {\n    std::string msg = \"Parser request failed, \" + status.StatusMessage();\n    restful_request->ErrorMessage(Status(status.StatusCode(), msg));\n    return;\n  }\n  auto callback = [restful_service, restful_request, time_start_RunRestful]() {\n    nlohmann::json predict_json;\n    Status status;\n    try {\n      status = restful_service->ParseReply(restful_service->reply_, &predict_json);\n    } catch (std::exception &e) {\n      MSI_LOG_ERROR << \"Failed to construct the response: \" << e.what();\n      restful_request->ErrorMessage(Status(status.StatusCode(), \"Failed to construct the response\"));\n      return;\n    }\n    if (status != SUCCESS) {\n      std::string msg = \"Failed to construct the response: \" + status.StatusMessage();\n      restful_request->ErrorMessage(Status(status.StatusCode(), msg));\n    } else {\n      restful_request->RestfulReplay(predict_json.dump());\n    }\n    MSI_TIME_STAMP_END(RunRestful)\n  };\n  
auto dispatcher = Server::Instance().GetDispatcher();\n  dispatcher->DispatchAsync(request_, &reply_, callback);\n}\n\n// 3.parse request\nStatus RestfulService::ParseRequest(const std::shared_ptr<RestfulRequest> &restful_request,\n                                    PredictRequest *const request) {\n  Status status(SUCCESS);\n  // 1. parse common msg\n  status = ParseReqCommonMsg(restful_request, request);\n  if (status != SUCCESS) {\n    return status;\n  }\n\n  // 2. parse json\n  auto request_ptr = restful_request->decompose_event_request();\n  auto &js_msg = request_ptr->request_message_;\n  status = CheckReqJsonValid(js_msg);\n  if (status != SUCCESS) {\n    return status;\n  }\n\n  switch (request_type_) {\n    case kInstanceType:\n      status = ParseInstancesMsg(js_msg, request);\n      break;\n    default:\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"restful request only support instances mode\";\n  }\n\n  return status;\n}\n\nStatus RestfulService::ParseReqCommonMsg(const std::shared_ptr<RestfulRequest> &restful_request,\n                                         PredictRequest *const request) {\n  Status status(SUCCESS);\n  auto request_ptr = restful_request->decompose_event_request();\n  if (request_ptr == nullptr) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"Decompose event request is nullptr\";\n  }\n  request->mutable_servable_spec()->set_name(request_ptr->model_name_);\n  request->mutable_servable_spec()->set_version_number(request_ptr->version_);\n  request->mutable_servable_spec()->set_method_name(request_ptr->service_method_);\n  return status;\n}\n\nStatus RestfulService::ParseInstancesMsg(const json &js_msg, PredictRequest *const request) {\n  Status status = SUCCESS;\n  auto type = GetReqTypeStr(request_type_);\n  auto instances = js_msg.find(type);\n  if (instances == js_msg.end()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"instances request json should have instances key word\";\n  }\n\n  // get instances 
way:{key, value} or {value}\n  status = GetInstancesType(*instances);\n  if (status != SUCCESS) {\n    return status;\n  }\n\n  switch (instances_type_) {\n    case kKeyWay: {\n      status = ParseKeyInstances(*instances, request);\n      break;\n    }\n    case kNokeyWay: {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"instances no key mode is not supported\";\n    }\n    case kInvalidWay: {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"invalid request type\";\n    }\n  }\n  return status;\n}\n\nStatus RestfulService::ParseKeyInstances(const json &instances, PredictRequest *const request) {\n  Status status(SUCCESS);\n  if (instances.is_object()) {\n    // one instance:{\"instances\"：{\"A\":1, \"B\": 2}}\n    if (instances.empty()) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json object, value is empty\";\n    }\n    status = PaserKeyOneInstance(instances, request);\n    if (status != SUCCESS) {\n      MSI_LOG_ERROR << \"instances:parse one instance failed\";\n      return status;\n    }\n    instances_nums_ = 1;\n  } else {\n    // multi instance:{\"instances\":[{}, {}]}\n    for (size_t i = 0; i < instances.size(); i++) {\n      auto &instance = instances.at(i);\n      if (!instance.is_object()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json array, instance is not object type\";\n      }\n\n      if (instance.empty()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"json array, instance is object type, but no value\";\n      }\n\n      status = PaserKeyOneInstance(instance, request);\n      if (status != SUCCESS) {\n        return status;\n      }\n    }\n    instances_nums_ = instances.size();\n  }\n  return status;\n}\n\n// instance_mgs:one instance, type is object\nStatus RestfulService::PaserKeyOneInstance(const json &instance_msg, PredictRequest *const request) {\n  Status status(SUCCESS);\n  auto instance = request->add_instances();\n\n  for (auto it = instance_msg.begin(); it != 
instance_msg.end(); ++it) {\n    auto key = it.key();\n    if (key.empty()) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"string key is empty\";\n    }\n    auto value = it.value();\n\n    auto &map_item = *(instance->mutable_items());\n    proto::Tensor &tensor = map_item[key];\n    ProtoTensor pb_tensor(&tensor);\n\n    status = ParseItem(value, &pb_tensor);\n    if (status != SUCCESS) {\n      return status;\n    }\n  }\n  return status;\n}\n\n// 4.parse reply common func\nStatus RestfulService::ParseReplyDetail(const proto::Tensor &tensor, json *const js) {\n  Status status(SUCCESS);\n  const ProtoTensor pb_tensor(const_cast<proto::Tensor *>(&tensor));\n  auto shape = pb_tensor.shape();\n  if (shape.empty()) {\n    status = ParseScalar(pb_tensor, 0, js);\n    if (status != SUCCESS) {\n      return status;\n    }\n  } else {\n    status = CheckReply(pb_tensor);\n    if (status != SUCCESS) {\n      return status;\n    }\n    status = RecursiveParseArray(pb_tensor, 0, 0, js);\n    if (status != SUCCESS) {\n      return status;\n    }\n  }\n  return status;\n}\n\nStatus RestfulService::ParseScalar(const ProtoTensor &pb_tensor, size_t index, json *const js) {\n  Status status(SUCCESS);\n  DataType data_type = pb_tensor.data_type();\n  if (data_type == kMSI_Unknown) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Data type is unknown\";\n  }\n  switch (data_type) {\n    case kMSI_Bool:\n      status = ParseScalarData<bool>(pb_tensor, false, index, js);\n      break;\n    case kMSI_Int8:\n      status = ParseScalarData<int8_t>(pb_tensor, false, index, js);\n      break;\n    case kMSI_Int16:\n      status = ParseScalarData<int16_t>(pb_tensor, false, index, js);\n      break;\n    case kMSI_Int32:\n      status = ParseScalarData<int32_t>(pb_tensor, false, index, js);\n      break;\n    case kMSI_Int64:\n      status = ParseScalarData<int64_t>(pb_tensor, false, index, js);\n      break;\n    case kMSI_Uint8:\n      status = 
ParseScalarData<uint8_t>(pb_tensor, false, index, js);\n      break;\n    case kMSI_Uint16:\n      status = ParseScalarData<uint16_t>(pb_tensor, false, index, js);\n      break;\n    case kMSI_Uint32:\n      status = ParseScalarData<uint32_t>(pb_tensor, false, index, js);\n      break;\n    case kMSI_Uint64:\n      status = ParseScalarData<uint64_t>(pb_tensor, false, index, js);\n      break;\n    case kMSI_Float16: {\n      const float16 *data = reinterpret_cast<const float16 *>(pb_tensor.data()) + index;\n      float value = half_to_float(*data);\n      *js = value;\n      break;\n    }\n    case kMSI_Float32:\n      status = ParseScalarData<float>(pb_tensor, false, index, js);\n      break;\n    case kMSI_Float64:\n      status = ParseScalarData<double>(pb_tensor, false, index, js);\n      break;\n    case kMSI_String:\n      status = ParseScalarData<std::string>(pb_tensor, false, index, js);\n      break;\n    case kMSI_Bytes:\n      status = ParseScalarData<std::string>(pb_tensor, true, index, js);\n      break;\n    default:\n      status = INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"reply data type is not supported\";\n      break;\n  }\n  return status;\n}\n\ntemplate <typename T>\nStatus RestfulService::ParseScalarData(const ProtoTensor &pb_tensor, bool is_bytes, size_t index, json *const js) {\n  Status status(SUCCESS);\n\n  if (!IsString<T>()) {\n    const T *data = reinterpret_cast<const T *>(pb_tensor.data()) + index;\n    T value = *data;\n    *js = value;\n  } else if (IsString<T>()) {\n    if (!is_bytes) {\n      auto str_nums = pb_tensor.bytes_data_size();\n      if (str_nums == 0) {\n        return INFER_STATUS_LOG_ERROR(FAILED) << \"reply string, size is 0\";\n      }\n      if (index >= str_nums) {\n        return INFER_STATUS_LOG_ERROR(FAILED) << \"reply string, index:\" << index << \" is more than size:\" << str_nums;\n      }\n\n      std::string value;\n      size_t length;\n      const uint8_t *ptr = nullptr;\n      
pb_tensor.get_bytes_data(index, &ptr, &length);\n      value.resize(length);\n      (void)memcpy_s(value.data(), length, ptr, length);\n      *js = value;\n    } else {\n      auto str_nums = pb_tensor.bytes_data_size();\n      if (str_nums == 0) {\n        return INFER_STATUS_LOG_ERROR(FAILED) << \"reply bytes, size is 0\";\n      }\n\n      if (index >= str_nums) {\n        return INFER_STATUS_LOG_ERROR(FAILED) << \"reply bytes, index:\" << index << \" is more than size:\" << str_nums;\n      }\n\n      std::string value;\n      size_t length;\n      const uint8_t *ptr = nullptr;\n      pb_tensor.get_bytes_data(index, &ptr, &length);\n      value.resize(length);\n      (void)memcpy_s(value.data(), length, ptr, length);\n\n      auto target_size = GetB64TargetSize(length);\n      std::vector<uint8_t> buffer(target_size, 0);\n      auto size = Base64Encode(reinterpret_cast<uint8_t *>(value.data()), value.length(), buffer.data());\n      if (size != target_size) {\n        return INFER_STATUS_LOG_ERROR(FAILED)\n               << \"reply bytes, size is not matched, expected size:\" << target_size << \", encode size:\" << size;\n      }\n      std::string str = GetString(buffer.data(), buffer.size());\n      (*js)[kB64] = str;\n    }\n  }\n  return status;\n}\n\nStatus RestfulService::RecursiveParseArray(const ProtoTensor &pb_tensor, size_t depth, size_t pos,\n                                           json *const out_json) {\n  Status status(SUCCESS);\n  std::vector<int64_t> required_shape = pb_tensor.shape();\n  if (depth >= required_shape.size()) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"result shape dims is larger than result shape size \" << required_shape.size();\n  }\n  if (depth == required_shape.size() - 1) {\n    if (required_shape[depth] == 0) {  // make empty array\n      out_json->push_back(json());\n      out_json->clear();\n    }\n    for (int i = 0; i < required_shape[depth]; i++) {\n      out_json->push_back(json());\n      json 
&scalar_json = out_json->back();\n      status = ParseScalar(pb_tensor, pos + i, &scalar_json);\n      if (status != SUCCESS) {\n        return status;\n      }\n    }\n  } else {\n    for (int i = 0; i < required_shape[depth]; i++) {\n      // array:\n      out_json->push_back(json());\n      json &tensor_json = out_json->back();\n      size_t sub_element_cnt =\n        std::accumulate(required_shape.begin() + depth + 1, required_shape.end(), 1LL, std::multiplies<size_t>());\n      status = RecursiveParseArray(pb_tensor, depth + 1, i * sub_element_cnt + pos, &tensor_json);\n      if (status != SUCCESS) {\n        return status;\n      }\n    }\n  }\n  return status;\n}\n\nStatus RestfulService::CheckReply(const ProtoTensor &pb_tensor) {\n  Status status(SUCCESS);\n  DataType data_type = pb_tensor.data_type();\n  if (data_type == kMSI_Unknown) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"reply data type is unknown\";\n  }\n\n  if (data_type == kMSI_String || data_type == kMSI_Bytes) {\n    auto shape = pb_tensor.shape();\n    if (shape.size() != 1) {\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"reply string or bytes, shape should be 1, given shape size:\" << shape.size();\n    }\n  }\n  return status;\n}\n\n// 5.Parse reply\nStatus RestfulService::ParseReply(const PredictReply &reply, json *const out_json) {\n  Status status(SUCCESS);\n  switch (request_type_) {\n    case kInstanceType:\n      status = ParseInstancesReply(reply, out_json);\n      break;\n    default:\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"restful request only support instance mode\";\n  }\n\n  return status;\n}\n\nStatus RestfulService::ParseInstancesReply(const PredictReply &reply, json *const out_json) {\n  Status status(SUCCESS);\n  auto error_size = reply.error_msg_size();\n  auto reply_size = reply.instances().size();\n  if (error_size == 1 && reply_size == 0) {\n    (*out_json)[kErrorMsg] = reply.error_msg()[0].error_msg();\n    return SUCCESS;\n  }\n  if 
(error_size != 0 && error_size != instances_nums_) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"reply error size:\" << error_size << \" is not 0,1 or instances size \"\n                                          << instances_nums_ << \", reply instances size \" << reply_size;\n  }\n  if (reply_size != instances_nums_) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"reply size:\" << reply_size << \" is not matched request size:\" << instances_nums_;\n  }\n\n  (*out_json)[kInstancesReply] = json();\n  json &instances_json = (*out_json)[kInstancesReply];\n\n  for (int32_t i = 0; i < instances_nums_; i++) {\n    instances_json.push_back(json());\n    auto &instance = instances_json.back();\n    if (error_size != 0 && reply.error_msg()[i].error_code() != 0) {\n      instance[kErrorMsg] = reply.error_msg(i).error_msg();\n      continue;\n    }\n    auto &cur_instance = reply.instances(i);\n    auto &items = cur_instance.items();\n    if (items.empty()) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"reply instance items is empty\";\n    }\n\n    for (auto &item : items) {\n      instance[item.first] = json();\n      auto &value_json = instance[item.first];\n      status = ParseReplyDetail(item.second, &value_json);\n      if (status != SUCCESS) {\n        return status;\n      }\n    }\n  }\n  return status;\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/restful/http_process.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_MASTER_HTTP_PROCESS_H\n#define MINDSPORE_SERVING_MASTER_HTTP_PROCESS_H\n\n#include <string>\n#include <memory>\n#include <vector>\n#include <nlohmann/json.hpp>\n#include \"proto/ms_service.pb.h\"\n#include \"master/dispacther.h\"\n#include \"common/proto_tensor.h\"\n#include \"master/restful/restful_request.h\"\n\nusing nlohmann::json;\nusing std::string;\n\nnamespace mindspore {\nnamespace serving {\nconstexpr auto kInstancesRequest = \"instances\";\nconstexpr auto kInstancesReply = \"instances\";\nconstexpr auto kErrorMsg = \"error_msg\";\nconstexpr auto kType = \"type\";\nconstexpr auto kShape = \"shape\";\nconstexpr auto kB64 = \"b64\";\n\nenum RequestType { kInstanceType = 0, kInvalidType };\nenum InstancesType { kNokeyWay = 0, kKeyWay, kInvalidWay };\nenum HTTP_DATA_TYPE { HTTP_DATA_NONE, HTTP_DATA_INT, HTTP_DATA_FLOAT, HTTP_DATA_BOOL, HTTP_DATA_STR, HTTP_DATA_OBJ };\nclass RestfulService {\n public:\n  RestfulService() = default;\n  ~RestfulService() = default;\n\n  static void RunRestful(const std::shared_ptr<RestfulRequest> &restful_request);\n\n private:\n  void RunRestfulInner(const std::shared_ptr<RestfulRequest> &restful_request,\n                       const std::shared_ptr<RestfulService> &restful_service);\n  Status CheckObjTypeMatchShape(DataType data_type, const std::vector<int64_t> 
&shape);\n  std::string GetString(const uint8_t *ptr, size_t length);\n  Status CheckObj(const json &js);\n  Status CheckObjType(const std::string &type);\n  DataType GetObjDataType(const json &js);\n  std::vector<int64_t> GetObjShape(const json &js);\n  std::vector<int64_t> GetArrayShape(const json &json_array);\n  std::vector<int64_t> GetSpecifiedShape(const json &js);\n  DataType GetArrayDataType(const json &json_array, HTTP_DATA_TYPE *type_format);\n  Status CheckReqJsonValid(const json &js_msg);\n  std::string GetStringByDataType(DataType type);\n  bool JsonMatchDataType(const json &js, DataType type);\n\n  template <typename T>\n  Status GetScalarData(const json &js, size_t index, bool is_bytes, ProtoTensor *const request_tensor);\n  Status GetScalarByType(DataType type, const json &js, size_t index, ProtoTensor *const request_tensor);\n\n  Status RecursiveGetArray(const json &json_data, size_t depth, size_t data_index, HTTP_DATA_TYPE type_format,\n                           ProtoTensor *const request_tensor);\n  Status GetArrayData(const json &js, size_t data_index, HTTP_DATA_TYPE type, ProtoTensor *const request_tensor);\n\n  Status ParseReqCommonMsg(const std::shared_ptr<RestfulRequest> &restful_request,\n                           proto::PredictRequest *const request);\n\n  Status ParseInstancesMsg(const json &js_msg, proto::PredictRequest *const request);\n  Status GetInstancesType(const json &instances);\n  Status ParseKeyInstances(const json &instances, proto::PredictRequest *const request);\n  Status PaserKeyOneInstance(const json &instance_msg, proto::PredictRequest *const request);\n\n  Status ParseItemScalar(const json &value, ProtoTensor *const pb_tensor);\n  Status ParseItemArray(const json &value, ProtoTensor *const pb_tensor);\n  Status ParseItemObject(const json &value, ProtoTensor *const pb_tensor);\n  Status ParseItem(const json &value, ProtoTensor *const pb_tensor);\n\n  Status ParseRequest(const std::shared_ptr<RestfulRequest> 
&restful_request, proto::PredictRequest *const request);\n  Status ParseReply(const proto::PredictReply &reply, json *const out_json);\n  // parse reply:trans RequestReply to http msg\n  RequestType GetReqType(const std::string &str);\n  std::string GetReqTypeStr(RequestType req_type);\n\n  Status CheckReply(const ProtoTensor &pb_tensor);\n  Status ParseInstancesReply(const proto::PredictReply &reply, json *const out_json);\n  Status ParseReplyDetail(const proto::Tensor &tensor, json *const js);\n  Status ParseScalar(const ProtoTensor &pb_tensor, size_t index, json *const js);\n  Status RecursiveParseArray(const ProtoTensor &pb_tensor, size_t depth, size_t pos, json *const out_json);\n\n  template <typename T>\n  Status ParseScalarData(const ProtoTensor &pb_tensor, bool is_bytes, size_t index, json *const js);\n  template <typename T>\n  bool IsString();\n\n  RequestType request_type_{kInvalidType};\n  InstancesType instances_type_{kInvalidWay};\n  int64_t instances_nums_{0};\n  std::vector<std::string> request_type_list_ = {kInstancesRequest};\n  proto::PredictRequest request_;\n  proto::PredictReply reply_;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n#endif  // MINDSPORE_SERVING_MASTER_HTTP_PROCESS_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/restful/restful_request.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"master/restful/restful_request.h\"\n#include <event2/buffer.h>\n#include <event2/http.h>\n#include <evhttp.h>\n#include <algorithm>\n#include <utility>\n\nnamespace {\nconst char kUrlKeyModel[] = \"model\";\nconst char kUrlKeyVersion[] = \"version\";\nconst char kUrlSplit[] = \"/\";\nconst char kUrlKeyEnd[] = \":\";\n}  // namespace\n\nnamespace mindspore {\nnamespace serving {\nDecomposeEvRequest::DecomposeEvRequest(struct evhttp_request *request, int max_msg_size)\n    : event_request_(request), max_msg_size_(max_msg_size) {}\n\nstd::string DecomposeEvRequest::UrlQuery(const std::string &url, const std::string &key) const {\n  std::string::size_type start_pos(0);\n  if (key == kUrlKeyEnd) {\n    if ((start_pos = url_.find(kUrlKeyEnd)) != std::string::npos) {\n      return url_.substr(start_pos + 1, url_.size());\n    }\n  }\n\n  size_t key_size = key.size() + 1;\n  std::string::size_type end_pos(0);\n  if ((start_pos = url.find(key)) != std::string::npos) {\n    end_pos = std::min(url.find(kUrlSplit, start_pos + key_size), url.find(kUrlKeyEnd, start_pos + key_size));\n    if (end_pos == std::string::npos) {\n      return url.substr(start_pos + key_size);\n    }\n    return url.substr(start_pos + key_size, end_pos - start_pos - key_size);\n  }\n  return \"\";\n}\n\nStatus DecomposeEvRequest::GetPostMessageToJson() {\n  
Status status(SUCCESS);\n  std::string message;\n  size_t input_size = evbuffer_get_length(event_request_->input_buffer);\n  if (input_size == 0) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"http message invalid\";\n  } else if (input_size > max_msg_size_) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"http message is bigger than \" << max_msg_size_;\n  } else {\n    message.resize(input_size);\n    auto src_data = evbuffer_pullup(event_request_->input_buffer, -1);\n    if (src_data == nullptr) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"get http message failed.\";\n    }\n    if (memcpy_s(message.data(), input_size, src_data, input_size) != EOK) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"copy http message failed.\";\n    }\n  }\n  MSI_TIME_STAMP_START(ParseJson)\n  try {\n    request_message_ = nlohmann::json::parse(message);\n  } catch (nlohmann::json::exception &e) {\n    std::string json_exception = e.what();\n    MSI_LOG_ERROR << \"Illegal JSON format.\" + json_exception;\n    // Remove invalid character that cannot be converted to Json.\n    const std::string find_msg = \"invalid literal\";  // invalid literal; last read: '{invalid character}'\n    auto find_pos = json_exception.find(find_msg);\n    if (find_pos != std::string::npos) {\n      json_exception = json_exception.substr(0, find_pos + find_msg.size());\n    }\n    return INFER_STATUS(INVALID_INPUTS) << \"Illegal JSON format.\" + json_exception;\n  }\n  MSI_TIME_STAMP_END(ParseJson)\n\n  return status;\n}\n\nStatus DecomposeEvRequest::CheckRequestMethodValid() {\n  auto cmd = evhttp_request_get_command(event_request_);\n  if (cmd != EVHTTP_REQ_POST) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"http message only support POST right now\";\n  }\n  request_method_ = \"POST\";\n  return SUCCESS;\n}\n\nStatus DecomposeEvRequest::Decompose() {\n  Status status(SUCCESS);\n  status = CheckRequestMethodValid();\n  if (status != SUCCESS) {\n  
  return status;\n  }\n\n  status = GetPostMessageToJson();\n  if (status != SUCCESS) {\n    return status;\n  }\n\n  // eg: /model/resnet/version/1:predict\n  url_ = evhttp_request_get_uri(event_request_);\n  if (url_.empty()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"evhttp url is empty.\";\n  }\n  MSI_LOG_INFO << \"url_: \" << url_;\n\n  model_name_ = UrlQuery(url_, kUrlKeyModel);\n  if (model_name_.empty()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"please check url, the keyword:[model] must contain.\";\n  }\n  MSI_LOG_INFO << \"model_name_: \" << model_name_;\n  if (url_.find(kUrlKeyVersion) != std::string::npos) {\n    auto version_str = UrlQuery(url_, kUrlKeyVersion);\n    try {\n      auto version = std::stol(version_str);\n      if (version < 0 || version >= UINT32_MAX) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n               << \"please check url, version number range failed, request version number \" << version_str;\n      }\n      version_ = static_cast<uint32_t>(version);\n    } catch (const std::invalid_argument &) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << \"please check url, the keyword:[version] value invalid, request version number \" << version_str;\n    } catch (const std::out_of_range &) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << \"please check url, version number range failed, request version number \" << version_str;\n    }\n    MSI_LOG_INFO << \"version_: \" << version_;\n  }\n\n  service_method_ = UrlQuery(url_, kUrlKeyEnd);\n  if (service_method_.empty()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"please check url, the keyword:[service method] must contain.\";\n  }\n  MSI_LOG_INFO << \"service_method_: \" << service_method_;\n  return status;\n}\n\nRestfulRequest::RestfulRequest(std::shared_ptr<DecomposeEvRequest> request)\n    : decompose_event_request_(std::move(request)) {}\n\nRestfulRequest::~RestfulRequest() {\n  if 
(replay_buffer_ != nullptr) {\n    evbuffer_free(replay_buffer_);\n    replay_buffer_ = nullptr;\n  }\n}\n\nStatus RestfulRequest::RestfulReplayBufferInit() {\n  replay_buffer_ = evbuffer_new();\n  if (replay_buffer_ == nullptr) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"create restful replay buffer fail\";\n  }\n  return SUCCESS;\n}\n\nStatus RestfulRequest::RestfulReplay(const std::string &replay) {\n  if (replay_buffer_ == nullptr) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"replay_buffer_ is nullptr\";\n  }\n  if (decompose_event_request_ == nullptr) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"decompose_event_request_ is nullptr\";\n  }\n  auto &request = decompose_event_request_->event_request_;\n  if (request == nullptr) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"decompose_event_request_->event_request_ is nullptr\";\n  }\n  auto resp_headers = evhttp_request_get_output_headers(request);\n  (void)evhttp_add_header(resp_headers, \"Content-Type\", \"application/json\");\n  (void)evbuffer_add(replay_buffer_, replay.data(), replay.size());\n  evhttp_send_reply(request, HTTP_OK, \"Client\", replay_buffer_);\n  return SUCCESS;\n}\n\nvoid RestfulRequest::ErrorMessage(const Status &status) {\n  std::string out_error_str;\n  try {\n    nlohmann::json error_json = {{\"error_msg\", status.StatusMessage()}};\n    out_error_str = error_json.dump();\n  } catch (nlohmann::json::exception &e) {\n    nlohmann::json error_json = {{\"error_msg\", \"Illegal JSON format.\"}};\n    out_error_str = error_json.dump();\n  }\n  (void)RestfulReplay(out_error_str);\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/restful/restful_request.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_MASTER_RESTFUL_REQUEST_H\n#define MINDSPORE_SERVING_MASTER_RESTFUL_REQUEST_H\n\n#include <event2/event.h>\n#include <event2/http.h>\n#include <string>\n#include <memory>\n#include <nlohmann/json.hpp>\n#include \"common/serving_common.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass DecomposeEvRequest {\n public:\n  explicit DecomposeEvRequest(struct evhttp_request *request, int max_msg_size);\n  ~DecomposeEvRequest() = default;\n  std::string UrlQuery(const std::string &url, const std::string &key) const;\n  Status CheckRequestMethodValid();\n  Status Decompose();\n  Status GetPostMessageToJson();\n\n  evhttp_request *event_request_;\n  std::string request_method_;\n  std::string model_name_;\n  std::string url_;\n  std::string service_method_;\n  uint32_t version_{};\n  uint32_t max_msg_size_{};\n  nlohmann::json request_message_;\n};\n\nclass RestfulRequest {\n public:\n  explicit RestfulRequest(std::shared_ptr<DecomposeEvRequest> request);\n  ~RestfulRequest();\n\n  RestfulRequest(const RestfulRequest &other) = delete;\n  RestfulRequest &operator=(const RestfulRequest &other) = delete;\n\n  Status RestfulReplayBufferInit();\n  Status RestfulReplay(const std::string &replay);\n  void ErrorMessage(const Status &status);\n  std::shared_ptr<DecomposeEvRequest> decompose_event_request() { return 
decompose_event_request_; }\n\n private:\n  std::shared_ptr<DecomposeEvRequest> decompose_event_request_{nullptr};\n  evbuffer *replay_buffer_ = nullptr;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_MASTER_RESTFUL_REQUEST_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/restful/restful_server.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <memory>\n#include <vector>\n#include \"openssl/ssl.h\"\n#include \"openssl/err.h\"\n#include \"event2/bufferevent.h\"\n#include \"event2/http.h\"\n#include \"event2/bufferevent_ssl.h\"\n#include \"master/restful/http_handle.h\"\n#include \"master/restful/restful_server.h\"\n#include \"common/utils.h\"\n#include \"master/restful/http_process.h\"\n\nnamespace mindspore::serving {\nconst std::vector<std::string> kCiphers = {\n  \"ECDHE-RSA-AES128-GCM-SHA256\",   \"ECDHE-ECDSA-AES128-GCM-SHA256\", \"ECDHE-RSA-AES256-GCM-SHA384\",\n  \"ECDHE-ECDSA-AES256-GCM-SHA384\", \"ECDHE-RSA-CHACHA20-POLY1305\",   \"ECDHE-PSK-CHACHA20-POLY1305\",\n  \"ECDHE-ECDSA-AES128-CCM\",        \"ECDHE-ECDSA-AES256-CCM\",        \"ECDHE-ECDSA-CHACHA20-POLY1305\"};\n\nvoid RestfulServer::Committer(const std::shared_ptr<RestfulRequest> &restful_request) {\n  thread_pool_.commit([restful_request]() { RestfulService::RunRestful(restful_request); });\n}\n\nvoid RestfulServer::DispatchEvHttpRequest(evhttp_request *request) {\n  Status status(SUCCESS);\n\n  auto de_request = std::make_unique<DecomposeEvRequest>(request, max_msg_size_);\n  Status de_status = de_request->Decompose();\n  auto restful_request = std::make_shared<RestfulRequest>(std::move(de_request));\n  status = restful_request->RestfulReplayBufferInit();\n  if (status != SUCCESS) {\n    
restful_request->ErrorMessage(status);\n    return;\n  }\n\n  if (de_status != SUCCESS) {\n    restful_request->ErrorMessage(de_status);\n    return;\n  }\n  Committer(restful_request);\n}\n\nvoid RestfulServer::EvCallBack(evhttp_request *request, void *arg) {\n  auto *restful_server = static_cast<RestfulServer *>(arg);\n  restful_server->DispatchEvHttpRequest(const_cast<evhttp_request *>(request));\n}\n\nStatus RestfulServer::CreatRestfulServer(int time_out_second) {\n  evthread_use_pthreads();\n  auto status = InitEvHttp();\n  if (status != SUCCESS) {\n    return status;\n  }\n  evhttp_set_gencb(event_http_, &EvCallBack, this);\n  evhttp_set_timeout(event_http_, time_out_second);\n  return SUCCESS;\n}\n\nStatus RestfulServer::CreatHttpsServer(int time_out_second, const SSLConfig &ssl_config) {\n  InitOpenSSL();\n  evthread_use_pthreads();\n\n  Status status;\n  status = InitEvHttp();\n  if (status != SUCCESS) {\n    return status;\n  }\n\n  SSL_CTX *ctx = SSL_CTX_new(SSLv23_method());\n  SSL_CTX_set_options(ctx, SSL_OP_SINGLE_DH_USE | SSL_OP_SINGLE_ECDH_USE | SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 |\n                             SSL_OP_NO_TLSv1 | SSL_OP_NO_TLSv1_1);\n  std::string cipher_list = kCiphers[0];\n  for (size_t index = 1; index < kCiphers.size(); ++index) {\n    cipher_list += ':';\n    cipher_list += kCiphers[index];\n  }\n\n  if (!SSL_CTX_set_cipher_list(ctx, cipher_list.c_str())) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"SSL use set cipher list failed!\";\n    return status;\n  }\n  if (ssl_config.verify_client) {\n    SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, nullptr);\n    if (!ssl_config.custom_ca.empty() &&\n        SSL_CTX_load_verify_locations(ctx, ssl_config.custom_ca.c_str(), nullptr) != 1) {\n      status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n               << \"Serving Error: load root certificate from \" << ssl_config.custom_ca << \" failed\";\n      return status;\n    } else {\n      if 
(SSL_CTX_set_default_verify_paths(ctx) != 1) {\n        status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: set default verify paths failed\";\n        return status;\n      }\n    }\n  }\n  EC_KEY *ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);\n  if (ecdh == nullptr) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: EC_KEY_new_by_curve_name failed\";\n    return status;\n  }\n  if (!SSL_CTX_set_tmp_ecdh(ctx, ecdh)) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: SSL_CTX_set_tmp_ecdh failed\";\n    return status;\n  }\n\n  status = ServerSetupCerts(ctx, ssl_config);\n  if (status != SUCCESS) {\n    return status;\n  }\n\n  evhttp_set_bevcb(event_http_, bevcb, ctx);\n  evhttp_set_gencb(event_http_, &EvCallBack, this);\n  evhttp_set_timeout(event_http_, time_out_second);\n  return SUCCESS;\n}\n\nStatus RestfulServer::ServerSetupCerts(SSL_CTX *ctx, const SSLConfig &ssl_config) {\n  Status status;\n  if (SSL_CTX_use_certificate_chain_file(ctx, ssl_config.certificate.c_str()) != 1) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n             << \"Serving Error: load certificate_chain from \" << ssl_config.certificate << \" failed\";\n    return status;\n  }\n  if (SSL_CTX_use_PrivateKey_file(ctx, ssl_config.private_key.c_str(), SSL_FILETYPE_PEM) != 1) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n             << \"Serving Error: load private_key from \" << ssl_config.private_key << \" failed\";\n    return status;\n  }\n  if (SSL_CTX_check_private_key(ctx) != 1) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n             << \"Serving Error: private_key is not consistent with certificate \" << ssl_config.certificate;\n    return status;\n  }\n  return SUCCESS;\n}\n\nstruct bufferevent *RestfulServer::bevcb(struct event_base *base, void *args) {\n  struct bufferevent *r;\n  SSL_CTX *ctx = static_cast<SSL_CTX *>(args);\n  r = bufferevent_openssl_socket_new(base, -1, SSL_new(ctx), 
BUFFEREVENT_SSL_ACCEPTING, BEV_OPT_CLOSE_ON_FREE);\n  return r;\n}\n\nStatus RestfulServer::InitEvHttp() {\n  event_base_ = event_base_new();\n  Status status(SUCCESS);\n  if (event_base_ == nullptr) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n             << \"Serving Error: RESTful server start failed, new http event failed\";\n    return status;\n  }\n  event_http_ = evhttp_new(event_base_);\n  if (event_http_ == nullptr) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n             << \"Serving Error: RESTful server start failed, create http server failed\";\n    event_base_free(event_base_);\n    event_base_ = nullptr;\n    return status;\n  }\n  return status;\n}\n\nvoid RestfulServer::FreeEvhttp() {\n  if (event_http_ != nullptr) {\n    evhttp_free(event_http_);\n    event_http_ = nullptr;\n  }\n  if (event_base_ != nullptr) {\n    event_base_free(event_base_);\n    event_base_ = nullptr;\n  }\n}\n\nvoid RestfulServer::RunEvhttp() {\n  auto event_http_run = [this]() {\n    MSI_LOG(INFO) << \"Serving RESTful server listening on \" << socket_address_;\n    std::cout << \"Serving: Serving RESTful server start success, listening on \" << socket_address_ << std::endl;\n    event_base_dispatch(event_base_);\n  };\n  event_thread_ = std::thread(event_http_run);\n}\n\nStatus RestfulServer::StartRestfulServer() {\n  Status status(SUCCESS);\n  uint16_t port;\n  std::string ip;\n  status = GetSocketAddress(&ip, &port);\n  if (status != SUCCESS) {\n    return status;\n  }\n  auto ret = evhttp_bind_socket(event_http_, ip.c_str(), port);\n  if (ret != 0) {\n    FreeEvhttp();\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n             << \"Serving Error: RESTful server start failed, bind to the socket address \" << socket_address_\n             << \" failed\";\n    return status;\n  }\n  RunEvhttp();\n  return SUCCESS;\n}\n\nStatus RestfulServer::GetSocketAddress(std::string *ip, uint16_t *port) {\n  MSI_EXCEPTION_IF_NULL(ip);\n  
MSI_EXCEPTION_IF_NULL(port);\n  Status status;\n  std::string prefix = \"unix:\";\n  if (socket_address_.substr(0, prefix.size()) == prefix) {\n    status = INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Serving Error: RESTful server does not support binding to unix domain socket\";\n    return status;\n  }\n  status = common::CheckAddress(socket_address_, \"RESTful server\", ip, port);\n  if (status != SUCCESS) {\n    return status;\n  }\n  return SUCCESS;\n}\n\nStatus RestfulServer::Start(const std::string &socket_address, const SSLConfig &ssl_config, int max_msg_size,\n                            int time_out_second) {\n  Status status(SUCCESS);\n  if (in_running_) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: RESTful server is already running\";\n  }\n  socket_address_ = socket_address;\n  constexpr int mbytes_to_bytes = static_cast<int>(1u << 20);\n  max_msg_size_ = max_msg_size * mbytes_to_bytes;\n\n  if (ssl_config.use_ssl) {\n    status = CreatHttpsServer(time_out_second, ssl_config);\n  } else {\n    status = CreatRestfulServer(time_out_second);\n  }\n\n  if (status != SUCCESS) {\n    return status;\n  }\n  status = StartRestfulServer();\n  if (status != SUCCESS) {\n    return status;\n  }\n  in_running_ = true;\n  return status;\n}\n\nvoid RestfulServer::Stop() {\n  if (in_running_) {\n    event_base_loopexit(event_base_, nullptr);\n    event_thread_.join();\n  }\n  in_running_ = false;\n  FreeEvhttp();\n}\n\nvoid RestfulServer::InitOpenSSL() {\n#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || (defined(LIBRESSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER < 0x20700000L)\n  SSL_library_init();\n  ERR_load_crypto_strings();\n  SSL_load_error_strings();\n  OpenSSL_add_all_algorithms();\n#endif\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/restful/restful_server.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_MASTER_RESTFUL_SERVER_H\n#define MINDSPORE_SERVING_MASTER_RESTFUL_SERVER_H\n\n#include <event.h>\n#include <event2/event.h>\n#include <event2/http.h>\n#include <event2/listener.h>\n#include <event2/thread.h>\n#include <evhttp.h>\n#include <memory>\n#include <future>\n#include <string>\n#include <utility>\n\n#include \"openssl/ssl.h\"\n#include \"openssl/err.h\"\n#include \"event2/bufferevent.h\"\n\n#include \"master/restful/restful_request.h\"\n#include \"common/serving_common.h\"\n#include \"common/thread_pool.h\"\n#include \"common/ssl_config.h\"\n\nnamespace mindspore::serving {\nconstexpr const uint32_t kDefaultRestfulThreadPoolNum = 3;\n\nclass RestfulServer {\n public:\n  RestfulServer() : thread_pool_(kDefaultRestfulThreadPoolNum) {}\n  ~RestfulServer() { Stop(); }\n\n  Status Start(const std::string &socket_address, const SSLConfig &ssl_config, int max_msg_size, int time_out_second);\n  void Stop();\n\n private:\n  Status CreatRestfulServer(int time_out_second);\n  Status CreatHttpsServer(int time_out_second, const SSLConfig &ssl_config);\n  static void EvCallBack(evhttp_request *request, void *arg);\n  void DispatchEvHttpRequest(evhttp_request *request);\n  void Committer(const std::shared_ptr<RestfulRequest> &restful_request);\n  Status StartRestfulServer();\n  Status GetSocketAddress(std::string 
*ip, uint16_t *port);\n  static void InitOpenSSL();\n  static Status ServerSetupCerts(SSL_CTX *ctx, const SSLConfig &ssl_config);\n  static struct bufferevent *bevcb(struct event_base *base, void *args);\n  Status InitEvHttp();\n  void FreeEvhttp();\n  void RunEvhttp();\n\n  std::string socket_address_;\n  int max_msg_size_ = 0;\n  bool in_running_ = false;\n\n  struct evhttp *event_http_ = nullptr;\n  struct event_base *event_base_ = nullptr;\n  std::thread event_thread_;\n  ThreadPool thread_pool_;\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_MASTER_RESTFUL_SERVER_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/servable_endpoint.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"master/servable_endpoint.h\"\n\nnamespace mindspore::serving {\nServableEndPoint::ServableEndPoint(const ServableReprInfo &repr) : worker_repr_(repr) {\n  version_number_ = worker_repr_.version_number;\n}\n\nServableEndPoint::~ServableEndPoint() { Clear(); }\n\nStatus ServableEndPoint::DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                                       const PredictOnFinish &on_finish) {\n  auto method_name = request.servable_spec().method_name();\n  auto it = model_thread_list_.find(method_name);\n  if (it == model_thread_list_.end()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Cannot find model thread of method \" << method_name;\n  }\n  auto status = it->second->DispatchAsync(request, reply, on_finish);\n  return status;\n}\n\nStatus ServableEndPoint::RegisterWorker(const ServableRegSpec &servable_spec, std::shared_ptr<WorkerContext> worker) {\n  auto &methods = servable_spec.methods;\n  // first init\n  if (worker_contexts_.empty()) {\n    methods_ = servable_spec.methods;\n    if (version_number_ == 0) {\n      version_number_ = servable_spec.version_number;\n    }\n    for (auto &method : methods) {\n      if (servable_spec.batch_size <= 0) {\n        MSI_LOG_ERROR << \"Register Worker,method batch_size should be greater than 0\";\n        return FAILED;\n   
   }\n      auto model_thread = std::make_shared<ModelThread>(servable_spec.servable_name, method.name,\n                                                        servable_spec.version_number, servable_spec.batch_size, method);\n      (void)model_thread_list_.emplace(method.name, model_thread);\n    }\n  }\n  worker_contexts_.push_back(worker);\n  std::vector<std::string> method_names;\n  for (auto &method : methods) {\n    auto it = model_thread_list_.find(method.name);\n    if (it == model_thread_list_.end()) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Cannot find method \" << method.name << \" registered before\";\n    }\n    it->second->AddWorker(worker->GetWorkerPid(), worker);\n    // cppcheck-suppress useStlAlgorithm\n    method_names.push_back(method.name);\n  }\n  MSI_LOG_INFO << \"Register to servable endpoint success, servable name: \" << worker_repr_.servable_name\n               << \", version number: \" << servable_spec.version_number << \", methods: \" << method_names\n               << \", worker address: \" << worker->GetWorkerAddress();\n  return SUCCESS;\n}\n\nStatus ServableEndPoint::UnregisterWorker(const std::string &worker_address) {\n  auto it = std::find_if(worker_contexts_.begin(), worker_contexts_.end(),\n                         [worker_address](const std::shared_ptr<WorkerContext> &item) {\n                           return item->GetWorkerAddress() == worker_address;\n                         });\n  if (it != worker_contexts_.end()) {\n    auto worker = *it;\n    MSI_LOG_INFO << \"Unregister worker success, \" << worker_repr_.repr << \", version number: \" << version_number_\n                 << \", worker address: \" << worker_address;\n    for (auto &model_thread : model_thread_list_) {\n      model_thread.second->DelWorker(worker->GetWorkerPid());\n    }\n    (void)worker_contexts_.erase(it);\n    return SUCCESS;\n  }\n  MSI_LOG_INFO << \"Worker has already been unregistered, \" << worker_repr_.repr\n               << \", 
version number: \" << version_number_ << \", worker address: \" << worker_address;\n  return FAILED;\n}\n\nvoid ServableEndPoint::Clear() {\n  worker_contexts_.clear();\n  model_thread_list_.clear();\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/servable_endpoint.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_MASTER_SERVABLE_ENDPOINT_H\n#define MINDSPORE_SERVING_MASTER_SERVABLE_ENDPOINT_H\n\n#include <memory>\n#include <string>\n#include <unordered_map>\n#include <vector>\n#include <shared_mutex>\n#include <map>\n#include \"common/serving_common.h\"\n#include \"master/worker_context.h\"\n#include \"master/model_thread.h\"\n\nnamespace mindspore::serving {\n// visit by dispatcher\nclass ServableEndPoint {\n public:\n  explicit ServableEndPoint(const ServableReprInfo &repr);\n  ~ServableEndPoint();\n  Status DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                       const PredictOnFinish &on_finish);\n\n  Status RegisterWorker(const ServableRegSpec &servable_spec, std::shared_ptr<WorkerContext> worker);\n  Status UnregisterWorker(const std::string &worker_address);\n  void Clear();\n\n  std::string GetServableName() const { return worker_repr_.servable_name; }\n  uint64_t GetVersionNumber() const { return version_number_; }\n  std::vector<ServableMethodInfo> GetMethods() const { return methods_; }\n\n private:\n  std::map<std::string, std::shared_ptr<ModelThread>> model_thread_list_;\n  ServableReprInfo worker_repr_;\n  std::vector<ServableMethodInfo> methods_;\n  std::vector<std::shared_ptr<WorkerContext>> worker_contexts_;\n  uint32_t version_number_ = 0;\n};\n\n}  
// namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_MASTER_SERVABLE_ENDPOINT_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/server.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"master/server.h\"\n\n#include <memory>\n#include <string>\n\n#include \"common/serving_common.h\"\n#include \"master/grpc/grpc_process.h\"\n#include \"master/grpc/grpc_server.h\"\n\nnamespace mindspore {\nnamespace serving {\nStatus Server::StartGrpcServer(const std::string &socket_address, const SSLConfig &ssl_config, int max_msg_mb_size) {\n  if (grpc_async_server_) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: Serving gRPC server is already running\";\n  }\n  if (max_msg_mb_size > gRpcMaxMBMsgSize) {\n    MSI_LOG_WARNING << \"The maximum Serving gRPC message size is 512MB and will be updated from \" << max_msg_mb_size\n                    << \"MB to 512MB\";\n    max_msg_mb_size = gRpcMaxMBMsgSize;\n  }\n  grpc_async_server_ = std::make_shared<ServiceGrpcServer>(dispatcher_);\n  return grpc_async_server_->Start(socket_address, ssl_config, max_msg_mb_size, \"Serving gRPC\");\n}\n\nStatus Server::StartGrpcMasterServer(const std::string &master_address) {\n  if (master_async_server_) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: Master gRPC server is already running\";\n  }\n  SSLConfig ssl_config;\n  ssl_config.use_ssl = false;\n  master_async_server_ = std::make_shared<MasterGrpcServer>(dispatcher_);\n  return master_async_server_->Start(master_address, ssl_config, 
gRpcMaxMBMsgSize, \"Master\");\n}\n\nStatus Server::StartRestfulServer(const std::string &socket_address, const SSLConfig &ssl_config, int max_msg_mb_size,\n                                  int time_out_second) {\n  return restful_server_.Start(socket_address, ssl_config, max_msg_mb_size, time_out_second);\n}\n\nvoid Server::Clear() {\n  MSI_LOG_INFO << \"Server start to clean\";\n  dispatcher_->Clear();\n  restful_server_.Stop();\n  if (master_async_server_) {\n    master_async_server_->Stop();\n    master_async_server_ = nullptr;\n  }\n  if (grpc_async_server_) {\n    grpc_async_server_->Stop();\n    grpc_async_server_ = nullptr;\n  }\n  MSI_LOG_INFO << \"Server end to clean\";\n}\n\nServer::Server() = default;\n\nServer &Server::Instance() {\n  static Server server;\n  return server;\n}\n\nbool Server::OnlyModelStage(const std::string &servable_name) { return dispatcher_->OnlyModelStage(servable_name); }\n\nServer::~Server() = default;\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/server.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_SERVING_MASTER_SERVER_H\n#define MINDSPORE_SERVING_MASTER_SERVER_H\n\n#include <memory>\n#include <string>\n#include \"common/serving_common.h\"\n#include \"common/grpc_server.h\"\n#include \"master/restful/restful_server.h\"\n#include \"master/dispacther.h\"\n#include \"master/grpc/grpc_server.h\"\n#include \"master/grpc/master_server.h\"\n#include \"common/ssl_config.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API Server {\n public:\n  Server();\n  ~Server();\n  Status StartGrpcServer(const std::string &socket_address, const SSLConfig &ssl_config,\n                         int max_msg_mb_size = gRpcDefaultMsgMBSize);\n  Status StartRestfulServer(const std::string &socket_address, const SSLConfig &ssl_config,\n                            int max_msg_mb_size = gRpcDefaultMsgMBSize, int time_out_second = 100);\n  Status StartGrpcMasterServer(const std::string &master_address);\n  void Clear();\n  bool OnlyModelStage(const std::string &servable_name);\n  std::shared_ptr<Dispatcher> GetDispatcher() { return dispatcher_; }\n\n  static Server &Instance();\n\n private:\n  std::shared_ptr<Dispatcher> dispatcher_ = std::make_shared<Dispatcher>();\n  std::shared_ptr<ServiceGrpcServer> grpc_async_server_ = nullptr;\n  std::shared_ptr<MasterGrpcServer> master_async_server_ = nullptr;\n  RestfulServer 
restful_server_;\n};\n}  // namespace serving\n}  // namespace mindspore\n#endif  // MINDSPORE_SERVING_MASTER_SERVER_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/worker_context.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"master/worker_context.h\"\n#include \"master/servable_endpoint.h\"\n#include \"master/server.h\"\n\nnamespace mindspore::serving {\n// from py\nstd::shared_ptr<WorkerContext> WorkerContext::PyInitWorkerContext(std::string servable_name, uint32_t version_number,\n                                                                  std::string repr, uint64_t worker_pid) {\n  ServableReprInfo servable_repr;\n  servable_repr.servable_name = servable_name;\n  servable_repr.version_number = version_number;\n  servable_repr.repr = repr;\n  return Server::Instance().GetDispatcher()->InitWorkerContext(servable_repr, worker_pid);\n}\n\n// from Dispatcher\nStatus WorkerContext::DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                                    const PredictOnFinish &on_finish) {\n  auto shared_this = shared_from_this();\n  PredictOnFinish callback = [shared_this, on_finish, reply]() {\n    auto &error_msg = reply->error_msg();\n    auto has_error =\n      std::any_of(error_msg.begin(), error_msg.end(), [](const proto::ErrorMsg &msg) { return msg.error_code() != 0; });\n    if (!has_error && reply->instances_size() != 0) {\n      shared_this->normal_handled_count += 1;\n      shared_this->total_normal_handled_count += 1;\n    } else {\n      shared_this->abnormal_handled_count += 1;\n   
   shared_this->total_abnormal_handled_count += 1;\n    }\n    on_finish();\n  };\n  std::unique_lock<std::mutex> lock(lock_);\n  if (status_ != kWorkerStatusReady && !notify_worker_) {\n    return INFER_STATUS_LOG_ERROR(WORKER_UNAVAILABLE) << \"Worker is not ready\";\n  }\n  request_count += 1;\n  return notify_worker_->DispatchAsync(request, reply, callback);\n}\n\n// from worker\nvoid WorkerContext::OnWorkerRegRequest(const WorkerRegSpec &worker_spec, std::shared_ptr<BaseNotifyWorker> notify) {\n  std::unique_lock<std::mutex> lock(lock_);\n  MSI_LOG_INFO << \"Receive worker registered message, \" << servable_repr_.repr << \", worker pid: \" << worker_pid_\n               << \", worker address: \" << worker_spec.worker_address;\n  worker_spec_ = worker_spec;\n  notify_worker_ = notify;\n}\n\nvoid WorkerContext::OnReady() {\n  std::unique_lock<std::mutex> lock(lock_);\n  MSI_LOG_INFO << \"Notify worker ready, \" << servable_repr_.repr << \", worker pid: \" << worker_pid_\n               << \", worker address: \" << worker_spec_.worker_address;\n  status_ = kWorkerStatusReady;\n}\n\nvoid WorkerContext::OnExit() {\n  std::unique_lock<std::mutex> lock(lock_);\n  MSI_LOG_INFO << \"Notify worker exit, \" << servable_repr_.repr << \", worker pid: \" << worker_pid_\n               << \", worker address: \" << worker_spec_.worker_address;\n  status_ = kWorkerStatusNotifyExit;\n  notify_worker_ = nullptr;\n}\n\nvoid WorkerContext::OnStartError(const std::string &notified_error) {\n  std::unique_lock<std::mutex> lock(lock_);\n  MSI_LOG_ERROR << \"Notify worker start-up error, \" << servable_repr_.repr << \", worker pid: \" << worker_pid_;\n  status_ = kWorkerStatusNotifyFailed;\n  notify_worker_ = nullptr;\n  notified_error_ = notified_error;\n}\n\nvoid WorkerContext::OnNotAvailable() {\n  std::unique_lock<std::mutex> lock(lock_);\n  MSI_LOG_ERROR << \"Notify worker not available, \" << servable_repr_.repr << \", worker pid: \" << worker_pid_;\n  if (status_ != 
kWorkerStatusNotifyExit && status_ != kWorkerStatusNotAlive) {\n    status_ = kWorkerStatusNotAvailable;\n  }\n  notify_worker_ = nullptr;\n}\n\nvoid WorkerContext::OnNotAlive() {\n  if (HasExitNotified() || HasErrorNotified()) {\n    return;\n  }\n  std::unique_lock<std::mutex> lock(lock_);\n  MSI_LOG_INFO << \"Notify worker not alive, \" << servable_repr_.repr << \", worker pid: \" << worker_pid_\n               << \", worker address: \" << worker_spec_.worker_address;\n  if (status_ != kWorkerStatusNotifyExit) {\n    status_ = kWorkerStatusNotAlive;\n  }\n  notify_worker_ = nullptr;\n}\n\n// from py\nvoid WorkerContext::PyNotifyNotAlive() { Server::Instance().GetDispatcher()->NotifyWorkerNotAlive(this); }\nvoid WorkerContext::PyNotifyStartFailed(const std::string &notified_error) { OnStartError(notified_error); }\nvoid WorkerContext::NotifyNotAvailable() { Server::Instance().GetDispatcher()->NotifyWorkerNotAvailable(this); }\n\nvoid WorkerContext::UpdateWorkerPid(uint64_t new_worker_pid) {\n  std::unique_lock<std::mutex> lock(lock_);\n  MSI_LOG_INFO << \"Update worker pid from \" << worker_pid_ << \" to \" << new_worker_pid;\n  if (status_ != kWorkerStatusReady) {\n    status_ = kWorkerStatusStarting;\n  }\n  worker_pid_ = new_worker_pid;\n  normal_handled_count = 0;\n  abnormal_handled_count = 0;\n}\n\nvoid WorkerContext::Clear() {\n  std::unique_lock<std::mutex> lock(lock_);\n  notify_worker_ = nullptr;\n  status_ = kWorkerStatusNotAlive;\n}\n\nbool WorkerContext::OwnDevice() const { return worker_spec_.servable_spec.own_device; }\n\nvoid WorkerContext::PrintStatus() const {\n  auto repr = servable_repr_.repr;\n  switch (status_) {\n    case kWorkerStatusNotAlive:\n      MSI_LOG_INFO << \"worker \" << GetWorkerPid() << \" status is kWorkerStatusNotAlive, \" << repr;\n      break;\n    case kWorkerStatusStarting:\n      MSI_LOG_INFO << \"worker \" << GetWorkerPid() << \" status is kWorkerStatusStarting, \" << repr;\n      break;\n    case kWorkerStatusReady:\n  
    MSI_LOG_INFO << \"worker \" << GetWorkerPid() << \" status is kWorkerStatusReady, \" << repr;\n      break;\n    case kWorkerStatusNotifyExit:\n      MSI_LOG_INFO << \"worker \" << GetWorkerPid() << \" status is kWorkerStatusNotifyExit, \" << repr;\n      break;\n    case kWorkerStatusNotifyFailed:\n      MSI_LOG_INFO << \"worker \" << GetWorkerPid() << \" status is kWorkerStatusNotifyFailed, \" << repr;\n      break;\n    case kWorkerStatusNotAvailable:\n      MSI_LOG_INFO << \"worker \" << GetWorkerPid() << \" status is kWorkerStatusNotAvailable, \" << repr;\n      break;\n  }\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/master/worker_context.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_MASTER_WORKER_CONTEXT_H\n#define MINDSPORE_SERVING_MASTER_WORKER_CONTEXT_H\n\n#include <memory>\n#include <string>\n#include <unordered_map>\n#include <vector>\n#include <mutex>\n#include \"proto/ms_worker.grpc.pb.h\"\n#include \"common/serving_common.h\"\n#include \"master/notify_worker/base_notify.h\"\n\nnamespace mindspore::serving {\nclass ServableEndPoint;\n\nenum WorkerStatus {\n  kWorkerStatusNotAlive = 1,\n  kWorkerStatusStarting,\n  kWorkerStatusReady,\n  kWorkerStatusNotifyExit,\n  kWorkerStatusNotifyFailed,\n  kWorkerStatusNotAvailable,\n};\n\nstruct ServableReprInfo {\n  std::string servable_name;\n  uint32_t version_number = 0;\n  std::string repr;\n};\n\nclass MS_API WorkerContext : public std::enable_shared_from_this<WorkerContext> {\n public:\n  WorkerContext() = default;\n  ~WorkerContext() { Clear(); }\n  bool HasErrorNotified() const { return status_ == kWorkerStatusNotifyFailed; }\n  bool HasExitNotified() const { return status_ == kWorkerStatusNotifyExit; }\n  std::string GetNotifiedError() const { return notified_error_; }\n  bool HasReady() const { return status_ == kWorkerStatusReady; }\n  bool IsInStarting() const { return status_ == kWorkerStatusStarting; }\n  bool IsUnavailable() const { return status_ == kWorkerStatusNotAvailable; }\n  void PrintStatus() const;\n  uint64_t 
GetNormalHandledCount() const { return normal_handled_count; }\n  uint64_t GetWorkerPid() const { return worker_pid_; }\n  WorkerRegSpec GetWorkerSpec() const { return worker_spec_; }\n  ServableReprInfo GetServableReprInfo() const { return servable_repr_; }\n  std::string GetWorkerAddress() const { return worker_spec_.worker_address; }\n\n  void InitServableReprInfo(const ServableReprInfo &repr) { servable_repr_ = repr; }\n  // from py\n  static std::shared_ptr<WorkerContext> PyInitWorkerContext(std::string servable_name, uint32_t version_number,\n                                                            std::string repr, uint64_t worker_pid);\n  void PyNotifyNotAlive();\n  void PyNotifyStartFailed(const std::string &notified_error);\n  void NotifyNotAvailable();\n  void UpdateWorkerPid(uint64_t new_worker_pid);\n  // from Dispatcher\n  Status DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                       const PredictOnFinish &on_finish);\n  // from worker\n  void OnWorkerRegRequest(const WorkerRegSpec &worker_spec, std::shared_ptr<BaseNotifyWorker> notify);\n  void OnReady();\n  void OnExit();\n  void OnStartError(const std::string &notified_error);\n  void OnNotAvailable();\n  // from py\n  void OnNotAlive();\n  void Clear();\n  bool OwnDevice() const;\n\n private:\n  std::mutex lock_;\n  ServableReprInfo servable_repr_;\n  uint32_t device_id_ = 0;\n  uint64_t worker_pid_ = 0;\n\n  // from worker register info\n  WorkerRegSpec worker_spec_;\n  std::shared_ptr<BaseNotifyWorker> notify_worker_ = nullptr;\n  // from python env\n  WorkerStatus status_ = kWorkerStatusNotAlive;\n  std::string notified_error_;\n  std::atomic_uint64_t request_count = 0;\n  std::atomic_uint64_t total_normal_handled_count = 0;\n  std::atomic_uint64_t total_abnormal_handled_count = 0;\n  std::atomic_uint64_t normal_handled_count = 0;\n  std::atomic_uint64_t abnormal_handled_count = 0;\n};\n\n}  // namespace mindspore::serving\n\n#endif  // 
MINDSPORE_SERVING_MASTER_WORKER_CONTEXT_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/python/agent/agent_py.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"python/agent/agent_py.h\"\n#include \"common/exit_handle.h\"\n#include \"worker/distributed_worker/agent_startup.h\"\n#include \"worker/distributed_worker/worker_agent.h\"\n\nnamespace mindspore::serving {\nDistributedServableConfig PyAgent::GetAgentsConfigsFromWorker(const std::string &distributed_address) {\n  auto status = WorkerAgentStartUp::Instance().GetAgentsConfigsFromWorker(distributed_address);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n\n  DistributedServableConfig config;\n  status = WorkerAgentStartUp::Instance().GetDistributedServableConfig(&config);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n  return config;\n}\n\nvoid PyAgent::NotifyFailed(const std::string &distributed_address) {\n  WorkerAgentStartUp::Instance().NotifyFailed(distributed_address);\n}\n\nvoid PyAgent::StartAgent(const AgentStartUpConfig &start_config, const std::string &dec_key,\n                         const std::string &dec_mode) {\n  auto status = WorkerAgent::Instance().StartAgent(start_config, dec_key, dec_mode);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n}\n\nvoid PyAgent::WaitAndClear() {\n  {\n    py::gil_scoped_release release;\n    
ExitSignalHandle::Instance().AgentWait();\n  }\n  WorkerAgent::Instance().Clear();\n  MSI_LOG_INFO << \"Python agent end wait and clear\";\n}\n\nvoid PyAgent::StopAndClear() {\n  ExitSignalHandle::Instance().Stop();\n  WorkerAgent::Instance().Clear();\n}\n\nvoid PyAgent::StartupNotifyExit(const std::string &distributed_address, const std::string &agent_ip) {\n  WorkerAgentStartUp::Instance().StartupNotifyExit(distributed_address, agent_ip);\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/python/agent/agent_py.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVER_AGENT_PY_H\n#define MINDSPORE_SERVER_AGENT_PY_H\n\n#include <pybind11/pybind11.h>\n#include <pybind11/numpy.h>\n#include <pybind11/stl.h>\n#include <string>\n#include <memory>\n#include \"common/serving_common.h\"\n#include \"worker/distributed_worker/common.h\"\n\nnamespace py = pybind11;\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API PyAgent {\n public:\n  static void StartAgent(const AgentStartUpConfig &start_config, const std::string &dec_key,\n                         const std::string &dec_mode);\n\n  static DistributedServableConfig GetAgentsConfigsFromWorker(const std::string &distributed_address);\n  static void WaitAndClear();\n  static void StopAndClear();\n  // from start up, not agent\n  static void NotifyFailed(const std::string &distributed_address);\n  static void StartupNotifyExit(const std::string &distributed_address, const std::string &agent_ip);\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVER_AGENT_PY_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/python/master/master_py.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"python/master/master_py.h\"\n#include \"common/exit_handle.h\"\n#include \"master/server.h\"\n\nnamespace mindspore::serving {\nvoid PyMaster::StartGrpcServer(const std::string &socket_address, const SSLConfig &ssl_config, int max_msg_mb_size) {\n  auto status = Server::Instance().StartGrpcServer(socket_address, ssl_config, max_msg_mb_size);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n}\n\nvoid PyMaster::StartGrpcMasterServer(const std::string &master_address) {\n  auto status = Server::Instance().StartGrpcMasterServer(master_address);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n}\n\nvoid PyMaster::StartRestfulServer(const std::string &socket_address, const SSLConfig &ssl_config, int max_msg_mb_size) {\n  auto status = Server::Instance().StartRestfulServer(socket_address, ssl_config, max_msg_mb_size);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n}\n\nvoid PyMaster::WaitAndClear() {\n  {\n    py::gil_scoped_release release;\n    ExitSignalHandle::Instance().MasterWait();\n  }\n  Server::Instance().Clear();\n  MSI_LOG_INFO << \"Python server end wait and clear\";\n}\n\nvoid PyMaster::StopAndClear() {\n  ExitSignalHandle::Instance().Stop();\n 
 Server::Instance().Clear();\n}\n\nbool PyMaster::OnlyModelStage(const std::string &servable_name) {\n  return Server::Instance().OnlyModelStage(servable_name);\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/python/master/master_py.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVER_MASTER_PY_H\n#define MINDSPORE_SERVER_MASTER_PY_H\n\n#include <pybind11/pybind11.h>\n#include <pybind11/numpy.h>\n#include <pybind11/stl.h>\n#include <string>\n#include <memory>\n#include \"common/serving_common.h\"\n#include \"common/ssl_config.h\"\n\nnamespace py = pybind11;\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API PyMaster {\n public:\n  static void StartGrpcServer(const std::string &socket_address, const SSLConfig &ssl_config,\n                              int max_msg_mb_size = 100);\n  static void StartGrpcMasterServer(const std::string &master_address);\n  static void StartRestfulServer(const std::string &socket_address, const SSLConfig &ssl_config,\n                                 int max_msg_mb_size = 100);\n  static void WaitAndClear();\n  static void StopAndClear();\n\n  static bool OnlyModelStage(const std::string &servable_name);\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVER_MASTER_PY_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/python/serving_py.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <string>\n#include \"python/worker/worker_py.h\"\n#include \"python/worker/servable_py.h\"\n#include \"python/tensor_py.h\"\n#include \"common/servable.h\"\n#include \"common/ssl_config.h\"\n#include \"master/server.h\"\n#include \"master/master_context.h\"\n#include \"master/worker_context.h\"\n#include \"worker/context.h\"\n#include \"worker/stage_function.h\"\n#include \"python/master/master_py.h\"\n#include \"python/agent/agent_py.h\"\n#include \"common/exit_handle.h\"\n#include \"worker/distributed_worker/worker_agent.h\"\n\nnamespace mindspore::serving {\nvoid PyRegServable(pybind11::module *m_ptr) {\n  auto &m = *m_ptr;\n  // avoid as numpy object memory copy in PyTensor::AsPythonData\n  py::class_<TensorBase, TensorBasePtr>(m, \"Tensor_\");\n\n  py::class_<PyStageFunctionStorage, std::shared_ptr<PyStageFunctionStorage>>(m, \"StageFunctionStorage_\")\n    .def(py::init<>())\n    .def_static(\"get_instance\", &PyStageFunctionStorage::Instance)\n    .def(\"register\", &PyStageFunctionStorage::Register)\n    .def(\"get_pycpp_function_info\", &PyStageFunctionStorage::GetPyCppFunctionInfo);\n\n  py::class_<MethodSignature>(m, \"MethodSignature_\")\n    .def(py::init<>())\n    .def_readwrite(\"servable_name\", &MethodSignature::servable_name)\n    .def_readwrite(\"method_name\", &MethodSignature::method_name)\n    
.def_readwrite(\"inputs\", &MethodSignature::inputs)\n    .def_readwrite(\"outputs\", &MethodSignature::outputs)\n    .def(\"add_stage_function\", &MethodSignature::AddStageFunction)\n    .def(\"add_stage_model\", &MethodSignature::AddStageModel)\n    .def(\"set_return\", &MethodSignature::SetReturn);\n\n  py::class_<RequestSpec>(m, \"RequestSpec_\")\n    .def(py::init<>())\n    .def_readwrite(\"servable_name\", &RequestSpec::servable_name)\n    .def_readwrite(\"version_number\", &RequestSpec::version_number)\n    .def_readwrite(\"method_name\", &RequestSpec::method_name);\n\n  py::class_<CommonModelMeta>(m, \"CommonModelMeta_\")\n    .def(py::init<>())\n    .def_readwrite(\"servable_name\", &CommonModelMeta::servable_name)\n    .def_readwrite(\"model_key\", &CommonModelMeta::model_key)\n    .def_readwrite(\"inputs_count\", &CommonModelMeta::inputs_count)\n    .def_readwrite(\"outputs_count\", &CommonModelMeta::outputs_count)\n    .def_readwrite(\"with_batch_dim\", &CommonModelMeta::with_batch_dim)\n    .def_readwrite(\"without_batch_dim_inputs\", &CommonModelMeta::without_batch_dim_inputs);\n\n  py::class_<LocalModelMeta>(m, \"LocalModelMeta_\")\n    .def(py::init<>())\n    .def_readwrite(\"model_file\", &LocalModelMeta::model_files)\n    .def_readwrite(\"config_file\", &LocalModelMeta::config_file)\n    .def_readwrite(\"model_context\", &LocalModelMeta::model_context)\n    .def(\"set_model_format\", &LocalModelMeta::SetModelFormat);\n\n  py::class_<ModelContext>(m, \"ModelContext_\")\n    .def(py::init<>())\n    .def_readwrite(\"thread_num\", &ModelContext::thread_num)\n    .def_readwrite(\"thread_affinity_core_list\", &ModelContext::thread_affinity_core_list)\n    .def_readwrite(\"enable_parallel\", &ModelContext::enable_parallel)\n    .def_readwrite(\"device_list\", &ModelContext::device_list)\n    .def(\"append_device_info\", &ModelContext::AppendDeviceInfo);\n\n  py::class_<DistributedModelMeta>(m, \"DistributedModelMeta_\")\n    .def(py::init<>())\n    
.def_readwrite(\"rank_size\", &DistributedModelMeta::rank_size)\n    .def_readwrite(\"stage_size\", &DistributedModelMeta::stage_size)\n    .def_readwrite(\"enable_pipeline_infer\", &DistributedModelMeta::enable_pipeline_infer);\n\n  py::class_<ModelMeta>(m, \"ModelMeta_\")\n    .def(py::init<>())\n    .def_readwrite(\"common_meta\", &ModelMeta::common_meta)\n    .def_readwrite(\"local_meta\", &ModelMeta::local_meta)\n    .def_readwrite(\"distributed_meta\", &ModelMeta::distributed_meta);\n\n  py::class_<ServableSignature>(m, \"ServableSignature_\")\n    .def(py::init<>())\n    .def_readwrite(\"servable_meta\", &ServableSignature::model_metas)\n    .def_readwrite(\"methods\", &ServableSignature::methods);\n\n  py::class_<PyServableRegister>(m, \"ServableRegister_\")\n    .def_static(\"register_model_input_output_info\", &PyServableRegister::RegisterInputOutputInfo)\n    .def_static(\"register_method\", &PyServableRegister::RegisterMethod)\n    .def_static(\"declare_model\", &PyServableRegister::DeclareModel)\n    .def_static(\"declare_distributed_model\", &PyServableRegister::DeclareDistributedModel)\n    .def_static(\"run\", &PyServableRegister::Run);\n\n  py::class_<OneRankConfig>(m, \"OneRankConfig_\")\n    .def(py::init<>())\n    .def_readwrite(\"device_id\", &OneRankConfig::device_id)\n    .def_readwrite(\"ip\", &OneRankConfig::ip);\n\n  py::class_<DistributedServableConfig>(m, \"DistributedServableConfig_\")\n    .def(py::init<>())\n    .def_readwrite(\"common_meta\", &DistributedServableConfig::common_meta)\n    .def_readwrite(\"distributed_meta\", &DistributedServableConfig::distributed_meta)\n    .def_readwrite(\"rank_table_content\", &DistributedServableConfig::rank_table_content)\n    .def_readwrite(\"rank_list\", &DistributedServableConfig::rank_list);\n}\n\nvoid PyRegMaster(pybind11::module *m_ptr) {\n  auto &m = *m_ptr;\n  py::class_<PyMaster>(m, \"Master_\")\n    .def_static(\"start_grpc_server\", &PyMaster::StartGrpcServer)\n    
.def_static(\"start_grpc_master_server\", &PyMaster::StartGrpcMasterServer)\n    .def_static(\"start_restful_server\", &PyMaster::StartRestfulServer)\n    .def_static(\"wait_and_clear\", &PyMaster::WaitAndClear)\n    .def_static(\"stop_and_clear\", &PyMaster::StopAndClear)\n    .def_static(\"only_model_stage\", &PyMaster::OnlyModelStage);\n\n  py::class_<WorkerContext, std::shared_ptr<WorkerContext>>(m, \"WorkerContext_\")\n    .def_static(\"init_worker\", &WorkerContext::PyInitWorkerContext)\n    .def(\"has_error_notified\", &WorkerContext::HasErrorNotified)\n    .def(\"has_exit_notified\", &WorkerContext::HasExitNotified)\n    .def(\"get_notified_error\", &WorkerContext::GetNotifiedError)\n    .def(\"ready\", &WorkerContext::HasReady)\n    .def(\"print_status\", &WorkerContext::PrintStatus)\n    .def(\"is_in_starting\", &WorkerContext::IsInStarting)\n    .def(\"update_worker_pid\", &WorkerContext::UpdateWorkerPid)\n    .def(\"notify_not_alive\", &WorkerContext::PyNotifyNotAlive)\n    .def(\"notify_start_failed\", &WorkerContext::PyNotifyStartFailed)\n    .def_property_readonly(\"is_unavailable\", &WorkerContext::IsUnavailable)\n    .def_property_readonly(\"normal_handled_count\", &WorkerContext::GetNormalHandledCount)\n    .def_property_readonly(\"address\", &WorkerContext::GetWorkerAddress);\n  py::class_<SSLConfig>(m, \"SSLConfig_\")\n    .def(py::init<>())\n    .def_readwrite(\"certificate\", &SSLConfig::certificate)\n    .def_readwrite(\"private_key\", &SSLConfig::private_key)\n    .def_readwrite(\"custom_ca\", &SSLConfig::custom_ca)\n    .def_readwrite(\"verify_client\", &SSLConfig::verify_client)\n    .def_readwrite(\"use_ssl\", &SSLConfig::use_ssl);\n}\n\nvoid PyRegWorker(pybind11::module *m_ptr) {\n  auto &m = *m_ptr;\n  py::class_<TaskItem>(m, \"TaskItem_\")\n    .def(py::init<>())\n    .def_readonly(\"has_stopped\", &TaskItem::has_stopped)\n    .def_property_readonly(\"method_name\", [](const TaskItem &item) { return item.task_info.group_name; })\n    
.def_property_readonly(\"stage_index\", [](const TaskItem &item) { return item.task_info.priority; })\n    .def_property_readonly(\"task_name\", [](const TaskItem &item) { return item.task_info.task_name; })\n    .def_property_readonly(\"instance_list\", [](const TaskItem &item) {\n      py::tuple instances(item.instance_list.size());\n      for (size_t i = 0; i < item.instance_list.size(); i++) {\n        instances[i] = PyTensor::AsNumpyTuple(item.instance_list[i]->data);\n      }\n      return instances;\n    });\n\n  py::class_<PyWorker>(m, \"Worker_\")\n    .def_static(\"start_servable\", &PyWorker::StartServable, py::call_guard<py::gil_scoped_release>())\n    .def_static(\"start_distributed_servable\", &PyWorker::StartDistributedServable,\n                py::call_guard<py::gil_scoped_release>())\n    .def_static(\"start_extra_servable\", &PyWorker::StartExtraServable, py::call_guard<py::gil_scoped_release>())\n    .def_static(\"get_declared_model_names\", &PyWorker::GetDeclaredModelNames)\n    .def_static(\"wait_and_clear\", &PyWorker::WaitAndClear)\n    .def_static(\"stop_and_clear\", PyWorker::StopAndClear)\n    .def_static(\"enable_pytask_que\", PyWorker::EnablePyTaskQueue)\n    .def_static(\"get_py_task\", &PyWorker::GetPyTask, py::call_guard<py::gil_scoped_release>())\n    .def_static(\"push_pytask_result\", &PyWorker::PushPyTaskResult)\n    .def_static(\"push_pytask_failed\", &PyWorker::PushPyTaskFailed)\n    .def_static(\"push_pytask_system_failed\", &PyWorker::PushPyTaskSystemFailed)\n    .def_static(\"get_device_type\", &PyWorker::GetDeviceType)\n    .def_static(\"support_reuse_device\", &PyWorker::SupportReuseDevice)\n    .def_static(\"notify_failed\", &PyWorker::NotifyFailed);\n\n  py::class_<ServableContext, std::shared_ptr<ServableContext>>(m, \"ServableContext_\")\n    .def(py::init<>())\n    .def_static(\"get_instance\", &ServableContext::Instance)\n    .def(\"set_device_type_str\",\n         [](ServableContext &context, const std::string 
&device_type) {\n           auto status = context.SetDeviceTypeStr(device_type);\n           if (status != SUCCESS) {\n             MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n           }\n         })\n    .def(\"set_device_id\", &ServableContext::SetDeviceId)\n    .def(\"set_enable_lite\", &ServableContext::SetEnableLite);\n\n  py::class_<MasterContext, std::shared_ptr<MasterContext>>(m, \"MasterContext_\")\n    .def(py::init<>())\n    .def_static(\"get_instance\", &MasterContext::Instance)\n    .def(\"set_max_enqueued_requests\", &MasterContext::SetMaxEnqueuedRequests);\n}\n\nvoid PyRegWorkerAgent(pybind11::module *m_ptr) {\n  auto &m = *m_ptr;\n  py::class_<PyAgent>(m, \"WorkerAgent_\")\n    .def_static(\"get_agents_config_from_worker\", &PyAgent::GetAgentsConfigsFromWorker)\n    .def_static(\"wait_and_clear\", &PyAgent::WaitAndClear)\n    .def_static(\"stop_and_clear\", &PyAgent::StopAndClear)\n    .def_static(\"notify_failed\", &PyAgent::NotifyFailed)\n    .def_static(\"startup_notify_exit\", &PyAgent::StartupNotifyExit)\n    .def_static(\"start_agent\", &PyAgent::StartAgent);\n\n  py::class_<AgentStartUpConfig>(m, \"AgentStartUpConfig_\")\n    .def(py::init<>())\n    .def_readwrite(\"rank_id\", &AgentStartUpConfig::rank_id)\n    .def_readwrite(\"device_id\", &AgentStartUpConfig::device_id)\n    .def_readwrite(\"model_file_names\", &AgentStartUpConfig::model_file_names)\n    .def_readwrite(\"group_file_names\", &AgentStartUpConfig::group_file_names)\n    .def_readwrite(\"rank_table_json_file_name\", &AgentStartUpConfig::rank_table_json_file_name)\n    .def_readwrite(\"agent_address\", &AgentStartUpConfig::agent_address)\n    .def_readwrite(\"distributed_address\", &AgentStartUpConfig::distributed_address)\n    .def_readwrite(\"common_meta\", &AgentStartUpConfig::common_meta);\n}\n\nclass PyExitSignalHandle {\n public:\n  static void Start() { ExitSignalHandle::Instance().Start(); }\n  static bool HasStopped() { return 
ExitSignalHandle::Instance().HasStopped(); }\n};\n\n// cppcheck-suppress syntaxError\nPYBIND11_MODULE(_mindspore_serving, m) {\n  PyRegServable(&m);\n  PyRegMaster(&m);\n  PyRegWorker(&m);\n  PyRegWorkerAgent(&m);\n\n  py::class_<PyExitSignalHandle>(m, \"ExitSignalHandle_\")\n    .def_static(\"start\", &PyExitSignalHandle::Start)\n    .def_static(\"has_stopped\", &PyExitSignalHandle::HasStopped);\n\n  (void)py::module::import(\"atexit\").attr(\"register\")(py::cpp_function{[&]() -> void {\n    Server::Instance().Clear();\n    Worker::GetInstance().Clear();\n    WorkerAgent::Instance().Clear();\n  }});\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/python/tensor_py.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"python/tensor_py.h\"\n#include <pybind11/pytypes.h>\n#include <string>\n#include <vector>\n#include <memory>\n#include <set>\n#include \"mindspore_serving/ccsrc/common/tensor.h\"\n\nnamespace mindspore::serving {\nstatic std::vector<ssize_t> GetStrides(const std::vector<ssize_t> &shape, ssize_t item_size) {\n  std::vector<ssize_t> strides;\n  strides.reserve(shape.size());\n  const auto ndim = shape.size();\n  for (size_t i = 0; i < ndim; ++i) {\n    auto stride = item_size;\n    for (size_t j = i + 1; j < ndim; ++j) {\n      stride *= shape[j];\n    }\n    strides.push_back(stride);\n  }\n  return strides;\n}\n\nDataType NumpyTensor::GetDataType(const py::buffer_info &buf) {\n  std::set<char> fp_format = {'e', 'f', 'd'};\n  std::set<char> int_format = {'b', 'h', 'i', 'l', 'q'};\n  std::set<char> uint_format = {'B', 'H', 'I', 'L', 'Q'};\n  if (buf.format.size() == 1) {\n    char format = buf.format.front();\n    if (fp_format.find(format) != fp_format.end()) {\n      constexpr int size_of_fp16 = 2;\n      constexpr int size_of_fp32 = 4;\n      constexpr int size_of_fp64 = 8;\n      switch (buf.itemsize) {\n        case size_of_fp16:\n          return kMSI_Float16;\n        case size_of_fp32:\n          return kMSI_Float32;\n        case size_of_fp64:\n          return kMSI_Float64;\n      }\n    } else if 
(int_format.find(format) != int_format.end()) {\n      switch (buf.itemsize) {\n        case sizeof(int8_t):\n          return kMSI_Int8;\n        case sizeof(int16_t):\n          return kMSI_Int16;\n        case sizeof(int32_t):\n          return kMSI_Int32;\n        case sizeof(int64_t):\n          return kMSI_Int64;\n      }\n    } else if (uint_format.find(format) != uint_format.end()) {\n      switch (buf.itemsize) {\n        case sizeof(uint8_t):\n          return kMSI_Uint8;\n        case sizeof(uint16_t):\n          return kMSI_Uint16;\n        case sizeof(uint32_t):\n          return kMSI_Uint32;\n        case sizeof(uint64_t):\n          return kMSI_Uint64;\n      }\n    } else if (format == '?') {\n      return kMSI_Bool;\n    }\n  }\n  MSI_LOG(WARNING) << \"Unsupported DataType format \" << buf.format << \" item size \" << buf.itemsize;\n  return kMSI_Unknown;\n}\n\nstatic std::string GetPyTypeFormat(DataType data_type) {\n  switch (data_type) {\n    case kMSI_Float16:\n      return \"e\";\n    case kMSI_Float32:\n      return py::format_descriptor<float>::format();\n    case kMSI_Float64:\n      return py::format_descriptor<double>::format();\n    case kMSI_Uint8:\n      return py::format_descriptor<uint8_t>::format();\n    case kMSI_Uint16:\n      return py::format_descriptor<uint16_t>::format();\n    case kMSI_Uint32:\n      return py::format_descriptor<uint32_t>::format();\n    case kMSI_Uint64:\n      return py::format_descriptor<uint64_t>::format();\n    case kMSI_Int8:\n      return py::format_descriptor<int8_t>::format();\n    case kMSI_Int16:\n      return py::format_descriptor<int16_t>::format();\n    case kMSI_Int32:\n      return py::format_descriptor<int32_t>::format();\n    case kMSI_Int64:\n      return py::format_descriptor<int64_t>::format();\n    case kMSI_Bool:\n      return py::format_descriptor<bool>::format();\n    default:\n      MSI_LOG(WARNING) << \"Unsupported DataType \" << data_type << \".\";\n      return \"\";\n  
}\n}\n\nstatic bool IsCContiguous(const py::array &input) {\n  auto flags = static_cast<unsigned int>(input.flags());\n  return (flags & static_cast<uint32_t>(pybind11::detail::npy_api::NPY_ARRAY_C_CONTIGUOUS_)) != 0;\n}\n\nTensorBasePtr PyTensor::MakeTensor(const py::array &input) {\n  // Get input buffer info.\n  py::buffer_info buf = input.request();\n  // Check data types.\n  auto buf_type = NumpyTensor::GetDataType(buf);\n  if (buf_type == kMSI_Unknown) {\n    MSI_LOG(EXCEPTION) << \"Unsupported tensor type!\";\n  }\n  // Convert input array to C contiguous if need.\n  std::unique_ptr<char[]> tmp_buf;\n  if (!IsCContiguous(input)) {\n    Py_buffer pybuf;\n    if (PyObject_GetBuffer(input.ptr(), &pybuf, PyBUF_ANY_CONTIGUOUS) || pybuf.len < 0) {\n      MSI_LOG(EXCEPTION) << \"Failed to get buffer from the input!\";\n    }\n    tmp_buf = std::make_unique<char[]>(static_cast<size_t>(pybuf.len));\n    if (PyBuffer_ToContiguous(tmp_buf.get(), &pybuf, pybuf.len, 'C')) {\n      MSI_LOG(EXCEPTION) << \"Can't copy numpy.ndarray to a contiguous buffer.\";\n    }\n    PyBuffer_Release(&pybuf);\n    buf.ptr = tmp_buf.get();\n  }\n  // Get tensor shape.\n  std::vector<int64_t> shape(buf.shape.begin(), buf.shape.end());\n  return std::make_shared<Tensor>(buf_type, shape, buf.ptr, buf.size * buf.itemsize);\n}\n\n/// Creates a Tensor from a numpy array without copy\nTensorBasePtr PyTensor::MakeTensorNoCopy(const py::array &input) {\n  // Check format.\n  if (!IsCContiguous(input)) {\n    MSI_LOG(EXCEPTION) << \"Array should be C contiguous.\";\n  }\n  // Get input buffer info.\n  py::buffer_info buf = input.request();\n  // Get tensor dtype and check it.\n  auto dtype = NumpyTensor::GetDataType(buf);\n  if (dtype == kMSI_Unknown) {\n    MSI_LOG(EXCEPTION) << \"Unsupported data type!\";\n  }\n  // Make a tensor with shared data with numpy array.\n  auto tensor_data = std::make_shared<NumpyTensor>(std::move(buf));\n  return tensor_data;\n}\n\npy::object 
PyTensor::AsPythonData(const TensorBasePtr &tensor, bool copy) {\n  auto data_numpy = std::dynamic_pointer_cast<NumpyTensor>(tensor);\n  if (data_numpy) {\n    return data_numpy->py_array();\n  }\n  if (tensor->is_bytes_val_data()) {\n    if (tensor->bytes_data_size() != 1) {\n      return py::array();\n    }\n    const uint8_t *data = nullptr;\n    size_t bytes_len = 0;\n    tensor->get_bytes_data(0, &data, &bytes_len);\n    if (tensor->data_type() == kMSI_String) {\n      return py::str(reinterpret_cast<const char *>(data), bytes_len);\n    }\n    std::vector<ssize_t> shape{static_cast<ssize_t>(bytes_len)};\n    std::vector<ssize_t> strides = GetStrides(shape, static_cast<ssize_t>(sizeof(uint8_t)));\n    py::buffer_info info(reinterpret_cast<void *>(const_cast<uint8_t *>(data)), sizeof(uint8_t),\n                         py::format_descriptor<uint8_t>::format(), 1, shape, strides);\n    if (!copy) {\n      py::object self = py::cast(tensor);\n      return py::array(py::dtype(info), info.shape, info.strides, info.ptr, self);\n    } else {\n      return py::array(py::dtype(info), info.shape, info.strides, info.ptr);\n    }\n  } else {\n    const auto &tensor_shape = tensor->shape();\n    std::vector<ssize_t> shape(tensor_shape.begin(), tensor_shape.end());\n    std::vector<ssize_t> strides = GetStrides(shape, static_cast<ssize_t>(tensor->itemsize()));\n    py::buffer_info info(reinterpret_cast<void *>(const_cast<uint8_t *>(tensor->data())),\n                         static_cast<ssize_t>(tensor->itemsize()), GetPyTypeFormat(tensor->data_type()),\n                         static_cast<ssize_t>(tensor_shape.size()), shape, strides);\n\n    if (!copy) {\n      py::object self = py::cast(tensor);\n      return py::array(py::dtype(info), info.shape, info.strides, info.ptr, self);\n    } else {\n      return py::array(py::dtype(info), info.shape, info.strides, info.ptr);\n    }\n  }\n}\n\npy::tuple PyTensor::AsNumpyTuple(const InstanceData &instance_data) {\n  py::tuple 
numpy_inputs_tuple(instance_data.size());\n  for (size_t i = 0; i < instance_data.size(); i++) {  // inputs\n    numpy_inputs_tuple[i] = PyTensor::AsPythonData(instance_data[i], false);\n  }\n  return numpy_inputs_tuple;\n}\n\nInstanceData PyTensor::AsInstanceData(const py::tuple &tuple) {\n  InstanceData instance_data;\n  for (auto &item : tuple) {\n    TensorBasePtr tensor = nullptr;\n    if (py::isinstance<py::bytes>(item)) {  // bytes can be seen as str, so check bytes first\n      tensor = std::make_shared<Tensor>();\n      tensor->set_data_type(serving::kMSI_Bytes);\n      auto val = std::string(item.cast<py::bytes>());\n      tensor->add_bytes_data(reinterpret_cast<const uint8_t *>(val.data()), val.length());\n    } else if (py::isinstance<py::str>(item)) {\n      tensor = std::make_shared<Tensor>();\n      tensor->set_data_type(serving::kMSI_String);\n      auto val = item.cast<std::string>();\n      tensor->add_bytes_data(reinterpret_cast<const uint8_t *>(val.data()), val.length());\n    } else if (py::isinstance<py::bool_>(item)) {\n      auto val = item.cast<bool>();\n      tensor = std::make_shared<Tensor>(serving::kMSI_Bool, std::vector<int64_t>(), &val, sizeof(val));\n    } else if (py::isinstance<py::int_>(item)) {\n      auto val = item.cast<int64_t>();\n      tensor = std::make_shared<Tensor>(serving::kMSI_Int64, std::vector<int64_t>(), &val, sizeof(val));\n    } else if (py::isinstance<py::float_>(item)) {\n      auto val = item.cast<double>();\n      tensor = std::make_shared<Tensor>(serving::kMSI_Float64, std::vector<int64_t>(), &val, sizeof(val));\n    } else {\n      try {\n        tensor = PyTensor::MakeTensorNoCopy(py::cast<py::array>(item));\n      } catch (const std::runtime_error &error) {\n        MSI_LOG_EXCEPTION << \"Get illegal result data with type \" << py::str(item.get_type()).cast<std::string>();\n      }\n    }\n    instance_data.push_back(tensor);\n  }\n  return instance_data;\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/python/tensor_py.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_SERVING_PY_H\n#define MINDSPORE_SERVING_SERVING_PY_H\n\n#include <pybind11/pybind11.h>\n#include <pybind11/numpy.h>\n#include <pybind11/stl.h>\n#include <utility>\n#include <vector>\n#include \"common/serving_common.h\"\n#include \"common/instance.h\"\n\nnamespace py = pybind11;\n\nnamespace mindspore::serving {\nclass NumpyTensor : public TensorBase {\n public:\n  explicit NumpyTensor(py::buffer_info &&buffer) : buffer_(std::move(buffer)) {}\n  ~NumpyTensor() noexcept {\n    py::gil_scoped_acquire acquire;\n    { buffer_ = py::buffer_info(); }\n  }\n  /// py::array object.\n  py::array py_array() const {\n    // Use dummy owner to avoid copy data.\n    py::str dummyOwner;\n    return py::array(py::dtype(buffer_), buffer_.shape, buffer_.strides, buffer_.ptr, dummyOwner);\n  }\n\n  void set_data_type(DataType) override {\n    MSI_LOG_EXCEPTION << \"NumpyTensor is readonly, cannot invoke set_data_type\";\n  }\n  DataType data_type() const override { return GetDataType(buffer_); }\n\n  void set_shape(const std::vector<int64_t> &) override {\n    MSI_LOG_EXCEPTION << \"NumpyTensor is readonly, cannot invoke set_shape\";\n  }\n  std::vector<int64_t> shape() const override { return buffer_.shape; }\n\n  const uint8_t *data() const override { return static_cast<const uint8_t *>(buffer_.ptr); }\n  size_t 
data_size() const override {\n    if (buffer_.size <= 0 || buffer_.itemsize <= 0) {\n      return 0;\n    }\n    return static_cast<size_t>(buffer_.size * buffer_.itemsize);\n  }\n\n  bool resize_data(size_t) override { MSI_LOG_EXCEPTION << \"NumpyTensor is readonly, cannot invoke resize_data\"; }\n  uint8_t *mutable_data() override { MSI_LOG_EXCEPTION << \"NumpyTensor is readonly, cannot invoke mutable_data\"; }\n\n  void clear_bytes_data() override { MSI_LOG_EXCEPTION << \"NumpyTensor is readonly, cannot invoke clear_bytes_data\"; }\n  void add_bytes_data(const uint8_t *, size_t) override {\n    MSI_LOG_EXCEPTION << \"NumpyTensor is readonly, cannot invoke add_bytes_data\";\n  }\n\n  size_t bytes_data_size() const override { return 0; }\n  void get_bytes_data(size_t, const uint8_t **, size_t *) const override {\n    MSI_LOG_EXCEPTION << \"NumpyTensor is readonly, cannot invoke get_bytes_data\";\n  }\n\n  static DataType GetDataType(const py::buffer_info &buf);\n\n private:\n  py::buffer_info buffer_;\n};\n\nclass PyTensor {\n public:\n  // For all types, but for BYTES type, there can only be one item in bytes_val.\n  // If the tensor data is destroyed when the numpy array is returned to python env, the tensor data needs to be copied\n  static py::object AsPythonData(const TensorBasePtr &tensor, bool copy = false);\n  static TensorBasePtr MakeTensor(const py::array &input);\n  static TensorBasePtr MakeTensorNoCopy(const py::array &input);\n\n  static py::tuple AsNumpyTuple(const InstanceData &instance);\n  static InstanceData AsInstanceData(const py::tuple &tuple);\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_SERVING_PY_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/python/worker/servable_py.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"python/worker/servable_py.h\"\n#include <string>\n#include <sstream>\n#include <vector>\n#include \"worker/servable_register.h\"\n#include \"worker/worker.h\"\n\nnamespace mindspore::serving {\nvoid PyServableRegister::RegisterMethod(const MethodSignature &method) {\n  auto status = ServableRegister::Instance().RegisterMethod(method);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n}\nvoid PyServableRegister::DeclareModel(const ModelMeta &servable) {\n  auto status = ServableRegister::Instance().DeclareModel(servable);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n}\nvoid PyServableRegister::DeclareDistributedModel(const ModelMeta &servable) {\n  auto status = ServableRegister::Instance().DeclareDistributedModel(servable);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n}\nvoid PyServableRegister::RegisterInputOutputInfo(const std::string &model_key, size_t inputs_count,\n                                                 size_t outputs_count, uint64_t subgraph) {\n  auto status = ServableRegister::Instance().RegisterInputOutputInfo(model_key, inputs_count, outputs_count, subgraph);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: 
\" << status.StatusMessage();\n  }\n}\n\npy::tuple PyServableRegister::Run(const std::string &model_key, const py::tuple &args, uint64_t subgraph) {\n  std::stringstream model_stream;\n  if (subgraph == 0) {\n    model_stream << \"Model(\" << model_key << \").call()\";\n  } else {\n    model_stream << \"Model(\" << model_key << \", subgraph=\" << subgraph << \").call()\";\n  }\n  const std::string model_str = model_stream.str();\n  RequestSpec request;\n  auto const &signature = ServableRegister::Instance().GetServableSignature();\n  auto model_meta = signature.GetModelDeclare(model_key);\n  if (model_meta == nullptr) {\n    MSI_LOG_EXCEPTION << model_str\n                      << \" failed: the model is not declared, ensure that interface 'declare_model' can take effect \"\n                         \"when importing servable_config.py by the serving server\";\n  }\n  auto &common_meta = model_meta->common_meta;\n  auto input_it = common_meta.inputs_count.find(subgraph);\n  if (input_it == common_meta.inputs_count.end()) {\n    MSI_LOG_EXCEPTION << model_str << \" failed: The model does not have subgraph of index \" << subgraph\n                      << \", the subgraph count of the model is \" << common_meta.inputs_count.size();\n  }\n  auto input_count = input_it->second;\n\n  request.servable_name = ServableRegister::Instance().GetServableSignature().servable_name;\n  request.method_name = ServableRegister::Instance().GetCallModelMethodName(model_key, subgraph);\n\n  std::vector<InstanceData> inputs;\n  auto inputs_args = py::cast<py::tuple>(args);\n  for (size_t i = 0; i < inputs_args.size(); i++) {\n    auto input = PyTensor::AsInstanceData(py::cast<py::tuple>(inputs_args[i]));\n    if (input.size() != input_count) {\n      MSI_LOG_EXCEPTION << model_str << \" failed: The inputs count \" << input.size() << \" of instance \" << i\n                        << \" is not equal to the inputs count \" << input_count << \" of the model\";\n    }\n    
inputs.push_back(input);\n  }\n  std::vector<InstancePtr> outs;\n  {\n    py::gil_scoped_release release;\n    auto status = Worker::GetInstance().Run(request, inputs, &outs);\n    if (status != SUCCESS || outs.size() == 0) {\n      MSI_LOG_EXCEPTION << model_str << \" failed: \" << status.StatusMessage();\n    }\n  }\n  py::tuple outputs(outs.size());\n  for (size_t i = 0; i < outs.size(); i++) {\n    auto &out = outs[i];\n    if (out->error_msg != SUCCESS) {\n      MSI_LOG_EXCEPTION << model_str << \" failed: \" << out->error_msg.StatusMessage();\n    }\n    outputs[i] = PyTensor::AsNumpyTuple(out->data);\n  }\n  return outputs;\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/python/worker/servable_py.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_SERVABLE_PY_H\n#define MINDSPORE_SERVING_WORKER_SERVABLE_PY_H\n\n#include <string>\n#include \"common/servable.h\"\n\n#include \"pybind11/pybind11.h\"\n#include \"pybind11/numpy.h\"\n#include \"pybind11/stl.h\"\n#include \"python/tensor_py.h\"\n\nnamespace py = pybind11;\n\nnamespace mindspore::serving {\nclass MS_API PyServableRegister {\n public:\n  static void RegisterMethod(const MethodSignature &method);\n\n  static void DeclareModel(const ModelMeta &servable);\n  static void DeclareDistributedModel(const ModelMeta &servable);\n\n  static void RegisterInputOutputInfo(const std::string &model_key, size_t inputs_count, size_t outputs_count,\n                                      uint64_t subgraph = 0);\n\n  // input args: list<list>, output: tuple<tuple>\n  static py::tuple Run(const std::string &model_key, const py::tuple &args, uint64_t subgraph);\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_WORKER_SERVABLE_PY_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/python/worker/worker_py.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"python/worker/worker_py.h\"\n#include <vector>\n#include <string>\n#include <memory>\n#include <map>\n#include \"common/exit_handle.h\"\n#include \"worker/notfiy_master/grpc_notify.h\"\n#include \"worker/local_servable/local_model_loader.h\"\n#include \"worker/distributed_worker/distributed_model_loader.h\"\n#include \"worker/inference/inference.h\"\n#include \"worker/servable_register.h\"\n#include \"worker/extra_worker/remote_call_model.h\"\n#include \"worker/context.h\"\n\nnamespace mindspore::serving {\nvoid PyWorker::StartServable(const std::string &servable_directory, const std::string &servable_name,\n                             uint32_t version_number, const std::string &master_address,\n                             const std::string &worker_address, const std::string &dec_key,\n                             const std::string &dec_mode) {\n  if (Worker::GetInstance().IsRunning()) {\n    MSI_LOG_EXCEPTION << \"A servable has been started, only one servable can run in a process currently.\";\n  }\n  Worker::GetInstance().StartListeningParentExitThread();\n  const auto &signature = ServableRegister::Instance().GetServableSignature();\n  if (signature.servable_name != servable_name) {\n    MSI_LOG_EXCEPTION << \"Servable '\" << servable_name << \"' has not been registered\";\n  }\n  Status status;\n  
std::map<std::string, std::shared_ptr<ModelLoaderBase>> models_loader;\n  Worker::GetInstance().SetContinueListenChildren(true);\n  status =\n    LoadLocalModels(servable_directory, servable_name, version_number, dec_key, dec_mode, signature, &models_loader);\n  Worker::GetInstance().SetContinueListenChildren(false);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n  status = Worker::GetInstance().StartServable(servable_directory, servable_name, version_number, models_loader,\n                                               master_address, worker_address, true);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n}\n\nStatus PyWorker::LoadLocalModels(const std::string &servable_directory, const std::string &servable_name,\n                                 uint32_t version_number, const std::string &dec_key, const std::string &dec_mode,\n                                 const ServableSignature &signature,\n                                 std::map<std::string, std::shared_ptr<ModelLoaderBase>> *models_loader) {\n  Status status;\n  for (auto &model_meta : signature.model_metas) {\n    auto &model_key = model_meta.common_meta.model_key;\n    auto local_models_loader = std::make_shared<LocalModelLoader>();\n    status =\n      local_models_loader->LoadModel(servable_directory, servable_name, version_number, model_meta, dec_key, dec_mode);\n    if (status != SUCCESS) {\n      local_models_loader->Clear();\n      return status;\n    }\n    status = local_models_loader->AfterLoadModel();\n    if (status != SUCCESS) {\n      local_models_loader->Clear();\n      return status;\n    }\n    (void)models_loader->emplace(model_key, local_models_loader);\n  }\n  return SUCCESS;\n}\n\nvoid PyWorker::StartDistributedServable(const std::string &servable_directory, const std::string &servable_name,\n                                        const std::string 
&rank_table_json_file, uint32_t version_number,\n                                        const std::string &distributed_address, const std::string &master_address,\n                                        const std::string &worker_address, uint32_t wait_agents_time_in_seconds) {\n  if (Worker::GetInstance().IsRunning()) {\n    MSI_LOG_EXCEPTION << \"A servable has been started, only one servable can run in a process currently.\";\n  }\n  Worker::GetInstance().StartListeningParentExitThread();\n  Status status;\n  auto model_loader = std::make_shared<DistributedModelLoader>();\n  status = Worker::GetInstance().StartDistributedGrpcServer(model_loader, distributed_address);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n\n  status = model_loader->LoadModel(servable_name, rank_table_json_file, wait_agents_time_in_seconds);\n  if (status != SUCCESS) {\n    model_loader->Clear();\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n  status = model_loader->AfterLoadModel();\n  if (status != SUCCESS) {\n    model_loader->Clear();\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n  std::map<std::string, std::shared_ptr<ModelLoaderBase>> models_loader;\n  models_loader[model_loader->GetModelKey()] = model_loader;\n  status = Worker::GetInstance().StartServable(servable_directory, servable_name, version_number, models_loader,\n                                               master_address, worker_address, true);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n}\n\nvoid PyWorker::StartExtraServable(const std::string &servable_directory, const std::string &servable_name,\n                                  uint32_t version_number, bool device_ids_empty, const std::string &dec_key,\n                                  const std::string &dec_mode, const std::string &master_address,\n                                  
const std::string &worker_address) {\n  if (Worker::GetInstance().IsRunning()) {\n    MSI_LOG_EXCEPTION << \"A servable has been started, only one servable can run in a process currently.\";\n  }\n  const auto &signature = ServableRegister::Instance().GetServableSignature();\n  if (signature.servable_name != servable_name) {\n    MSI_LOG_EXCEPTION << \"Servable '\" << servable_name << \"' has not been registered\";\n  }\n  Worker::GetInstance().StartListeningParentExitThread();\n  auto own_device = false;\n  std::map<std::string, std::shared_ptr<ModelLoaderBase>> model_loaders;\n  Status status;\n  if (!signature.model_metas.empty()) {\n    // if device_type is None, device_ids is empty, and there are models declared, Cpu target should be support\n    auto target_device_type = ServableContext::Instance()->GetDeviceType();\n    if (target_device_type == kDeviceTypeNotSpecified && device_ids_empty) {\n      auto support_device_type = InferenceLoader::Instance().GetSupportDeviceType(kDeviceTypeCpu, kUnknownType);\n      if (support_device_type == kDeviceTypeNotSpecified) {\n        MSI_LOG_EXCEPTION\n          << \"Servable '\" << servable_name << \"' has models declared by declare_model, but parameter 'device_ids'\"\n          << \" of ServableStartConfig is not set in Serving startup script when the MindSpore or Lite inference\"\n          << \" package not support CPU\";\n      }\n      target_device_type = kDeviceTypeCpu;\n      ServableContext::Instance()->SetDeviceType(target_device_type);\n    }\n    if (target_device_type == kDeviceTypeCpu) {\n      own_device = true;\n      status = LoadLocalModels(servable_directory, servable_name, version_number, dec_key, dec_mode, signature,\n                               &model_loaders);\n      if (status != SUCCESS) {\n        MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n      }\n    } else {\n      status = RemoteCallModel::InitRemote(servable_name, version_number, master_address, 
&model_loaders);\n      if (status != SUCCESS) {\n        MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n      }\n    }\n  }\n  status = Worker::GetInstance().StartServable(servable_directory, servable_name, version_number, model_loaders,\n                                               master_address, worker_address, own_device);\n  if (status != SUCCESS) {\n    MSI_LOG_EXCEPTION << \"Raise failed: \" << status.StatusMessage();\n  }\n}\n\nstd::vector<std::string> PyWorker::GetDeclaredModelNames() {\n  std::vector<std::string> model_names;\n  for (auto &model_meta : ServableRegister::Instance().GetServableSignature().model_metas) {\n    // cppcheck-suppress useStlAlgorithm\n    model_names.push_back(model_meta.common_meta.model_key);\n  }\n  return model_names;\n}\n\nbool PyWorker::EnablePyTaskQueue() { return Worker::GetInstance().GetWorkExecutor().GetPyTaskQueue().IsRunning(); }\n\nTaskItem PyWorker::GetPyTask() {\n  TaskItem item;\n  Worker::GetInstance().GetWorkExecutor().GetPyTaskQueue().PyPopTask(&item);\n  return item;\n}\n\nvoid PyWorker::PushPyTaskResult(const py::tuple &instance_outputs) {\n  MSI_TIME_STAMP_START(PushPyTaskResult)\n  std::vector<ResultInstance> outputs;\n  ResultInstance instance;\n  instance.data = PyTensor::AsInstanceData(instance_outputs);\n  outputs.push_back(instance);\n  Worker::GetInstance().GetWorkExecutor().GetPyTaskQueue().PyPushTaskResult(outputs);\n  MSI_TIME_STAMP_END(PushPyTaskResult)\n}\n\nvoid PyWorker::PushPyTaskFailed(int count, const std::string &error_msg) {\n  auto &task_que = Worker::GetInstance().GetWorkExecutor().GetPyTaskQueue();\n  auto task_info = task_que.GetHandledTaskInfo();\n  auto status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n                << \"Call \" << task_info.tag << \" Failed, method: '\" << task_info.group_name\n                << \"', stage index(begin with 1): \" << task_info.priority << \", error msg: \" << error_msg;\n  std::vector<ResultInstance> results;\n  for (int i = 
0; i < count; i++) {\n    ResultInstance result_instance;\n    result_instance.error_msg = status;\n    results.push_back(result_instance);\n  }\n  task_que.PyPushTaskResult(results);\n}\n\nvoid PyWorker::PushPyTaskSystemFailed(const std::string &error_msg) {\n  auto task_info = Worker::GetInstance().GetWorkExecutor().GetPyTaskQueue().GetHandledTaskInfo();\n  auto status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n                << \"Call \" << task_info.tag << \" Failed, method: '\" << task_info.group_name\n                << \"', stage index(begin with 1): \" << task_info.priority << \", error msg: \" << error_msg;\n  Worker::GetInstance().ClearOnSystemFailed(status);\n}\n\nvoid PyWorker::WaitAndClear() {\n  {\n    py::gil_scoped_release release;\n    ExitSignalHandle::Instance().WorkerWait();\n  }\n  Worker::GetInstance().Clear();\n}\n\nvoid PyWorker::StopAndClear() {\n  ExitSignalHandle::Instance().Stop();\n  Worker::GetInstance().Clear();\n}\n\nstd::string PyWorker::GetDeviceType(const std::string &target_device_type, bool enable_lite) {\n  DeviceType target = kDeviceTypeNotSpecified;\n  if (target_device_type == \"cpu\") {\n    target = kDeviceTypeCpu;\n  } else if (target_device_type == \"gpu\") {\n    target = kDeviceTypeGpu;\n  } else if (target_device_type == \"ascend\") {\n    target = kDeviceTypeAscend;\n  }\n  ServableContext::Instance()->SetEnableLite(enable_lite);\n  auto device_type = InferenceLoader::Instance().GetSupportDeviceType(target, kUnknownType);\n  if (device_type == kDeviceTypeAscend) {\n    return \"Ascend\";\n  }\n  if (device_type == kDeviceTypeGpu) {\n    return \"Gpu\";\n  }\n  if (device_type == kDeviceTypeCpu) {\n    return \"Cpu\";\n  }\n  return \"\";\n}\n\nbool PyWorker::SupportReuseDevice() { return InferenceLoader::Instance().SupportReuseDevice(); }\n\nvoid PyWorker::NotifyFailed(const std::string &master_address, const std::string &error_msg) {\n  GrpcNotifyMaster::NotifyFailed(master_address, error_msg);\n}\n}  // namespace 
mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/python/worker/worker_py.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_PY_H\n#define MINDSPORE_SERVING_WORKER_PY_H\n\n#include <string>\n#include <vector>\n#include <map>\n#include <memory>\n#include \"common/serving_common.h\"\n#include \"worker/worker.h\"\n#include \"worker/task_queue.h\"\n#include \"python/tensor_py.h\"\n\nnamespace mindspore::serving {\nclass MS_API PyWorker {\n public:\n  static void StartServable(const std::string &model_directory, const std::string &model_name, uint32_t version_number,\n                            const std::string &master_address, const std::string &worker_address,\n                            const std::string &dec_key, const std::string &dec_mode);\n\n  static void StartDistributedServable(const std::string &servable_directory, const std::string &servable_name,\n                                       const std::string &rank_table_json_file, uint32_t version_number,\n                                       const std::string &distributed_address, const std::string &master_address,\n                                       const std::string &worker_address, uint32_t wait_agents_time_in_seconds);\n\n  static void StartExtraServable(const std::string &model_directory, const std::string &model_name,\n                                 uint32_t version_number, bool device_ids_empty, const std::string &dec_key,\n                           
      const std::string &dec_mode, const std::string &master_address,\n                                 const std::string &worker_address);\n\n  static std::vector<std::string> GetDeclaredModelNames();\n\n  static void WaitAndClear();\n  static void StopAndClear();\n  static bool EnablePyTaskQueue();\n  static TaskItem GetPyTask();\n\n  static void PushPyTaskResult(const py::tuple &instance_outputs);\n  static void PushPyTaskFailed(int count, const std::string &error_msg);\n  static void PushPyTaskSystemFailed(const std::string &error_msg);\n  static std::string GetDeviceType(const std::string &target_device_type, bool enable_lite);\n  static bool SupportReuseDevice();\n  // for grpc notify failed of worker\n  static void NotifyFailed(const std::string &master_address, const std::string &error_msg);\n\n private:\n  static Status LoadLocalModels(const std::string &servable_directory, const std::string &servable_name,\n                                uint32_t version_number, const std::string &dec_key, const std::string &dec_mode,\n                                const ServableSignature &signature,\n                                std::map<std::string, std::shared_ptr<ModelLoaderBase>> *models_loader);\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_WORKER_PY_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/context.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/context.h\"\n\nnamespace mindspore::serving {\nstd::shared_ptr<ServableContext> ServableContext::Instance() {\n  static std::shared_ptr<ServableContext> instance = nullptr;\n  if (instance == nullptr) {\n    instance = std::make_shared<ServableContext>();\n  }\n  return instance;\n}\n\nvoid ServableContext::SetDeviceType(DeviceType device_type) { device_type_ = device_type; }\n\nDeviceType ServableContext::GetDeviceType() const { return device_type_; }\n\nvoid ServableContext::SetDeviceId(uint32_t device_id) { device_id_ = device_id; }\n\nuint32_t ServableContext::GetDeviceId() const { return device_id_; }\n\nStatus ServableContext::SetDeviceTypeStr(const std::string &device_type) {\n  DeviceType type;\n  std::string device_type_lowcase = device_type;\n  for (auto &c : device_type_lowcase) {\n    // cppcheck-suppress useStlAlgorithm\n    if (c >= 'A' && c <= 'Z') {\n      c = c - 'A' + 'a';\n    }\n  }\n  if (device_type_lowcase == \"ascend\" || device_type_lowcase == \"davinci\") {\n    type = kDeviceTypeAscend;\n  } else if (device_type_lowcase == \"gpu\") {\n    type = kDeviceTypeGpu;\n  } else if (device_type_lowcase == \"cpu\") {\n    type = kDeviceTypeCpu;\n  } else if (device_type_lowcase == \"none\") {\n    type = kDeviceTypeNotSpecified;\n  } else {\n    return INFER_STATUS_LOG_ERROR(FAILED) << 
\"Unsupported device type '\" << device_type\n                                          << \"', only support 'Ascend', 'GPU', 'CPU' and None, case ignored\";\n  }\n  SetDeviceType(type);\n  return SUCCESS;\n}\n\nvoid ServableContext::SetEnableLite(bool enable_lite) { enable_lite_ = enable_lite; }\n\nbool ServableContext::EnableLite() const { return enable_lite_; }\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/context.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_CONTEXT_H\n#define MINDSPORE_SERVING_WORKER_CONTEXT_H\n\n#include <string>\n#include <memory>\n#include <vector>\n#include \"common/serving_common.h\"\n#include \"worker/inference/inference.h\"\n\nnamespace mindspore::serving {\nclass MS_API ServableContext {\n public:\n  static std::shared_ptr<ServableContext> Instance();\n\n  Status SetDeviceTypeStr(const std::string &device_type);\n\n  void SetDeviceType(DeviceType device_type);\n  DeviceType GetDeviceType() const;\n  void SetDeviceId(uint32_t device_id);\n  uint32_t GetDeviceId() const;\n\n  void SetEnableLite(bool enable_lite);\n  bool EnableLite() const;\n\n private:\n  DeviceType device_type_ = kDeviceTypeNotSpecified;\n  uint32_t device_id_ = 0;\n  bool enable_lite_ = false;\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_WORKER_CONTEXT_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/agent_process/agent_process.cc",
    "content": "/**\r\n * Copyright 2021 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"worker/distributed_worker/agent_process/agent_process.h\"\r\n#include \"worker/distributed_worker/worker_agent.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {\r\ngrpc::Status MSAgentImpl::Exit(grpc::ServerContext *context, const proto::DistributedExitRequest *request,\r\n                               proto::DistributedExitReply *reply) {\r\n  MSI_LOG(INFO) << \"Distributed Worker Exit\";\r\n  WorkerAgent::Instance().StopAgent(false);\r\n  return grpc::Status::OK;\r\n}\r\n\r\ngrpc::Status MSAgentImpl::Predict(grpc::ServerContext *context, const proto::DistributedPredictRequest *request,\r\n                                  proto::DistributedPredictReply *reply) {\r\n  MSI_LOG(INFO) << \"Begin call service Eval\";\r\n  WorkerAgent::Instance().Run(*request, reply);\r\n  MSI_LOG(INFO) << \"End call service Eval\";\r\n  return grpc::Status::OK;\r\n}\r\ngrpc::Status MSAgentImpl::Ping(grpc::ServerContext *context, const proto::PingRequest *request,\r\n                               proto::PingReply *reply) {\r\n  MSI_EXCEPTION_IF_NULL(request);\r\n  MSI_EXCEPTION_IF_NULL(reply);\r\n  watcher_->RecvPing(request->address());\r\n  return grpc::Status::OK;\r\n}\r\n\r\ngrpc::Status MSAgentImpl::Pong(grpc::ServerContext *context, const proto::PongRequest *request,\r\n                           
    proto::PongReply *reply) {\r\n  MSI_EXCEPTION_IF_NULL(request);\r\n  MSI_EXCEPTION_IF_NULL(reply);\r\n  watcher_->RecvPong(request->address());\r\n  return grpc::Status::OK;\r\n}\r\n}  // namespace serving\r\n}  // namespace mindspore\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/agent_process/agent_process.h",
    "content": "/**\r\n * Copyright 2021 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MINDSPORE_SERVING_WORKER_AGENT_PROCESS_H\r\n#define MINDSPORE_SERVING_WORKER_AGENT_PROCESS_H\r\n\r\n#include <grpcpp/grpcpp.h>\r\n#include <grpcpp/health_check_service_interface.h>\r\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\r\n#include <memory>\r\n#include <string>\r\n#include \"common/serving_common.h\"\r\n#include \"common/heart_beat.h\"\r\n#include \"proto/ms_agent.pb.h\"\r\n#include \"proto/ms_agent.grpc.pb.h\"\r\n#include \"proto/ms_worker.pb.h\"\r\n#include \"proto/ms_worker.grpc.pb.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {\r\n// Service Implement\r\nclass MSAgentImpl final : public proto::MSAgent::Service {\r\n public:\r\n  explicit MSAgentImpl(const std::string server_address) {\r\n    if (!watcher_) {\r\n      watcher_ = std::make_shared<Watcher<proto::MSDistributedWorker, proto::MSDistributedWorker>>(server_address);\r\n    }\r\n  }\r\n  grpc::Status Predict(grpc::ServerContext *context, const proto::DistributedPredictRequest *request,\r\n                       proto::DistributedPredictReply *reply) override;\r\n  grpc::Status Exit(grpc::ServerContext *context, const proto::DistributedExitRequest *request,\r\n                    proto::DistributedExitReply *reply) override;\r\n  grpc::Status Ping(grpc::ServerContext *context, const proto::PingRequest 
*request, proto::PingReply *reply) override;\r\n  grpc::Status Pong(grpc::ServerContext *context, const proto::PongRequest *request, proto::PongReply *reply) override;\r\n\r\n private:\r\n  std::shared_ptr<Watcher<proto::MSDistributedWorker, proto::MSDistributedWorker>> watcher_;\r\n};\r\n\r\n}  // namespace serving\r\n}  // namespace mindspore\r\n\r\n#endif  // MINDSPORE_SERVING_WORKER_AGENT_PROCESS_H\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/agent_startup.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"worker/distributed_worker/agent_startup.h\"\n#include <fstream>\n#include \"worker/distributed_worker/notify_distributed/notify_worker.h\"\n#include \"common/grpc_server.h\"\n\nnamespace mindspore {\nnamespace serving {\nWorkerAgentStartUp &WorkerAgentStartUp::Instance() {\n  static WorkerAgentStartUp instance;\n  return instance;\n}\n\nStatus WorkerAgentStartUp::GetAgentsConfigsFromWorker(const std::string &distributed_address) {\n  return GrpcNotifyDistributeWorker::GetAgentsConfigsFromWorker(distributed_address, &config_);\n}\n\nStatus WorkerAgentStartUp::GetDistributedServableConfig(DistributedServableConfig *config) {\n  MSI_EXCEPTION_IF_NULL(config);\n  if (config_.rank_list.empty()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Rank table config is not ready\";\n  }\n  *config = config_;\n  return SUCCESS;\n}\n\nStatus WorkerAgentStartUp::NotifyFailed(const std::string &distributed_address) {\n  return GrpcNotifyDistributeWorker::NotifyFailed(distributed_address);\n}\n\nvoid WorkerAgentStartUp::StartupNotifyExit(const std::string &distributed_address, const std::string &agent_ip) {\n  GrpcNotifyDistributeWorker::StartupNotifyExit(distributed_address, agent_ip);\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/agent_startup.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_AGENT_STARTUP_H\n#define MINDSPORE_SERVING_WORKER_AGENT_STARTUP_H\n#include <vector>\n#include <string>\n#include \"common/serving_common.h\"\n#include \"worker/distributed_worker/common.h\"\n#include \"worker/inference/inference.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API WorkerAgentStartUp {\n public:\n  static WorkerAgentStartUp &Instance();\n  // from python, worker_agent.py\n  // start_worker_agent\n  // step1, get agents config from worker\n  Status GetAgentsConfigsFromWorker(const std::string &distributed_address);\n  // step2, invoke from python\n  Status GetDistributedServableConfig(DistributedServableConfig *config);\n\n  Status NotifyFailed(const std::string &distributed_address);\n  void StartupNotifyExit(const std::string &distributed_address, const std::string &agent_ip);\n\n private:\n  DistributedServableConfig config_;\n  std::string worker_address_;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_WORKER_AGENT_STARTUP_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/common.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_DISTRIBUTED_WORKER_COMMON_H\n#define MINDSPORE_SERVING_DISTRIBUTED_WORKER_COMMON_H\n\n#include <vector>\n#include <string>\n#include <map>\n#include \"common/serving_common.h\"\n#include \"worker/inference/inference.h\"\n#include \"common/servable.h\"\n\nnamespace mindspore {\nnamespace serving {\nstruct OneRankConfig {\n  std::string ip;\n  uint32_t device_id = 0;\n};\n\nstruct DistributedServableConfig {\n  std::string rank_table_content;\n  std::vector<OneRankConfig> rank_list;\n\n  CommonModelMeta common_meta;\n  DistributedModelMeta distributed_meta;\n};\n\nstruct AgentStartUpConfig {\n  uint32_t rank_id;\n  uint32_t device_id;\n  std::vector<std::string> model_file_names;\n  std::vector<std::string> group_file_names;\n  std::string rank_table_json_file_name;\n\n  std::string agent_address;\n  std::string distributed_address;\n  uint32_t worker_port;\n\n  CommonModelMeta common_meta;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_DISTRIBUTED_WORKER_COMMON_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/distributed_model_loader.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/distributed_worker/distributed_model_loader.h\"\n#include <vector>\n#include <string>\n#include <set>\n#include \"worker/distributed_worker/notify_agent/notify_agent.h\"\n#include \"worker/worker.h\"\n#include \"common/exit_handle.h\"\n#include \"common/proto_tensor.h\"\n#include \"worker/servable_register.h\"\n\nnamespace mindspore {\nnamespace serving {\nstruct DistributedPredictMsg {\n  proto::DistributedPredictReply reply;\n  std::promise<void> promise = std::promise<void>();\n  Status status = FAILED;\n  std::future<void> future = promise.get_future();\n};\n\nDistributedModelLoader::~DistributedModelLoader() { Clear(); }\n\nuint64_t DistributedModelLoader::GetGraphNum() const { return graph_num_; }\n\nStatus DistributedModelLoader::Predict(const std::vector<TensorBasePtr> &input, std::vector<TensorBasePtr> *output,\n                                       uint64_t subgraph) {\n  Status status(SUCCESS);\n  if (config_.distributed_meta.enable_pipeline_infer) {\n    std::shared_lock<std::shared_mutex> lock{rw_mutex_};\n    status = PredictInner(input, output, subgraph);\n  } else {\n    std::unique_lock<std::shared_mutex> lock{rw_mutex_};\n    status = PredictInner(input, output, subgraph);\n  }\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Predict error happened, now exit distributed servable\";\n    
Worker::GetInstance().StopServable();\n  }\n  return status;\n}\n\nStatus DistributedModelLoader::PredictInner(const std::vector<TensorBasePtr> &input, std::vector<TensorBasePtr> *output,\n                                            uint64_t subgraph) {\n  MSI_EXCEPTION_IF_NULL(output);\n  if (!model_loaded_) {\n    MSI_LOG_EXCEPTION << \"Model has not been loaded\";\n  }\n\n  proto::DistributedPredictRequest request;\n  proto::DistributedPredictRequest empty_request;\n  request.set_subgraph(subgraph);\n  for (const auto &tensor_ptr : input) {\n    auto tensor = request.add_inputs();\n    ProtoTensor proto_tensor(tensor);\n    proto_tensor.assign(*tensor_ptr);\n  }\n\n  auto rank_size = config_.distributed_meta.rank_size;\n  auto stage_size = config_.distributed_meta.stage_size;\n  if (rank_size != agent_spec_map_.size()) {\n    MSI_LOG_EXCEPTION << \"agent_spec_map_ size \" << agent_spec_map_.size() << \" not match rank size \" << rank_size;\n  }\n  auto agent_num_per_stage = rank_size / stage_size;\n  auto result_agent_id = rank_size - 1;\n\n  auto msg_list = std::make_shared<std::vector<DistributedPredictMsg>>(rank_size);\n  request.set_return_result(false);\n  empty_request.set_return_result(false);\n\n  std::unique_lock<std::mutex> wait_lock(wait_mutex_);\n  for (size_t i = 0; i < rank_size; ++i) {\n    AsyncPredictCallback callback = [msg_list, i](const Status &status) {\n      msg_list->at(i).status = status;\n      msg_list->at(i).promise.set_value();\n    };\n    if (i < agent_num_per_stage || all_stage_has_input_) {\n      if (i == result_agent_id) {\n        request.set_return_result(true);\n      }\n      agent_spec_map_[i].notify_agent_->DispatchAsync(request, &msg_list->at(i).reply, callback);\n    } else {\n      if (i == result_agent_id) {\n        empty_request.set_return_result(true);\n      }\n      agent_spec_map_[i].notify_agent_->DispatchAsync(empty_request, &msg_list->at(i).reply, callback);\n    }\n  }\n  
wait_lock.unlock();\n\n  for (size_t rank_id = 0; rank_id < msg_list->size(); ++rank_id) {\n    auto &predict_msg = msg_list->at(rank_id);\n    auto &future = predict_msg.future;\n    const uint64_t kWaitMaxHundredMs = 10 * 10;  // waiting for 10s\n    uint64_t k;\n    for (k = 0; k < kWaitMaxHundredMs; k++) {\n      if (ExitSignalHandle::Instance().HasStopped()) {\n        return INFER_STATUS_LOG_ERROR(FAILED) << \"Worker has stopped\";\n      }\n      // waiting for 100ms\n      if (future.wait_for(std::chrono::milliseconds(100)) == std::future_status::ready) {\n        break;\n      }\n    }\n    if (k >= kWaitMaxHundredMs) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Failed to wait for result of rank \" << rank_id;\n    }\n    auto status = predict_msg.status;\n    if (status != SUCCESS) {\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Error happened on get result of rank \" << rank_id << \": \" << status.StatusMessage();\n    }\n    auto &reply = predict_msg.reply;\n    if (reply.has_error_msg() && reply.error_msg().error_code() != 0) {\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Error happened on get result of rank \" << rank_id << \": \" << reply.error_msg().error_msg();\n    }\n  }\n\n  auto &reply = msg_list->at(result_agent_id).reply;\n  for (int i = 0; i < reply.outputs_size(); ++i) {\n    auto p = std::make_shared<ProtoTensor>(reply.mutable_outputs(i));\n    auto tensor_ptr = std::make_shared<Tensor>(p->data_type(), p->shape(), p->data(), p->data_size());\n    output->push_back(tensor_ptr);\n  }\n  return SUCCESS;\n}\n\nstd::vector<TensorInfo> DistributedModelLoader::GetInputInfos(uint64_t subgraph) const {\n  if (!model_loaded_) {\n    MSI_LOG_EXCEPTION << \"Model has not been loaded\";\n  }\n  auto iter = input_infos_.find(subgraph);\n  if (iter != input_infos_.end()) {\n    return iter->second;\n  }\n  MSI_LOG_EXCEPTION << \"subgraph: \" << subgraph << \" is not existed\";\n  return 
{};\n}\n\nstd::vector<TensorInfo> DistributedModelLoader::GetOutputInfos(uint64_t subgraph) const {\n  if (!model_loaded_) {\n    MSI_LOG_EXCEPTION << \"Model has not been loaded\";\n  }\n  auto iter = output_infos_.find(subgraph);\n  if (iter != output_infos_.end()) {\n    return iter->second;\n  }\n  MSI_LOG_EXCEPTION << \"subgraph: \" << subgraph << \" is not existed\";\n  return {};\n}\n\nuint64_t DistributedModelLoader::GetBatchSize() const {\n  if (!model_loaded_) {\n    MSI_LOG_EXCEPTION << \"Model has not been loaded\";\n  }\n  return batch_size_;\n}\n\nStatus DistributedModelLoader::GetDistributedServableConfig(DistributedServableConfig *config) const {\n  if (!config_loaded_) {\n    return INFER_STATUS(FAILED) << \"Config not loaded\";\n  }\n  *config = config_;\n  return SUCCESS;\n}\n\nvoid DistributedModelLoader::SetWaitAgentsPromise(bool flag) {\n  if (!promise_set_flag_.test_and_set()) {\n    agents_promise_.set_value(flag);\n    registered_end_flag_ = true;\n  }\n}\n\nStatus DistributedModelLoader::RegisterAgent(const std::vector<WorkerAgentSpec> &agent_specs) {\n  std::unique_lock<std::shared_mutex> lock{rw_mutex_};\n  if (registered_end_flag_) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Distributed servable has ended up registration\";\n  }\n  if (agent_specs.empty()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"The number of graph cannot be 0\";\n  }\n  if (agent_specs[0].rank_id >= config_.distributed_meta.rank_size) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Invalid rank id \" << agent_specs[0].rank_id << \", rank size \" << config_.distributed_meta.rank_size;\n  }\n  DistributedAgentContext context;\n  auto it = agent_spec_map_.find(agent_specs[0].rank_id);\n  if (it != agent_spec_map_.end()) {\n    MSI_LOG_WARNING << \"rank_id \" << agent_specs[0].rank_id << \" has been registered\";\n    return SUCCESS;\n  }\n  context.agent_spec_ = agent_specs;\n  std::shared_ptr<BaseNotifyAgent> notify_agent = 
std::make_shared<GrpcNotifyAgent>(agent_specs[0].agent_address);\n  context.notify_agent_ = notify_agent;\n  agent_spec_map_[agent_specs[0].rank_id] = context;\n  MSI_LOG_INFO << \"Rank \" << agent_specs[0].rank_id << \" been registered\";\n\n  if (agent_spec_map_.size() >= config_.distributed_meta.rank_size) {\n    SetWaitAgentsPromise(true);\n  }\n  return SUCCESS;\n}\n\nvoid DistributedModelLoader::Clear() {\n  std::unique_lock<std::shared_mutex> lock{rw_mutex_};\n  for (auto &agent : agent_spec_map_) {\n    agent.second.notify_agent_->Exit();\n  }\n  agent_spec_map_.clear();\n  model_loaded_ = false;\n  MSI_LOG_INFO << \"End clear distributed servable\";\n}\n\nStatus DistributedModelLoader::OnAgentExit() {\n  std::unique_lock<std::shared_mutex> lock{rw_mutex_};\n  MSI_LOG_INFO << \"Worker agent notify exit\";\n  SetWaitAgentsPromise(false);\n  model_loaded_ = false;\n  return SUCCESS;\n}\n\nStatus DistributedModelLoader::LoadModel(const std::string &servable_name, const std::string &rank_table_json_file,\n                                         uint64_t wait_agents_time_in_seconds) {\n  if (model_loaded_) {\n    MSI_LOG_EXCEPTION << \"Model has loaded\";\n  }\n  rank_table_json_file_ = rank_table_json_file;\n  const ServableSignature &signature = ServableRegister::Instance().GetServableSignature();\n  if (signature.servable_name != servable_name) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Servable '\" << servable_name << \"' has not been registered\";\n  }\n  if (signature.model_metas.size() != 1) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Distributed servable '\" << servable_name\n                                          << \"' has not been declared or has been declared more than once, \"\n                                          << \"declared number: \" << signature.model_metas.size();\n  }\n  if (signature.servable_type != kServableTypeDistributed) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Servable '\" << servable_name << 
\"' is not registered as distributed servable\";\n  }\n  auto &meta = signature.model_metas[0];\n  model_key_ = meta.common_meta.model_key;\n  config_.common_meta = meta.common_meta;\n  config_.distributed_meta = meta.distributed_meta;\n\n  auto status = InitConfigOnStartup(rank_table_json_file_);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Init with rank table on start up failed\";\n    return status;\n  }\n  status = CheckRankConfig();\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Check rank config failed\";\n    return status;\n  }\n  config_loaded_ = true;\n  status = WaitAgentsReady(wait_agents_time_in_seconds);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Waiting for ready of agents failed\";\n    return status;\n  }\n  status = CheckAgentsInfosAndInitTensorInfos();\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Check agents infos failed\";\n    return status;\n  }\n  model_loaded_ = true;\n  return SUCCESS;\n}\n\nstd::string RealPath(const std::string &path) {\n  // Return absolute path when path is accessible\n  std::string res;\n  char resolved_path[PATH_MAX] = {0};\n  if (realpath(path.c_str(), resolved_path) != nullptr) {\n    res = resolved_path;\n  }\n  return res;\n}\n\nStatus DistributedModelLoader::InitConfigOnStartup(const std::string &rank_table_json_file) {\n  std::string rank_table_json_abs_path = RealPath(rank_table_json_file);\n  if (rank_table_json_abs_path.empty()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"failed to get realpath of: \" << rank_table_json_file.c_str();\n  }\n\n  MSI_LOG(INFO) << \"Begin to parser rank table json file: \" << rank_table_json_file.c_str();\n  std::ifstream json_file(rank_table_json_abs_path);\n  if (!json_file.is_open()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"failed to open rank table file: \" << rank_table_json_file.c_str();\n  }\n  std::stringstream buffer;\n  buffer << json_file.rdbuf();\n  config_.rank_table_content = buffer.str();\n\n  json 
rank_table_json;\n  try {\n    rank_table_json = nlohmann::json::parse(config_.rank_table_content);\n  } catch (json::parse_error &e) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"parse error:\" << e.what();\n  } catch (json::out_of_range &e) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"out of range:\" << e.what();\n  } catch (json::exception &e) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"Json exception:\" << e.what();\n  }\n\n  if (!rank_table_json.is_object()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << rank_table_json_file.c_str() << \" is not json object\";\n  }\n\n  if (rank_table_json.find(\"group_list\") != rank_table_json.end()) {\n    return ParserRankTableWithGroupList(rank_table_json_file, rank_table_json);\n  } else {\n    return ParserRankTableWithServerList(rank_table_json_file, rank_table_json);\n  }\n}\n\njson DistributedModelLoader::ParserArrayInJson(const json &json_array, const std::string &str) {\n  json temp_array;\n  auto iter = json_array.find(str);\n  if (iter == json_array.end()) {\n    MSI_LOG_ERROR << \"Check rank table file failed: \" << str << \" in file is not found\";\n    return temp_array;\n  }\n  if (!iter->is_array()) {\n    MSI_LOG_ERROR << \"Check rank table file failed: \" << str << \" in file is not array\";\n    return temp_array;\n  }\n  temp_array = json_array.at(str);\n  return temp_array;\n}\n\nstd::string DistributedModelLoader::ParserStringInJson(const json &json_str, const std::string &str) {\n  std::string temp_str;\n  auto iter = json_str.find(str);\n  if (iter == json_str.end()) {\n    MSI_LOG_ERROR << \"Check rank table file failed: \" << str << \" in file is not found\";\n    return temp_str;\n  }\n  if (!iter->is_string()) {\n    MSI_LOG_ERROR << \"Check rank table file failed: \" << str << \" in file is not string\";\n    return temp_str;\n  }\n  json temp_json_str = json_str.at(str);\n  temp_str = temp_json_str.get<std::string>();\n  return temp_str;\n}\n\nStatus 
DistributedModelLoader::ParserRankTableWithGroupList(const std::string &rank_table_json_file,\n                                                            const json &rank_table_json) {\n  MSI_LOG_INFO << \"Begin to parser rank table with group list\";\n  auto server_list = ParserArrayInJson(rank_table_json, \"group_list\");\n  if (server_list.empty()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"group_list attr is empty in\" << rank_table_json_file.c_str();\n  }\n\n  size_t rank_id = 0;\n  for (auto &server : server_list) {\n    auto instance_list = ParserArrayInJson(server, \"instance_list\");\n    if (instance_list.empty()) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"instance_list attr is empty in\" << rank_table_json_file.c_str();\n    }\n\n    for (auto &instance : instance_list) {\n      auto str_server_id = ParserStringInJson(instance, \"server_id\");\n      if (str_server_id.empty()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"server_id attr is empty in\" << rank_table_json_file.c_str();\n      }\n\n      OneRankConfig one_rank_config;\n      one_rank_config.ip = str_server_id;\n\n      auto devices = ParserArrayInJson(instance, \"devices\");\n      if (devices.empty()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"devices attr is empty in\" << rank_table_json_file.c_str();\n      }\n\n      auto str_device_id = ParserStringInJson(devices.at(0), \"device_id\");\n      if (str_device_id.empty()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"device_id attr is empty in\" << rank_table_json_file.c_str();\n      }\n      uint32_t temp_device_id;\n      auto status = ConvertStr2Int(rank_table_json_file, str_device_id, \"device_id\", &temp_device_id);\n      if (status != SUCCESS) {\n        MSI_LOG_ERROR << \"Convert device_id from string to int failed\";\n        return status;\n      }\n\n      auto str_rank_id = ParserStringInJson(instance, \"rank_id\");\n      if 
(str_rank_id.empty()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"rank_id attr is empty in\" << rank_table_json_file.c_str();\n      }\n      uint32_t temp_rank_id;\n      status = ConvertStr2Int(rank_table_json_file, str_rank_id, \"rank_id\", &temp_rank_id);\n      if (status != SUCCESS) {\n        MSI_LOG_ERROR << \"Convert rank_id from string to int failed\";\n        return status;\n      }\n\n      if (rank_id != temp_rank_id) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n               << \"device size not match rank_id in\" << rank_table_json_file.c_str();\n      }\n      rank_id++;\n      one_rank_config.device_id = temp_device_id;\n      config_.rank_list.push_back(one_rank_config);\n    }\n  }\n  MSI_LOG(INFO) << \"Success parser rank table json file with group list and save to DistributedServableConfig\";\n\n  return SUCCESS;\n}\nStatus DistributedModelLoader::ConvertStr2Int(const std::string &rank_table_json_file, const std::string &para_str,\n                                              const std::string &para_key, uint32_t *para_int) const {\n  uint32_t parsed_value = 0;\n  constexpr uint32_t decimal_times = 10;\n  for (auto c : para_str) {\n    if (c < '0' || c > '9') {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << para_key << \"attr is invalid argument in\" << rank_table_json_file.c_str();\n    }\n    parsed_value = parsed_value * decimal_times + c - '0';\n  }\n  if (std::to_string(parsed_value) != para_str) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n           << para_key << \"attr is invalid argument in\" << rank_table_json_file.c_str();\n  }\n  *para_int = parsed_value;\n  return SUCCESS;\n}\n\nStatus DistributedModelLoader::ParserRankTableWithServerList(const std::string &rank_table_json_file,\n                                                             const json &rank_table_json) {\n  MSI_LOG_INFO << \"Begin to parser rank table with server list\";\n  auto server_list = 
ParserArrayInJson(rank_table_json, \"server_list\");\n  if (server_list.empty()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"server_list attr is empty in\" << rank_table_json_file.c_str();\n  }\n\n  size_t rank_id = 0;\n  for (auto &server : server_list) {\n    auto server_id = ParserStringInJson(server, \"server_id\");\n    if (server_id.empty()) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"server_id attr is empty in\" << rank_table_json_file.c_str();\n    }\n\n    auto device_list = ParserArrayInJson(server, \"device\");\n    if (device_list.empty()) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"device attr is empty in\" << rank_table_json_file.c_str();\n    }\n\n    for (auto &device : device_list) {\n      OneRankConfig one_rank_config;\n      one_rank_config.ip = server_id;\n      auto str_device_id = ParserStringInJson(device, \"device_id\");\n      if (str_device_id.empty()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"device_id attr is empty in\" << rank_table_json_file.c_str();\n      }\n      uint32_t temp_device_id;\n      auto status = ConvertStr2Int(rank_table_json_file, str_device_id, \"device_id\", &temp_device_id);\n      if (status != SUCCESS) {\n        MSI_LOG_ERROR << \"Convert device_id from string to int failed\";\n        return status;\n      }\n\n      auto str_rank_id = ParserStringInJson(device, \"rank_id\");\n      if (str_rank_id.empty()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"rank_id attr is empty in\" << rank_table_json_file.c_str();\n      }\n      uint32_t temp_rank_id;\n      status = ConvertStr2Int(rank_table_json_file, str_rank_id, \"rank_id\", &temp_rank_id);\n      if (status != SUCCESS) {\n        MSI_LOG_ERROR << \"Convert rank_id from string to int failed\";\n        return status;\n      }\n\n      if (rank_id != temp_rank_id) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n               << \"device size not match rank_id in\" << 
rank_table_json_file.c_str();\n      }\n      rank_id++;\n      one_rank_config.device_id = temp_device_id;\n      config_.rank_list.push_back(one_rank_config);\n    }\n  }\n  MSI_LOG(INFO) << \"Success parser rank table json file with server list and save to DistributedServableConfig\";\n\n  return SUCCESS;\n}\n\nStatus DistributedModelLoader::WaitAgentsReady(uint64_t wait_agents_time_in_seconds) {\n  MSI_LOG_INFO << \"Begin waiting ready of all agents\";\n  auto future = agents_promise_.get_future();\n  if (wait_agents_time_in_seconds == 0) {\n    wait_agents_time_in_seconds = UINT32_MAX;\n  }\n  const uint64_t kWaitMaxHundredMs = wait_agents_time_in_seconds * 10;\n  uint64_t i;\n  for (i = 0; i < kWaitMaxHundredMs; i++) {  //\n    if (ExitSignalHandle::Instance().HasStopped()) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Worker or Agents has stopped\";\n    }\n    // waiting for 100ms\n    if (future.wait_for(std::chrono::milliseconds(100)) == std::future_status::ready) {\n      auto flag = future.get();\n      if (!flag) {\n        return INFER_STATUS_LOG_ERROR(FAILED) << \"Failed to starting all agents, maybe some error reported\";\n      }\n      break;\n    }\n  }\n  if (i >= kWaitMaxHundredMs) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Failed to wait for ready of all agents, current agents count: \" << agent_spec_map_.size()\n           << \", rank size: \" << config_.distributed_meta.rank_size;\n  }\n  MSI_LOG_INFO << \"Success waiting ready of all agents\";\n  return SUCCESS;\n}\n\nStatus DistributedModelLoader::CompareTensorInfos(const std::vector<TensorInfo> &lefts,\n                                                  const std::vector<TensorInfo> &rights) {\n  if (lefts.size() != rights.size()) {\n    return INFER_STATUS(FAILED) << \"Size not match, left: \" << lefts.size() << \", right: \" << rights.size();\n  }\n  auto tensor_info_as_str = [](const TensorInfo &tensor_info) {\n    Status status = INFER_STATUS(SUCCESS) << 
\"size: \" << tensor_info.size << \", data type: \" << tensor_info.data_type\n                                          << \", shape: \" << tensor_info.shape;\n    return status.StatusMessage();\n  };\n  for (size_t k = 0; k < lefts.size(); k++) {\n    auto &left = lefts[k];\n    auto &right = rights[k];\n    if (left.size != right.size || left.shape != right.shape || left.data_type != right.data_type) {\n      return INFER_STATUS(FAILED) << \"Index \" << k << \" tensor not match, left- \" << tensor_info_as_str(left)\n                                  << \"; right- \" << tensor_info_as_str(right);\n    }\n  }\n  return SUCCESS;\n}\n\nStatus DistributedModelLoader::CheckAgentsInfosAndInitTensorInfos() {\n  auto rank_size = config_.distributed_meta.rank_size;\n  auto stage_size = config_.distributed_meta.stage_size;\n  auto parallel_count = rank_size / stage_size;\n  MSI_LOG_INFO << \"Check agents infos, rank size :\" << rank_size << \", stage size: \" << stage_size\n               << \", parallel count(rank size/stage size): \" << parallel_count;\n  if (agent_spec_map_.size() != rank_size) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Registered agents size \" << agent_spec_map_.size() << \" not match rank size \" << rank_size;\n  }\n  graph_num_ = agent_spec_map_[0].agent_spec_.size();\n  for (size_t i = 1; i < rank_size; i++) {\n    if (graph_num_ != agent_spec_map_[i].agent_spec_.size()) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"The number of graph not match in different agent\";\n    }\n  }\n  batch_size_ = agent_spec_map_[0].agent_spec_[0].batch_size;\n  for (size_t subgraph = 0; subgraph < agent_spec_map_[0].agent_spec_.size(); subgraph++) {\n    input_infos_[subgraph] = agent_spec_map_[0].agent_spec_[subgraph].input_infos;\n    output_infos_[subgraph] = agent_spec_map_[rank_size - 1].agent_spec_[subgraph].output_infos;\n    if (input_infos_[subgraph].empty()) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Rank \" << 0 << \" 
input count cannot be 0\";\n    }\n    if (output_infos_[subgraph].empty()) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Rank \" << rank_size - 1 << \" output count cannot be 0\";\n    }\n    const auto &input_infos = input_infos_[subgraph];\n    Status status;\n    for (size_t i = 0; i < parallel_count; i++) {\n      auto &agent_spec = agent_spec_map_[i].agent_spec_[subgraph];\n      status = CompareTensorInfos(agent_spec.input_infos, input_infos);\n      if (status != SUCCESS) {\n        status =\n          INFER_STATUS_LOG_ERROR(FAILED)\n          << \"Rank \" << i << \" input infos not match rank 0, subgraph: \" << subgraph\n          << \", you can check if the actual stage size of the distributed model matches the stage size declared in \"\n             \"servable_config.py, details: \"\n          << status.StatusMessage();\n        return status;\n      }\n    }\n    for (size_t i = parallel_count; i < rank_size; i++) {\n      auto &agent_spec = agent_spec_map_[i].agent_spec_[subgraph];\n      if (agent_spec.input_infos.empty()) {\n        if (all_stage_has_input_) {\n          return INFER_STATUS_LOG_ERROR(FAILED)\n                 << \"Expect stage 0(other stages have empty inputs) or all stages have same inputs, detect rank \"\n                 << (i - 1) << \" input count is \" << agent_spec.input_infos.size() << \", but rank \" << i\n                 << \" input count is 0, subgraph: \" << subgraph;\n        }\n        continue;\n      }\n      status = CompareTensorInfos(agent_spec.input_infos, input_infos);\n      if (status != SUCCESS) {\n        return INFER_STATUS_LOG_ERROR(FAILED)\n               << \"Expect stage 0(other stages have empty inputs) or all stages have same inputs, detect rank \" << i - 1\n               << \" and rank \" << i << \" inputs are different, subgraph: \" << subgraph\n               << \", details: \" << status.StatusMessage();\n      }\n      all_stage_has_input_ = true;\n    }\n    for (size_t i = 0; i < 
rank_size; i += parallel_count) {\n      const auto &first_item = agent_spec_map_[i].agent_spec_[subgraph];\n      for (size_t k = 0; k < parallel_count && i + k < rank_size; k++) {\n        auto rank_id = i + k;\n        const auto &agent_spec = agent_spec_map_[i + k].agent_spec_[subgraph];\n        status = CompareTensorInfos(agent_spec.output_infos, first_item.output_infos);\n        if (status != SUCCESS) {\n          status = INFER_STATUS_LOG_ERROR(FAILED)\n                   << \"Rank \" << rank_id << \" output infos not match rank \" << i << \", subgraph: \" << subgraph\n                   << \", details: \" << status.StatusMessage();\n          return status;\n        }\n        if (agent_spec.batch_size != 0 && agent_spec.batch_size != batch_size_) {\n          return INFER_STATUS_LOG_ERROR(FAILED)\n                 << \"Expect rank \" << rank_id << \" batch size  \" << agent_spec.batch_size\n                 << \" equal to 0 or rank 0's batch size \" << batch_size_ << \", subgraph: \" << subgraph;\n        }\n      }\n    }\n  }\n  return SUCCESS;\n}\n\nStatus DistributedModelLoader::CheckRankConfig() {\n  auto rank_size = config_.distributed_meta.rank_size;\n  auto stage_size = config_.distributed_meta.stage_size;\n  if (stage_size == 0 || rank_size == 0) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Rank size or stage size cannot be 0, rank size: \" << rank_size << \", stage size: \" << stage_size;\n  }\n  if (rank_size % stage_size != 0) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Rank size must be an integral multiple of stage size, rank size: \" << rank_size\n           << \", stage size: \" << stage_size;\n  }\n  if (config_.rank_list.size() != rank_size) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Rank size \" << config_.rank_list.size() << \" declared in rank table file '\" << rank_table_json_file_\n           << \"' not equal to \" << rank_size << \" declared in servable config\";\n  }\n  auto 
parallel_count = rank_size / stage_size;\n  constexpr size_t card_count_per_machine = 8;\n  if (stage_size == 1) {\n    std::map<std::string, std::set<uint32_t>> device_map;\n    for (size_t i = 0; i < rank_size; i++) {\n      const auto &item = config_.rank_list[i];\n      auto &device_id_list = device_map[item.ip];\n      if (device_id_list.count(item.device_id) > 0) {\n        return INFER_STATUS_LOG_ERROR(FAILED) << \"Check rank table config failed, device id repeatedly used by rank \"\n                                              << i << \" in device ip \" << item.ip;\n      }\n      if (item.device_id >= card_count_per_machine) {\n        return INFER_STATUS_LOG_ERROR(FAILED) << \"Check rank table config failed, device id cannot larger than 8\";\n      }\n      (void)device_id_list.emplace(item.device_id);\n    }\n  } else {\n    if (rank_size < card_count_per_machine) {\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Rank size \" << rank_size << \"must >= card count \" << card_count_per_machine\n             << \" of one machine when stage size \" << stage_size << \" > 1\";\n    }\n    for (size_t i = 0; i < rank_size; i += card_count_per_machine) {\n      const auto &first_item = config_.rank_list[i];\n      for (size_t k = 0; i + k < rank_size && k < card_count_per_machine; k++) {\n        auto rank_id = i + k;\n        const auto &item = config_.rank_list[rank_id];\n        if (k != item.device_id) {\n          return INFER_STATUS_LOG_ERROR(FAILED)\n                 << \"Check rank table config failed, expected device id of rank \" << rank_id << \" to be \" << k;\n        }\n        if (first_item.ip != item.ip) {\n          return INFER_STATUS_LOG_ERROR(FAILED)\n                 << \"Check rank table config failed, expected device ip \" << item.ip << \" of rank \" << rank_id\n                 << \" to be equal with device ip \" << first_item.ip << \" of rank \" << i;\n        }\n      }\n    }\n  }\n  MSI_LOG_INFO << \"Check rank table 
success, rank size: \" << rank_size << \", stage size: \" << stage_size\n               << \", parallel count in one stage: \" << parallel_count;\n  return SUCCESS;\n}\n\nvoid DistributedModelLoader::OnAgentFailed() {\n  MSI_LOG_INFO << \"Worker agent notify failed\";\n  SetWaitAgentsPromise(false);\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/distributed_model_loader.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_DISTRIBUTED_SERVABLE_H\n#define MINDSPORE_SERVING_WORKER_DISTRIBUTED_SERVABLE_H\n\n#include <fstream>\n#include <vector>\n#include <string>\n#include <map>\n#include <memory>\n#include <shared_mutex>\n#include <nlohmann/json.hpp>\n#include \"mindspore_serving/ccsrc/worker/model_loader_base.h\"\n#include \"worker/distributed_worker/common.h\"\n#include \"worker/distributed_worker/notify_agent/base_notify_agent.h\"\n\nusing nlohmann::json;\nnamespace mindspore {\nnamespace serving {\nstruct DistributedAgentContext {\n  std::vector<WorkerAgentSpec> agent_spec_;\n  std::shared_ptr<BaseNotifyAgent> notify_agent_ = nullptr;\n};\n\nclass MS_API DistributedModelLoader final : public DirectModelLoaderBase {\n public:\n  DistributedModelLoader() = default;\n  ~DistributedModelLoader();\n  // from python, worker.py\n  Status LoadModel(const std::string &servable_name, const std::string &rank_table_json_file,\n                   uint64_t wait_agents_time_in_seconds);\n\n  // invoke from agent\n  Status GetDistributedServableConfig(DistributedServableConfig *config) const;\n  // send model and group\n\n  // register and unregister agent, agent_spec_list_\n  Status RegisterAgent(const std::vector<WorkerAgentSpec> &agent_specs);\n  Status OnAgentExit();\n\n  // predict, use config_ and agent_spec_list_\n  Status 
Predict(const std::vector<TensorBasePtr> &input, std::vector<TensorBasePtr> *output,\n                 uint64_t subgraph = 0) override;\n\n  std::vector<TensorInfo> GetInputInfos(uint64_t subgraph = 0) const override;\n  std::vector<TensorInfo> GetOutputInfos(uint64_t subgraph = 0) const override;\n  uint64_t GetBatchSize() const override;\n  void Clear() override;\n  void OnAgentFailed();\n  uint64_t GetGraphNum() const override;\n\n  std::string GetModelKey() const { return model_key_; }\n\n private:\n  DistributedServableConfig config_;\n  std::atomic_bool config_loaded_ = false;\n\n  std::string model_key_;\n  std::atomic_bool model_loaded_ = false;\n  uint64_t graph_num_ = 0;\n  std::shared_mutex rw_mutex_;\n  std::mutex wait_mutex_;\n  std::map<uint32_t, DistributedAgentContext> agent_spec_map_;\n  std::string rank_table_json_file_;\n\n  std::map<uint64_t, std::vector<TensorInfo>> input_infos_;\n  std::map<uint64_t, std::vector<TensorInfo>> output_infos_;\n  uint64_t batch_size_;\n  bool all_stage_has_input_ = false;\n\n  std::atomic_flag promise_set_flag_ = ATOMIC_FLAG_INIT;\n  std::atomic_bool registered_end_flag_ = false;\n  std::promise<bool> agents_promise_;\n\n  Status InitConfigOnStartup(const std::string &rank_table_json_file);\n  Status WaitAgentsReady(uint64_t wait_agents_time_in_seconds);\n  Status CheckAgentsInfosAndInitTensorInfos();\n  Status CompareTensorInfos(const std::vector<TensorInfo> &lefts, const std::vector<TensorInfo> &rights);\n  Status CheckRankConfig();\n  void SetWaitAgentsPromise(bool flag);\n  Status PredictInner(const std::vector<TensorBasePtr> &input, std::vector<TensorBasePtr> *output,\n                      uint64_t subgraph = 0);\n  // agent stubs\n  Status ParserRankTableWithGroupList(const std::string &rank_table_json_file, const json &rank_table_json);\n\n  Status ParserRankTableWithServerList(const std::string &rank_table_json_file, const json &rank_table_json);\n\n  json ParserArrayInJson(const json &json_array, const 
std::string &str);\n\n  std::string ParserStringInJson(const json &json_str, const std::string &str);\n  Status ConvertStr2Int(const std::string &rank_table_json_file, const std::string &para_str,\n                        const std::string &para_key, uint32_t *para_int) const;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_WORKER_DISTRIBUTED_SERVABLE_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/distributed_process/distributed_process.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/distributed_worker/distributed_process/distributed_process.h\"\n#include <vector>\n#include \"worker/worker.h\"\n#include \"common/proto_tensor.h\"\n\nnamespace mindspore {\nnamespace serving {\ngrpc::Status MSDistributedImpl::AgentRegister(grpc::ServerContext *context, const proto::AgentRegisterRequest *request,\n                                              proto::AgentRegisterReply *reply) {\n  MSI_EXCEPTION_IF_NULL(request);\n  MSI_EXCEPTION_IF_NULL(reply);\n  std::vector<WorkerAgentSpec> agent_specs;\n  for (auto &spec : request->agent_spec()) {\n    WorkerAgentSpec agent_spec;\n    agent_spec.agent_address = request->address();\n    GrpcTensorHelper::CopyFromAgentSpec(spec, &agent_spec);\n    agent_specs.push_back(agent_spec);\n  }\n  if (agent_specs.size() == 0) {\n    MSI_LOG(ERROR) << \"Agent Register FAILED, agent_specs size is 0\";\n  }\n  Status status(FAILED);\n  status = servable_->RegisterAgent(agent_specs);\n  if (status != SUCCESS) {\n    MSI_LOG(ERROR) << \"Agent Register FAILED\";\n  }\n  watcher_->StartWatch(request->address());\n  return grpc::Status::OK;\n}\n\ngrpc::Status MSDistributedImpl::AgentExit(grpc::ServerContext *context, const proto::AgentExitRequest *request,\n                                          proto::AgentExitReply *reply) {\n  MSI_EXCEPTION_IF_NULL(request);\n  
MSI_EXCEPTION_IF_NULL(reply);\n  if (request->address_choice_case() == proto::AgentExitRequest::kAddress) {\n    watcher_->StopWatch(request->address());\n  }\n  MSI_LOG_INFO << \"Agent exit, address: '\" << request->address() << \"', agent ip: '\" << request->agent_ip() << \"'\";\n  servable_->OnAgentExit();\n  Worker::GetInstance().StopServable();\n  return grpc::Status::OK;\n}\n\ngrpc::Status MSDistributedImpl::AgentFailed(grpc::ServerContext *context, const proto::AgentFailedRequest *request,\n                                            proto::AgentFailedReply *reply) {\n  if (Worker::GetInstance().IsRunning()) {\n    MSI_LOG_ERROR << \"Expect worker should not be running\";\n    Worker::GetInstance().StopServable();\n  } else {\n    servable_->OnAgentFailed();\n  }\n  return grpc::Status::OK;\n}\n\ngrpc::Status MSDistributedImpl::AgentConfigAcquire(grpc::ServerContext *context,\n                                                   const proto::AgentConfigAcquireRequest *request,\n                                                   proto::AgentConfigAcquireReply *reply) {\n  Status status(FAILED);\n  DistributedServableConfig agent_config;\n  status = servable_->GetDistributedServableConfig(&agent_config);\n  if (status != SUCCESS) {\n    MSI_LOG(ERROR) << \"Get distributed servable config failed\";\n    return grpc::Status::CANCELLED;\n  }\n\n  MSI_LOG(INFO) << \"Begin to set DistributedServableConfig info in reply message\";\n  // set reply message:AgentConfigAcquireReply, parameter:rank_table_content\n  reply->set_rank_table_content(agent_config.rank_table_content);\n  // set reply message:AgentConfigAcquireReply, parameter:rank_list\n  auto &agent_rank_list = agent_config.rank_list;\n  for (auto &agent_rank : agent_rank_list) {\n    auto rank_list = reply->add_rank_list();\n    rank_list->set_ip(agent_rank.ip);\n    rank_list->set_device_id(agent_rank.device_id);\n  }\n  // set reply message:AgentConfigAcquireReply, parameter:common_meta\n  auto 
reply_common_meta = reply->mutable_common_meta();\n  reply_common_meta->set_servable_name(agent_config.common_meta.servable_name);\n  reply_common_meta->set_model_key(agent_config.common_meta.model_key);\n  reply_common_meta->set_with_batch_dim(agent_config.common_meta.with_batch_dim);\n  auto &without_batch_dim_inputs_list = agent_config.common_meta.without_batch_dim_inputs;\n  for (auto &without_batch_dim_input : without_batch_dim_inputs_list) {\n    reply_common_meta->add_without_batch_dim_inputs(without_batch_dim_input);\n  }\n  auto &proto_input_count = *(reply_common_meta->mutable_inputs_count());\n  for (auto &inputs_count : agent_config.common_meta.inputs_count) {\n    proto_input_count[inputs_count.first] = inputs_count.second;\n  }\n  auto &proto_output_count = *(reply_common_meta->mutable_outputs_count());\n  for (auto &outputs_count : agent_config.common_meta.outputs_count) {\n    proto_output_count[outputs_count.first] = outputs_count.second;\n  }\n\n  // set reply message:AgentConfigAcquireReply, parameter:distributed_meta\n  auto reply_distributed_meta = reply->mutable_distributed_meta();\n  reply_distributed_meta->set_rank_size(agent_config.distributed_meta.rank_size);\n  reply_distributed_meta->set_stage_size(agent_config.distributed_meta.stage_size);\n  MSI_LOG(INFO) << \"Success to set DistributedServableConfig info in reply message\";\n\n  return grpc::Status::OK;\n}\n\ngrpc::Status MSDistributedImpl::Ping(grpc::ServerContext *context, const proto::PingRequest *request,\n                                     proto::PingReply *reply) {\n  MSI_EXCEPTION_IF_NULL(request);\n  MSI_EXCEPTION_IF_NULL(reply);\n  watcher_->RecvPing(request->address());\n  return grpc::Status::OK;\n}\n\ngrpc::Status MSDistributedImpl::Pong(grpc::ServerContext *context, const proto::PongRequest *request,\n                                     proto::PongReply *reply) {\n  MSI_EXCEPTION_IF_NULL(request);\n  MSI_EXCEPTION_IF_NULL(reply);\n  
watcher_->RecvPong(request->address());\n  return grpc::Status::OK;\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/distributed_process/distributed_process.h",
    "content": "/**\r\n * Copyright 2021 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MINDSPORE_SERVING_DISTRIBUTED_WORKER_WORKER_PROCESS_H\r\n#define MINDSPORE_SERVING_DISTRIBUTED_WORKER_WORKER_PROCESS_H\r\n\r\n#include <grpcpp/grpcpp.h>\r\n#include <grpcpp/health_check_service_interface.h>\r\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\r\n#include <memory>\r\n#include <string>\r\n#include \"common/serving_common.h\"\r\n#include \"common/heart_beat.h\"\r\n#include \"proto/ms_service.pb.h\"\r\n#include \"proto/ms_service.grpc.pb.h\"\r\n#include \"proto/ms_distributed.pb.h\"\r\n#include \"proto/ms_distributed.grpc.pb.h\"\r\n#include \"worker/distributed_worker/distributed_model_loader.h\"\r\n#include \"worker/grpc/worker_process.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {\r\n// Service Implement\r\nclass MSDistributedImpl {\r\n public:\r\n  explicit MSDistributedImpl(std::shared_ptr<DistributedModelLoader> servable, const std::string server_address)\r\n      : servable_(servable) {\r\n    if (!watcher_) {\r\n      watcher_ = std::make_shared<Watcher<proto::MSAgent, proto::MSAgent>>(server_address);\r\n    }\r\n  }\r\n  ~MSDistributedImpl() = default;\r\n  grpc::Status AgentRegister(grpc::ServerContext *context, const proto::AgentRegisterRequest *request,\r\n                             proto::AgentRegisterReply *reply);\r\n  grpc::Status 
AgentExit(grpc::ServerContext *context, const proto::AgentExitRequest *request,\r\n                         proto::AgentExitReply *reply);\r\n  grpc::Status AgentFailed(grpc::ServerContext *context, const proto::AgentFailedRequest *request,\r\n                           proto::AgentFailedReply *reply);\r\n  grpc::Status AgentConfigAcquire(grpc::ServerContext *context, const proto::AgentConfigAcquireRequest *request,\r\n                                  proto::AgentConfigAcquireReply *reply);\r\n\r\n  grpc::Status Ping(grpc::ServerContext *context, const proto::PingRequest *request, proto::PingReply *reply);\r\n  grpc::Status Pong(grpc::ServerContext *context, const proto::PongRequest *request, proto::PongReply *reply);\r\n\r\n private:\r\n  std::shared_ptr<DistributedModelLoader> servable_;\r\n\r\n  std::shared_ptr<Watcher<proto::MSAgent, proto::MSAgent>> watcher_;\r\n};\r\n\r\n}  // namespace serving\r\n}  // namespace mindspore\r\n\r\n#endif  // MINDSPORE_SERVING_DISTRIBUTED_WORKER_WORKER_PROCESS_H\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/distributed_process/distributed_server.h",
    "content": "/**\r\n * Copyright 2020 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MINDSPORE_SERVING_WORKER_DISTRIBUTED_WORKER_SERVER_H\r\n#define MINDSPORE_SERVING_WORKER_DISTRIBUTED_WORKER_SERVER_H\r\n\r\n#include <grpcpp/grpcpp.h>\r\n#include <grpcpp/health_check_service_interface.h>\r\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\r\n#include <memory>\r\n#include <string>\r\n#include \"common/serving_common.h\"\r\n#include \"proto/ms_worker.pb.h\"\r\n#include \"proto/ms_worker.grpc.pb.h\"\r\n#include \"common/grpc_async_server.h\"\r\n#include \"worker/grpc/worker_process.h\"\r\n#include \"worker/grpc/worker_server.h\"\r\n#include \"worker/distributed_worker/distributed_process/distributed_process.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {\r\ntemplate <class Derived>\r\nclass DistributedServiceContext\r\n    : public GrpcAsyncServiceContext<MSDistributedImpl, proto::MSDistributedWorker::AsyncService, Derived> {\r\n public:\r\n  DistributedServiceContext(MSDistributedImpl *service_impl, proto::MSDistributedWorker::AsyncService *async_service,\r\n                            grpc::ServerCompletionQueue *cq)\r\n      : GrpcAsyncServiceContext<MSDistributedImpl, proto::MSDistributedWorker::AsyncService, Derived>(\r\n          service_impl, async_service, cq) {}\r\n  virtual void StartEnqueueRequest() = 0;\r\n  virtual void HandleRequest() = 
0;\r\n};\r\n\r\n// Service Implement\r\nclass WorkerAgentRegisterContext : public DistributedServiceContext<WorkerAgentRegisterContext> {\r\n public:\r\n  WorkerAgentRegisterContext(MSDistributedImpl *service_impl, proto::MSDistributedWorker::AsyncService *async_service,\r\n                             grpc::ServerCompletionQueue *cq)\r\n      : DistributedServiceContext<WorkerAgentRegisterContext>(service_impl, async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~WorkerAgentRegisterContext() = default;\r\n\r\n  void StartEnqueueRequest() override {\r\n    async_service_->RequestAgentRegister(&ctx_, &request_, &responder_, cq_, cq_, this);\r\n  }\r\n\r\n  void HandleRequest() override {\r\n    grpc::Status status = service_impl_->AgentRegister(&ctx_, &request_, &response_);\r\n    responder_.Finish(response_, status, this);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::AgentRegisterReply> responder_;\r\n  proto::AgentRegisterRequest request_;\r\n  proto::AgentRegisterReply response_;\r\n};\r\n\r\nclass WorkerAgentExitContext : public DistributedServiceContext<WorkerAgentExitContext> {\r\n public:\r\n  WorkerAgentExitContext(MSDistributedImpl *service_impl, proto::MSDistributedWorker::AsyncService *async_service,\r\n                         grpc::ServerCompletionQueue *cq)\r\n      : DistributedServiceContext<WorkerAgentExitContext>(service_impl, async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~WorkerAgentExitContext() = default;\r\n\r\n  void StartEnqueueRequest() override {\r\n    async_service_->RequestAgentExit(&ctx_, &request_, &responder_, cq_, cq_, this);\r\n  }\r\n\r\n  void HandleRequest() override {\r\n    grpc::Status status = service_impl_->AgentExit(&ctx_, &request_, &response_);\r\n    responder_.Finish(response_, status, this);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::AgentExitReply> responder_;\r\n  proto::AgentExitRequest request_;\r\n  proto::AgentExitReply response_;\r\n};\r\n\r\nclass 
WorkerAgentFailedContext : public DistributedServiceContext<WorkerAgentFailedContext> {\r\n public:\r\n  WorkerAgentFailedContext(MSDistributedImpl *service_impl, proto::MSDistributedWorker::AsyncService *async_service,\r\n                           grpc::ServerCompletionQueue *cq)\r\n      : DistributedServiceContext<WorkerAgentFailedContext>(service_impl, async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~WorkerAgentFailedContext() = default;\r\n\r\n  void StartEnqueueRequest() override {\r\n    async_service_->RequestAgentFailed(&ctx_, &request_, &responder_, cq_, cq_, this);\r\n  }\r\n\r\n  void HandleRequest() override {\r\n    grpc::Status status = service_impl_->AgentFailed(&ctx_, &request_, &response_);\r\n    responder_.Finish(response_, status, this);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::AgentFailedReply> responder_;\r\n  proto::AgentFailedRequest request_;\r\n  proto::AgentFailedReply response_;\r\n};\r\n\r\nclass WorkerAgentConfigAcquireContext : public DistributedServiceContext<WorkerAgentConfigAcquireContext> {\r\n public:\r\n  WorkerAgentConfigAcquireContext(MSDistributedImpl *service_impl,\r\n                                  proto::MSDistributedWorker::AsyncService *async_service,\r\n                                  grpc::ServerCompletionQueue *cq)\r\n      : DistributedServiceContext<WorkerAgentConfigAcquireContext>(service_impl, async_service, cq),\r\n        responder_(&ctx_) {}\r\n\r\n  ~WorkerAgentConfigAcquireContext() = default;\r\n\r\n  void StartEnqueueRequest() override {\r\n    async_service_->RequestAgentConfigAcquire(&ctx_, &request_, &responder_, cq_, cq_, this);\r\n  }\r\n\r\n  void HandleRequest() override {\r\n    grpc::Status status = service_impl_->AgentConfigAcquire(&ctx_, &request_, &response_);\r\n    responder_.Finish(response_, status, this);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::AgentConfigAcquireReply> responder_;\r\n  proto::AgentConfigAcquireRequest 
request_;\r\n  proto::AgentConfigAcquireReply response_;\r\n};\r\n\r\nclass WorkerPingContext : public DistributedServiceContext<WorkerPingContext> {\r\n public:\r\n  WorkerPingContext(MSDistributedImpl *service_impl, proto::MSDistributedWorker::AsyncService *async_service,\r\n                    grpc::ServerCompletionQueue *cq)\r\n      : DistributedServiceContext<WorkerPingContext>(service_impl, async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~WorkerPingContext() = default;\r\n\r\n  void StartEnqueueRequest() override { async_service_->RequestPing(&ctx_, &request_, &responder_, cq_, cq_, this); }\r\n\r\n  void HandleRequest() override {\r\n    grpc::Status status = service_impl_->Ping(&ctx_, &request_, &response_);\r\n    responder_.Finish(response_, status, this);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::PingReply> responder_;\r\n  proto::PingRequest request_;\r\n  proto::PingReply response_;\r\n};\r\n\r\nclass WorkerPongContext : public DistributedServiceContext<WorkerPongContext> {\r\n public:\r\n  WorkerPongContext(MSDistributedImpl *service_impl, proto::MSDistributedWorker::AsyncService *async_service,\r\n                    grpc::ServerCompletionQueue *cq)\r\n      : DistributedServiceContext<WorkerPongContext>(service_impl, async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~WorkerPongContext() = default;\r\n\r\n  void StartEnqueueRequest() override { async_service_->RequestPong(&ctx_, &request_, &responder_, cq_, cq_, this); }\r\n\r\n  void HandleRequest() override {\r\n    grpc::Status status = service_impl_->Pong(&ctx_, &request_, &response_);\r\n    responder_.Finish(response_, status, this);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::PongReply> responder_;\r\n  proto::PongRequest request_;\r\n  proto::PongReply response_;\r\n};\r\n\r\nclass DistributedWorkerGrpcServer : public GrpcAsyncServer<proto::MSDistributedWorker::AsyncService> {\r\n public:\r\n  
DistributedWorkerGrpcServer(std::shared_ptr<DistributedModelLoader> servable, const std::string server_address)\r\n      : GrpcAsyncServer<proto::MSDistributedWorker::AsyncService>(),\r\n        service_impl_(MSDistributedImpl(servable, server_address)) {}\r\n\r\n  void EnqueueRequests() override {\r\n    WorkerAgentRegisterContext::EnqueueRequest(&service_impl_, &svc_, cq_.get());\r\n    WorkerAgentExitContext::EnqueueRequest(&service_impl_, &svc_, cq_.get());\r\n    WorkerAgentFailedContext::EnqueueRequest(&service_impl_, &svc_, cq_.get());\r\n    WorkerAgentConfigAcquireContext::EnqueueRequest(&service_impl_, &svc_, cq_.get());\r\n    WorkerPingContext::EnqueueRequest(&service_impl_, &svc_, cq_.get());\r\n    WorkerPongContext::EnqueueRequest(&service_impl_, &svc_, cq_.get());\r\n  }\r\n\r\n private:\r\n  MSDistributedImpl service_impl_;\r\n};\r\n\r\n}  // namespace serving\r\n}  // namespace mindspore\r\n\r\n#endif  // MINDSPORE_SERVING_WORKER_DISTRIBUTED_WORKER_SERVER_H\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/notify_agent/base_notify_agent.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_BASE_NOTIFY_AGENT_H\n#define MINDSPORE_SERVING_WORKER_BASE_NOTIFY_AGENT_H\n#include <vector>\n#include <functional>\n#include <future>\n#include \"common/serving_common.h\"\n#include \"common/servable.h\"\n#include \"proto/ms_agent.pb.h\"\n#include \"common/grpc_client.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API BaseNotifyAgent {\n public:\n  BaseNotifyAgent() = default;\n  virtual ~BaseNotifyAgent() = default;\n  virtual Status Exit() = 0;\n  virtual Status DispatchAsync(const proto::DistributedPredictRequest &request, proto::DistributedPredictReply *reply,\n                               AsyncPredictCallback callback) = 0;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_WORKER_BASE_NOTIFY_AGENT_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/notify_agent/notify_agent.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"worker/distributed_worker/notify_agent/notify_agent.h\"\n#include <grpcpp/grpcpp.h>\n#include <grpcpp/health_check_service_interface.h>\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\n#include <thread>\n#include \"common/exit_handle.h\"\n#include \"common/grpc_server.h\"\n#include \"common/grpc_client.h\"\n\nnamespace mindspore {\nnamespace serving {\nGrpcNotifyAgent::GrpcNotifyAgent(const std::string &agent_address) {\n  agent_address_ = agent_address;\n  std::shared_ptr<grpc::Channel> channel = GrpcServer::CreateChannel(agent_address_);\n  stub_ = proto::MSAgent::NewStub(channel);\n}\n\nGrpcNotifyAgent::~GrpcNotifyAgent() = default;\n\nStatus GrpcNotifyAgent::Exit() {\n  if (stub_) {\n    proto::DistributedExitRequest request;\n    request.set_address(agent_address_);\n    proto::DistributedExitReply reply;\n    grpc::ClientContext context;\n    const int32_t TIME_OUT = 1;\n    std::chrono::system_clock::time_point deadline = std::chrono::system_clock::now() + std::chrono::seconds(TIME_OUT);\n    context.set_deadline(deadline);\n\n    auto status = stub_->Exit(&context, request, &reply);\n    if (status.ok()) {\n      MSI_LOG_INFO << \"Notify one agent exit success, agent address: \" << agent_address_;\n    } else {\n      MSI_LOG_INFO << \"Notify one agent exit failed, agent address: \" << agent_address_\n      
             << \", error: \" << status.error_code() << \", \" << status.error_message();\n    }\n  }\n  return SUCCESS;\n}\n\nStatus GrpcNotifyAgent::DispatchAsync(const proto::DistributedPredictRequest &request,\n                                      proto::DistributedPredictReply *reply, AsyncPredictCallback callback) {\n  if (!stub_) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Predict failed, agent gRPC has not been inited or has already exited, agent address \" << agent_address_;\n  }\n  if (!distributed_client_) {\n    distributed_client_ = std::make_unique<MSDistributedClient>();\n    distributed_client_->Start();\n  }\n  distributed_client_->PredictAsync(request, reply, stub_.get(), callback, agent_address_);\n  return SUCCESS;\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/notify_agent/notify_agent.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_NOTIFY_AGENT_H\n#define MINDSPORE_SERVING_WORKER_NOTIFY_AGENT_H\n#include <vector>\n#include <string>\n#include <memory>\n#include <atomic>\n#include \"worker/distributed_worker/notify_agent/base_notify_agent.h\"\n#include \"proto/ms_agent.pb.h\"\n#include \"proto/ms_agent.grpc.pb.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API GrpcNotifyAgent : public BaseNotifyAgent {\n public:\n  explicit GrpcNotifyAgent(const std::string &worker_address);\n  ~GrpcNotifyAgent() override;\n\n  Status Exit() override;\n\n  Status DispatchAsync(const proto::DistributedPredictRequest &request, proto::DistributedPredictReply *reply,\n                       AsyncPredictCallback callback) override;\n\n private:\n  std::string agent_address_;\n  std::shared_ptr<proto::MSAgent::Stub> stub_ = nullptr;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_WORKER_NOTIFY_AGENT_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/notify_distributed/notify_worker.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"worker/distributed_worker/notify_distributed/notify_worker.h\"\n#include <grpcpp/grpcpp.h>\n#include <grpcpp/health_check_service_interface.h>\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\n#include <thread>\n#include \"common/exit_handle.h\"\n#include \"common/grpc_server.h\"\n#include \"common/proto_tensor.h\"\n\nnamespace mindspore {\nnamespace serving {\nGrpcNotifyDistributeWorker::GrpcNotifyDistributeWorker(const std::string &distributed_address,\n                                                       const std::string &agent_address)\n    : distributed_address_(distributed_address), agent_address_(agent_address) {\n  auto channel = GrpcServer::CreateChannel(distributed_address_);\n  stub_ = proto::MSDistributedWorker::NewStub(channel);\n}\n\nGrpcNotifyDistributeWorker::~GrpcNotifyDistributeWorker() = default;\n\nStatus GrpcNotifyDistributeWorker::Register(const std::vector<WorkerAgentSpec> &worker_specs) {\n  const int32_t REGISTER_INTERVAL = 1;\n\n  MSI_LOG(INFO) << \"Register to worker \" << distributed_address_ << \", agent address: \" << agent_address_;\n  proto::AgentRegisterRequest request;\n  GrpcTensorHelper::CopyFromWorkerAgentSpec(worker_specs, &request);\n  request.set_address(agent_address_);\n  proto::AgentRegisterReply reply;\n  grpc::ClientContext context;\n  
std::chrono::system_clock::time_point deadline =\n    std::chrono::system_clock::now() + std::chrono::seconds(REGISTER_INTERVAL);\n  context.set_deadline(deadline);\n  grpc::Status status = stub_->AgentRegister(&context, request, &reply);\n  if (status.ok()) {\n    MSI_LOG(INFO) << \"Register SUCCESS \";\n    return SUCCESS;\n  }\n  return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n         << \"Register to worker failed, grpc error: \" << status.error_code() << \", \" << status.error_message();\n}\n\nStatus GrpcNotifyDistributeWorker::Unregister() {\n  if (is_stoped_.load()) {\n    return SUCCESS;\n  }\n  is_stoped_ = true;\n  proto::AgentExitRequest request;\n  request.set_address(agent_address_);\n  proto::AgentExitReply reply;\n  grpc::ClientContext context;\n  const int32_t TIME_OUT = 1;\n  std::chrono::system_clock::time_point deadline = std::chrono::system_clock::now() + std::chrono::seconds(TIME_OUT);\n  context.set_deadline(deadline);\n  grpc::Status status = stub_->AgentExit(&context, request, &reply);\n  if (status.ok()) {\n    MSI_LOG(INFO) << \"Exit SUCCESS \";\n    return SUCCESS;\n  }\n  return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Exit Failed\";\n}\n\nStatus GrpcNotifyDistributeWorker::NotifyFailed(const std::string &distributed_address) {\n  auto channel = GrpcServer::CreateChannel(distributed_address);\n  auto stub = proto::MSDistributedWorker::NewStub(channel);\n\n  grpc::ClientContext context;\n  proto::AgentFailedRequest request;\n  proto::AgentFailedReply reply;\n  grpc::Status status = stub->AgentFailed(&context, request, &reply);\n  if (status.ok()) {\n    MSI_LOG(INFO) << \"Success to notify failure of agent\";\n    return SUCCESS;\n  }\n  return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Failed to notify failure of agent\";\n}\n\nvoid GrpcNotifyDistributeWorker::StartupNotifyExit(const std::string &distributed_address,\n                                                   const std::string &agent_ip) {\n  auto channel = 
GrpcServer::CreateChannel(distributed_address);\n  auto stub = proto::MSDistributedWorker::NewStub(channel);\n\n  grpc::ClientContext context;\n  proto::AgentExitRequest request;\n  request.set_agent_ip(agent_ip);\n  proto::AgentExitReply reply;\n  grpc::Status status = stub->AgentExit(&context, request, &reply);\n  if (status.ok()) {\n    MSI_LOG(INFO) << \"Success to notify exit of agent start up process\";\n  } else {\n    MSI_LOG(INFO) << \"Failed to notify exit of agent start up process\";\n  }\n}\n\nStatus GrpcNotifyDistributeWorker::GetAgentsConfigsFromWorker(const std::string &distributed_address,\n                                                              DistributedServableConfig *config) {\n  const int32_t REGISTER_TIME_OUT = 60;\n  const int32_t REGISTER_INTERVAL = 1;\n  auto loop = REGISTER_TIME_OUT;\n  while (loop-- && !ExitSignalHandle::Instance().HasStopped()) {\n    auto channel = GrpcServer::CreateChannel(distributed_address);\n    auto stub = proto::MSDistributedWorker::NewStub(channel);\n\n    grpc::ClientContext context;\n    proto::AgentConfigAcquireRequest request;\n    proto::AgentConfigAcquireReply reply;\n    std::chrono::system_clock::time_point deadline =\n      std::chrono::system_clock::now() + std::chrono::seconds(REGISTER_INTERVAL);\n    context.set_deadline(deadline);\n    grpc::Status status = stub->AgentConfigAcquire(&context, request, &reply);\n    if (status.ok()) {\n      return ParseAgentConfigAcquireReply(reply, config);\n    }\n    MSI_LOG_INFO << \"Grpc message: \" << status.error_code() << \", \" << status.error_message();\n    std::this_thread::sleep_for(std::chrono::seconds(REGISTER_INTERVAL));\n  }\n  if (ExitSignalHandle::Instance().HasStopped()) {\n    return INFER_STATUS_LOG_WARNING(FAILED) << \"Agent exit, stop get Agents configs from Worker\";\n  }\n  return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Failed to get Agents configs from Worker, worker is not available.\";\n}\nStatus 
GrpcNotifyDistributeWorker::ParseAgentConfigAcquireReply(const proto::AgentConfigAcquireReply &reply,\n                                                                DistributedServableConfig *config) {\n  MSI_LOG(INFO) << \"Success to get Agents configs from Worker, and begin to parser\";\n  // parse reply message:AgentConfigAcquireReply, parameter:rank_table_content\n  config->rank_table_content = reply.rank_table_content();\n  // parse reply message:AgentConfigAcquireReply, parameter:rank_list\n  for (auto &temp_rank : reply.rank_list()) {\n    OneRankConfig one_rank_config;\n    one_rank_config.ip = temp_rank.ip();\n    one_rank_config.device_id = temp_rank.device_id();\n    config->rank_list.push_back(one_rank_config);\n  }\n  // parse reply message:AgentConfigAcquireReply, parameter:common_meta\n  auto &temp_common_meta = reply.common_meta();\n  config->common_meta.servable_name = temp_common_meta.servable_name();\n  config->common_meta.model_key = temp_common_meta.model_key();\n  config->common_meta.with_batch_dim = temp_common_meta.with_batch_dim();\n  for (auto &temp_without_batch_dim_inputs : temp_common_meta.without_batch_dim_inputs()) {\n    config->common_meta.without_batch_dim_inputs.push_back(temp_without_batch_dim_inputs);\n  }\n  for (auto &count : temp_common_meta.inputs_count()) {\n    config->common_meta.inputs_count[count.first] = count.second;\n  }\n  for (auto &count : temp_common_meta.outputs_count()) {\n    config->common_meta.outputs_count[count.first] = count.second;\n  }\n\n  // parse reply message:AgentConfigAcquireReply, parameter:distributed_meta\n  auto &temp_distributed_meta = reply.distributed_meta();\n  config->distributed_meta.rank_size = temp_distributed_meta.rank_size();\n  config->distributed_meta.stage_size = temp_distributed_meta.stage_size();\n  MSI_LOG(INFO) << \"Success to parser reply message and save to DistributedServableConfig\";\n\n  return SUCCESS;\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/notify_distributed/notify_worker.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_NOTIFY_WORKER_H\n#define MINDSPORE_SERVING_WORKER_NOTIFY_WORKER_H\n#include <vector>\n#include <string>\n#include <memory>\n#include \"common/serving_common.h\"\n#include \"worker/distributed_worker/common.h\"\n#include \"proto/ms_distributed.pb.h\"\n#include \"proto/ms_distributed.grpc.pb.h\"\n#include \"proto/ms_worker.pb.h\"\n#include \"proto/ms_worker.grpc.pb.h\"\nnamespace mindspore {\nnamespace serving {\nclass MS_API GrpcNotifyDistributeWorker {\n public:\n  GrpcNotifyDistributeWorker(const std::string &distributed_address, const std::string &agent_address);\n  ~GrpcNotifyDistributeWorker();\n  Status Register(const std::vector<WorkerAgentSpec> &agent_specs);\n  Status Unregister();\n  // from start up, not agent\n  static Status NotifyFailed(const std::string &distributed_address);\n  static Status GetAgentsConfigsFromWorker(const std::string &distributed_address, DistributedServableConfig *config);\n  static void StartupNotifyExit(const std::string &distributed_address, const std::string &agent_ip);\n\n private:\n  static Status ParseAgentConfigAcquireReply(const proto::AgentConfigAcquireReply &reply,\n                                             DistributedServableConfig *config);\n  std::string distributed_address_;\n  std::string agent_address_;\n  
std::unique_ptr<proto::MSDistributedWorker::Stub> stub_;\n  std::atomic<bool> is_stoped_{false};\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_WORKER_NOTIFY_WORKER_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/worker_agent.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"worker/distributed_worker/worker_agent.h\"\n#include <memory>\n#include <string>\n#include \"worker/distributed_worker/agent_process/agent_process.h\"\n#include \"worker/distributed_worker/notify_distributed/notify_worker.h\"\n#include \"common/exit_handle.h\"\n#include \"common/proto_tensor.h\"\n\nnamespace mindspore {\nnamespace serving {\nWorkerAgent &WorkerAgent::Instance() {\n  static WorkerAgent instance;\n  return instance;\n}\n\nStatus WorkerAgent::Clear() {\n  if (notify_worker_) {\n    if (exit_notify_worker_) {\n      notify_worker_->Unregister();\n      MSI_LOG_INFO << \"End unregister to worker\";\n    }\n    notify_worker_ = nullptr;\n  }\n  grpc_server_.Stop();\n  if (session_ != nullptr) {\n    session_->UnloadModel();\n    session_ = nullptr;\n  }\n  return SUCCESS;\n}\n\nStatus WorkerAgent::StartAgent(const AgentStartUpConfig &config, const std::string &dec_key,\n                               const std::string &dec_mode) {\n  session_ = InferenceLoader::Instance().CreateMindSporeInfer();\n  if (session_ == nullptr) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Create MindSpore infer failed\";\n  }\n  Status status;\n  config_ = config;\n  const auto &common_meta = config.common_meta;\n  auto enable_lite = InferenceLoader::Instance().GetEnableLite();\n  status = 
session_->LoadModelFromFile(kDeviceTypeAscend, config.device_id, config.model_file_names, kMindIR,\n                                       common_meta.with_batch_dim, common_meta.without_batch_dim_inputs, ModelContext(),\n                                       dec_key, dec_mode, {}, enable_lite);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"LoadModelFromFile failed, servable name: \" << common_meta.servable_name\n                  << \", rank_id: \" << config.rank_id << \", device id: \" << config.device_id\n                  << \", model file: \" << config.model_file_names\n                  << \", rank table file: \" << config.rank_table_json_file_name\n                  << \", group config file: \" << config.group_file_names;\n    return status;\n  }\n  status = StartGrpcServer();\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Start agent grpc server failed, agent address: \" << config.agent_address;\n    return status;\n  }\n  status = RegisterAgent();\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Register agent failed, agent address: \" << config.agent_address\n                  << \", distributed worker address: \" << config.distributed_address;\n    return status;\n  }\n  MSI_LOG_INFO << \"Start agent success, servable name: \" << common_meta.servable_name << \", rank_id: \" << config.rank_id\n               << \", device id: \" << config.device_id << \", model file: \" << config.model_file_names\n               << \", rank table file: \" << config.rank_table_json_file_name\n               << \", group config file: \" << config.group_file_names;\n  return SUCCESS;\n}\n\nStatus WorkerAgent::StartGrpcServer() {\n  std::string server_address = config_.agent_address;\n  return grpc_server_.Start(std::make_shared<MSAgentImpl>(server_address), server_address, gRpcMaxMBMsgSize, \"Agent\");\n}\n\nStatus WorkerAgent::RegisterAgent() {\n  notify_worker_ = std::make_shared<GrpcNotifyDistributeWorker>(config_.distributed_address, 
config_.agent_address);\n  auto graph_num = session_->GetSubGraphNum();\n  if (graph_num == 0) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"RegisterAgent failed, Agent graph_num error\";\n  }\n  std::vector<WorkerAgentSpec> worker_specs;\n  for (uint64_t i = 0; i < graph_num; i++) {\n    WorkerAgentSpec spec;\n    spec.subgraph = i;\n    spec.agent_address = config_.agent_address;\n    spec.rank_id = config_.rank_id;\n    spec.batch_size = session_->GetBatchSize(i);\n    spec.input_infos = session_->GetInputInfos(i);\n    spec.output_infos = session_->GetOutputInfos(i);\n    worker_specs.push_back(spec);\n  }\n  return notify_worker_->Register(worker_specs);\n}\n\nvoid WorkerAgent::StopAgent(bool notify_worker) {\n  exit_notify_worker_ = notify_worker;\n  ExitSignalHandle::Instance().Stop();\n}\n\nclass ProtoDistributedPredictRequest : public RequestBase {\n public:\n  explicit ProtoDistributedPredictRequest(const proto::DistributedPredictRequest &other) : proto_request_(other) {\n    for (int i = 0; i < proto_request_.inputs_size(); i++) {\n      (void)tensor_list_.emplace_back(const_cast<proto::Tensor *>(&proto_request_.inputs(i)));\n    }\n  }\n  ~ProtoDistributedPredictRequest() = default;\n\n  size_t size() const override { return tensor_list_.size(); }\n  const TensorBase *operator[](size_t index) const override {\n    if (index >= tensor_list_.size()) {\n      MSI_LOG_EXCEPTION << \"visit invalid index \" << index << \" total size \" << tensor_list_.size();\n    }\n    return &tensor_list_[index];\n  }\n\n private:\n  std::vector<ProtoTensor> tensor_list_;\n  const proto::DistributedPredictRequest &proto_request_;\n};\n\nclass ProtoDistributedPredictReply : public ReplyBase {\n public:\n  explicit ProtoDistributedPredictReply(proto::DistributedPredictReply *other) : proto_reply_(other) {}\n  ~ProtoDistributedPredictReply() = default;\n\n  size_t size() const override { return tensor_list_.size(); };\n  TensorBase *operator[](size_t index) override 
{\n    if (index >= tensor_list_.size()) {\n      MSI_LOG_EXCEPTION << \"visit invalid index \" << index << \" total size \" << tensor_list_.size();\n    }\n    return &tensor_list_[index];\n  };\n  const TensorBase *operator[](size_t index) const override {\n    if (index >= tensor_list_.size()) {\n      MSI_LOG_EXCEPTION << \"visit invalid index \" << index << \" total size \" << tensor_list_.size();\n    }\n    return &tensor_list_[index];\n  }\n  TensorBase *add() override {\n    auto tensor = proto_reply_->add_outputs();\n    ProtoTensor proto_tensor(tensor);\n    tensor_list_.push_back(proto_tensor);\n    return &(tensor_list_.back());\n  }\n  void clear() override { tensor_list_.clear(); }\n\n private:\n  proto::DistributedPredictReply *proto_reply_;\n  std::vector<ProtoTensor> tensor_list_;\n};\n\nStatus WorkerAgent::Run(const proto::DistributedPredictRequest &request, proto::DistributedPredictReply *reply) {\n  if (session_ == nullptr) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Model is not loaded\";\n  }\n  Status status;\n  try {\n    MSI_TIME_STAMP_START(ExecuteModel)\n    ProtoDistributedPredictRequest request_wrap(request);\n    ProtoDistributedPredictReply reply_wrap(reply);\n    status = session_->ExecuteModel(request_wrap, &reply_wrap, request.return_result(), request.subgraph());\n    MSI_TIME_STAMP_END(ExecuteModel)\n  } catch (const std::bad_alloc &ex) {\n    status = INFER_STATUS_LOG_ERROR(FAILED) << \"Serving Error: malloc memory failed\";\n  } catch (const std::runtime_error &ex) {\n    status = INFER_STATUS_LOG_ERROR(FAILED) << \"Serving Error: runtime error occurred: \" << ex.what();\n  } catch (const std::exception &ex) {\n    status = INFER_STATUS_LOG_ERROR(FAILED) << \"Serving Error: exception occurred: \" << ex.what();\n  } catch (...) 
{\n    status = INFER_STATUS_LOG_ERROR(FAILED) << \"Serving Error: exception occurred\";\n  }\n  if (status != SUCCESS) {\n    reply->Clear();\n    auto error_msg = reply->mutable_error_msg();\n    error_msg->set_error_code(status.StatusCode());\n    error_msg->set_error_msg(status.StatusMessage());\n  }\n  return status;\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/distributed_worker/worker_agent.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_AGENT_H\n#define MINDSPORE_SERVING_WORKER_AGENT_H\n#include <vector>\n#include <memory>\n#include <string>\n#include \"proto/ms_agent.pb.h\"\n#include \"proto/ms_agent.grpc.pb.h\"\n#include \"common/grpc_server.h\"\n#include \"worker/distributed_worker/common.h\"\n#include \"worker/distributed_worker/notify_distributed/notify_worker.h\"\n#include \"worker/inference/inference.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API WorkerAgent {\n public:\n  static WorkerAgent &Instance();\n  Status Clear();\n\n  Status Run(const proto::DistributedPredictRequest &request, proto::DistributedPredictReply *reply);\n\n  Status StartAgent(const AgentStartUpConfig &config, const std::string &dec_key, const std::string &dec_mode);\n\n  void StopAgent(bool notify_worker = true);\n\n private:\n  AgentStartUpConfig config_;\n  std::shared_ptr<InferenceBase> session_ = nullptr;\n  GrpcServer grpc_server_;\n  bool exit_notify_worker_ = true;\n  std::shared_ptr<GrpcNotifyDistributeWorker> notify_worker_;\n\n  Status StartGrpcServer();\n  Status RegisterAgent();\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_WORKER_AGENT_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/extra_worker/remote_call_model.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/extra_worker/remote_call_model.h\"\n#include <unistd.h>\n#include <memory>\n#include \"worker/notfiy_master/grpc_notify.h\"\n#include \"common/proto_tensor.h\"\n#include \"worker/worker.h\"\n\nnamespace mindspore::serving {\nStatus RemoteCallModel::InitRemote(const std::string &servable_name, uint32_t version_number,\n                                   const std::string &master_address,\n                                   std::map<std::string, std::shared_ptr<ModelLoaderBase>> *models) {\n  MSI_EXCEPTION_IF_NULL(models);\n  proto::GetModelInfoReply reply;\n  auto status = GrpcNotifyMaster::GetModelInfos(master_address, servable_name, version_number, &reply);\n  if (status != SUCCESS) {\n    return status;\n  }\n  if (reply.error_msg().error_code() != 0) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << reply.error_msg().error_msg();\n  }\n  std::map<std::string, ModelInfo> model_infos;\n  GrpcTensorHelper::ConvertProtoModelInfos(reply.model_infos(), &model_infos);\n\n  for (auto &model_it : model_infos) {\n    auto &model_name = model_it.first;\n    auto &model_info = model_it.second;\n    auto model_loader = std::make_shared<RemoteCallModel>();\n    (void)models->emplace(model_name, model_loader);\n    status = model_loader->InitModel(model_name, version_number, model_info);\n    if (status != SUCCESS) {\n    
  for (auto &item : *models) {\n        item.second->Clear();\n      }\n      return status;\n    }\n  }\n  return SUCCESS;\n}\n\nStatus RemoteCallModel::InitModel(const std::string &model_key, uint32_t version_number, const ModelInfo &model_info) {\n  model_key_ = model_key;\n  batch_size_ = model_info.batch_size;\n  if (batch_size_ == 0) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Batch size cannot be 0\";\n  }\n  auto &subgraph_infos = model_info.sub_graph_infos;\n  subgraph_contexts_.resize(subgraph_infos.size());\n  for (size_t i = 0; i < subgraph_infos.size(); i++) {\n    auto &subgraph_info = subgraph_infos[i];\n    RemoteCallModelContext &context = subgraph_contexts_[i];\n    context.model_name = model_key;\n    context.version_number = version_number;\n    context.subgraph = i;\n    context.input_infos = subgraph_info.input_infos;\n    for (auto &tensor_info : subgraph_info.output_infos) {\n      TensorInfoOutput output_info;\n      output_info.tensor_info = tensor_info;\n      context.output_infos.push_back(output_info);\n    }\n  }\n  auto status = InitModelExecuteInfo();\n  if (status != SUCCESS) {\n    return status;\n  }\n  return SUCCESS;\n}\n\nstd::vector<TensorInfo> RemoteCallModel::GetInputInfos(uint64_t subgraph) const {\n  if (subgraph >= subgraph_contexts_.size()) {\n    MSI_LOG_EXCEPTION << \"Cannot find subgraph \" << subgraph << \" in model \" << model_key_;\n  }\n  return subgraph_contexts_[subgraph].input_infos;\n}\n\nstd::vector<TensorInfo> RemoteCallModel::GetOutputInfos(uint64_t subgraph) const {\n  if (subgraph >= subgraph_contexts_.size()) {\n    MSI_LOG_EXCEPTION << \"Cannot find subgraph \" << subgraph << \" in model \" << model_key_;\n  }\n  std::vector<TensorInfo> output_tensors;\n  for (auto &item : subgraph_contexts_[subgraph].output_infos) {\n    // cppcheck-suppress useStlAlgorithm\n    output_tensors.push_back(item.tensor_info);\n  }\n  return output_tensors;\n}\n\nuint64_t RemoteCallModel::GetBatchSize() const { 
return batch_size_; }\n\nuint64_t RemoteCallModel::GetGraphNum() const { return subgraph_contexts_.size(); }\n\nvoid RemoteCallModel::Clear() { subgraph_contexts_.clear(); }\n\nStatus RemoteCallModel::Predict(const std::vector<InstanceData> &inputs, std::vector<ResultInstance> *outputs,\n                                uint64_t subgraph) {\n  auto notify_master = Worker::GetInstance().GetGrpcNotifyMaster();\n  if (notify_master == nullptr) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Get notify master failed\";\n  }\n  if (subgraph >= subgraph_contexts_.size()) {\n    MSI_LOG_EXCEPTION << \"Cannot find subgraph \" << subgraph << \" in model \" << model_key_;\n  }\n  return notify_master->CallModel(subgraph_contexts_[subgraph], inputs, outputs);\n}\n\nStatus RemoteCallModel::InitModelExecuteInfo() {\n  auto pid = getpid();\n  Status status;\n  constexpr uint32_t cache_times = 3;\n  auto &shared_memory = SharedMemoryAllocator::Instance();\n  for (auto &subgraph : subgraph_contexts_) {\n    for (size_t i = 0; i < subgraph.input_infos.size(); i++) {\n      auto &tensor_info = subgraph.input_infos[i];\n      uint64_t size_one_batch = tensor_info.size;\n      if (!tensor_info.is_no_batch_dim) {\n        size_one_batch = size_one_batch / batch_size_;\n      }\n      auto memory_key = model_key_ + \"_subgraph\" + std::to_string(subgraph.subgraph) + \"_input\" + std::to_string(i) +\n                        \"_pid\" + std::to_string(pid);\n      uint64_t init_count = batch_size_ * cache_times;\n      status = shared_memory.NewMemoryBuffer(memory_key, size_one_batch, init_count);\n      if (status != SUCCESS) {\n        return INFER_STATUS_LOG_ERROR(FAILED)\n               << \"Init input shared memory failed, item size: \" << size_one_batch << \", initial count: \" << init_count;\n      }\n      subgraph.request_memory.push_back(memory_key);\n    }\n    for (size_t i = 0; i < subgraph.output_infos.size(); i++) {\n      auto &output_info = 
subgraph.output_infos[i];\n      auto &tensor_info = output_info.tensor_info;\n      if (tensor_info.is_no_batch_dim) {\n        output_info.shape_one_batch = tensor_info.shape;\n        output_info.size_one_batch = tensor_info.size;\n      } else {\n        output_info.shape_one_batch = tensor_info.shape;\n        (void)output_info.shape_one_batch.erase(output_info.shape_one_batch.begin());\n        // the batch size has been checked in WorkerExecutor\n        output_info.size_one_batch = tensor_info.size / batch_size_;\n      }\n      auto memory_key = model_key_ + \"_subgraph\" + std::to_string(subgraph.subgraph) + \"_output\" + std::to_string(i) +\n                        \"_pid\" + std::to_string(pid);\n      uint64_t init_count = batch_size_ * cache_times;\n      status = shared_memory.NewMemoryBuffer(memory_key, output_info.size_one_batch, init_count);\n      if (status != SUCCESS) {\n        return INFER_STATUS_LOG_ERROR(FAILED)\n               << \"Init output shared memory failed, item size: \" << output_info.size_one_batch\n               << \", initial count: \" << init_count;\n      }\n      subgraph.reply_memory.push_back(memory_key);\n    }\n  }\n  return SUCCESS;\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/extra_worker/remote_call_model.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_REMOTE_CALL_MODEL_H\n#define MINDSPORE_SERVING_REMOTE_CALL_MODEL_H\n#include <map>\n#include <string>\n#include <vector>\n#include <memory>\n#include \"worker/model_loader_base.h\"\n\nnamespace mindspore::serving {\nstruct RemoteCallModelContext {\n  uint32_t version_number;\n  std::string model_name;\n  uint64_t subgraph;\n  std::vector<std::string> request_memory;\n  std::vector<std::string> reply_memory;\n  std::vector<TensorInfo> input_infos;\n  std::vector<TensorInfoOutput> output_infos;\n};\n\nclass MS_API RemoteCallModel : public ModelLoaderBase {\n public:\n  static Status InitRemote(const std::string &servable_name, uint32_t version_number, const std::string &master_address,\n                           std::map<std::string, std::shared_ptr<ModelLoaderBase>> *models);\n\n  std::vector<TensorInfo> GetInputInfos(uint64_t subgraph = 0) const override;\n  std::vector<TensorInfo> GetOutputInfos(uint64_t subgraph = 0) const override;\n  uint64_t GetBatchSize() const override;\n  uint64_t GetGraphNum() const override;\n  void Clear() override;\n\n  Status Predict(const std::vector<InstanceData> &inputs, std::vector<ResultInstance> *outputs,\n                 uint64_t subgraph = 0) override;\n  Status AfterLoadModel() override { return SUCCESS; }\n  bool OwnDevice() const override { return false; }\n\n 
private:\n  std::string model_key_;\n  uint64_t batch_size_ = 0;\n  std::vector<RemoteCallModelContext> subgraph_contexts_;\n\n  Status InitModelExecuteInfo();\n  Status InitModel(const std::string &model_key, uint32_t version_number, const ModelInfo &model_info);\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_REMOTE_CALL_MODEL_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/grpc/worker_process.cc",
    "content": "/**\r\n * Copyright 2020 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"worker/grpc/worker_process.h\"\r\n#include \"worker/worker.h\"\r\n#include \"common/proto_tensor.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {\r\nvoid MSWorkerImpl::Exit(const proto::ExitRequest *request, proto::ExitReply *reply) {\r\n  MSI_LOG(INFO) << \"Master Exit\";\r\n  Worker::GetInstance().StopServable(false);\r\n}\r\n\r\nvoid MSWorkerImpl::PredictAsync(const proto::PredictRequest *request, proto::PredictReply *reply,\r\n                                const PredictOnFinish &on_finish) {\r\n  Status status(WORKER_UNAVAILABLE);\r\n  try {\r\n    status = Worker::GetInstance().RunAsync(*request, reply, on_finish);\r\n  } catch (const std::bad_alloc &ex) {\r\n    MSI_LOG(ERROR) << \"Serving Error: malloc memory failed\";\r\n  } catch (const std::runtime_error &ex) {\r\n    MSI_LOG(ERROR) << \"Serving Error: runtime error occurred: \" << ex.what();\r\n  } catch (const std::exception &ex) {\r\n    MSI_LOG(ERROR) << \"Serving Error: exception occurred: \" << ex.what();\r\n  } catch (...) {\r\n    MSI_LOG(ERROR) << \"Serving Error: exception occurred\";\r\n  }\r\n\r\n  if (status != SUCCESS) {\r\n    GrpcTensorHelper::CreateReplyFromErrorMsg(status, reply);\r\n    on_finish();\r\n  }\r\n}\r\n}  // namespace serving\r\n}  // namespace mindspore\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/grpc/worker_process.h",
    "content": "/**\r\n * Copyright 2020 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MINDSPORE_SERVING_WORKER_WORKER_PROCESS_H\r\n#define MINDSPORE_SERVING_WORKER_WORKER_PROCESS_H\r\n\r\n#include <grpcpp/grpcpp.h>\r\n#include <grpcpp/health_check_service_interface.h>\r\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\r\n#include <memory>\r\n#include <string>\r\n#include \"common/serving_common.h\"\r\n#include \"common/heart_beat.h\"\r\n#include \"common/grpc_client.h\"\r\n#include \"proto/ms_worker.pb.h\"\r\n#include \"proto/ms_worker.grpc.pb.h\"\r\n#include \"proto/ms_master.pb.h\"\r\n#include \"proto/ms_master.grpc.pb.h\"\r\n#include \"proto/ms_agent.pb.h\"\r\n#include \"proto/ms_agent.grpc.pb.h\"\r\nnamespace mindspore {\r\nnamespace serving {\r\n// Service Implement\r\nclass MSWorkerImpl {\r\n public:\r\n  MSWorkerImpl() = default;\r\n  ~MSWorkerImpl() = default;\r\n  void Exit(const proto::ExitRequest *request, proto::ExitReply *reply);\r\n  void PredictAsync(const proto::PredictRequest *request, proto::PredictReply *reply, const PredictOnFinish &on_finish);\r\n};\r\n\r\n}  // namespace serving\r\n}  // namespace mindspore\r\n\r\n#endif  // MINDSPORE_SERVING_WORKER_WORKER_PROCESS_H\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/grpc/worker_server.cc",
    "content": "/**\r\n * Copyright 2020 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"worker/grpc/worker_server.h\"\r\n#include <string>\r\n#include <memory>\r\n#include \"common/grpc_server.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {}  // namespace serving\r\n}  // namespace mindspore\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/grpc/worker_server.h",
    "content": "/**\r\n * Copyright 2020 Huawei Technologies Co., Ltd\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MINDSPORE_SERVING_WORKER_WORKER_SERVER_H\r\n#define MINDSPORE_SERVING_WORKER_WORKER_SERVER_H\r\n\r\n#include <memory>\r\n#include <string>\r\n#include \"common/serving_common.h\"\r\n#include \"proto/ms_worker.pb.h\"\r\n#include \"proto/ms_worker.grpc.pb.h\"\r\n#include \"common/grpc_async_server.h\"\r\n#include \"worker/grpc/worker_process.h\"\r\n#include \"worker/distributed_worker/distributed_model_loader.h\"\r\n\r\nnamespace mindspore {\r\nnamespace serving {\r\ntemplate <class Derived>\r\nclass WorkerServiceContext : public GrpcAsyncServiceContext<MSWorkerImpl, proto::MSWorker::AsyncService, Derived> {\r\n public:\r\n  WorkerServiceContext(MSWorkerImpl *service_impl, proto::MSWorker::AsyncService *async_service,\r\n                       grpc::ServerCompletionQueue *cq)\r\n      : GrpcAsyncServiceContext<MSWorkerImpl, proto::MSWorker::AsyncService, Derived>(service_impl, async_service, cq) {\r\n  }\r\n  virtual void StartEnqueueRequest() = 0;\r\n  virtual void HandleRequest() = 0;\r\n};\r\n\r\nclass WorkerPredictContext : public WorkerServiceContext<WorkerPredictContext> {\r\n public:\r\n  WorkerPredictContext(MSWorkerImpl *service_impl, proto::MSWorker::AsyncService *async_service,\r\n                       grpc::ServerCompletionQueue *cq)\r\n      : WorkerServiceContext(service_impl, 
async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~WorkerPredictContext() = default;\r\n\r\n  void StartEnqueueRequest() override { async_service_->RequestPredict(&ctx_, &request_, &responder_, cq_, cq_, this); }\r\n\r\n  void HandleRequest() override {\r\n    MSI_TIME_STAMP_START(WorkerRequestHandle)\r\n    auto method_name = request_.servable_spec().method_name();\r\n    PredictOnFinish on_finish = [this, method_name, time_start_WorkerRequestHandle]() {\r\n      responder_.Finish(response_, grpc::Status::OK, this);\r\n      MSI_TIME_STAMP_END_EXTRA(WorkerRequestHandle, \"Method \" + method_name)\r\n    };\r\n    service_impl_->PredictAsync(&request_, &response_, on_finish);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::PredictReply> responder_;\r\n  proto::PredictRequest request_;\r\n  proto::PredictReply response_;\r\n};\r\n\r\nclass WorkerExitContext : public WorkerServiceContext<WorkerExitContext> {\r\n public:\r\n  WorkerExitContext(MSWorkerImpl *service_impl, proto::MSWorker::AsyncService *async_service,\r\n                    grpc::ServerCompletionQueue *cq)\r\n      : WorkerServiceContext(service_impl, async_service, cq), responder_(&ctx_) {}\r\n\r\n  ~WorkerExitContext() = default;\r\n\r\n  void StartEnqueueRequest() override { async_service_->RequestExit(&ctx_, &request_, &responder_, cq_, cq_, this); }\r\n\r\n  void HandleRequest() override {\r\n    service_impl_->Exit(&request_, &response_);\r\n    responder_.Finish(response_, grpc::Status::OK, this);\r\n  }\r\n\r\n private:\r\n  grpc::ServerAsyncResponseWriter<proto::ExitReply> responder_;\r\n  proto::ExitRequest request_;\r\n  proto::ExitReply response_;\r\n};\r\n\r\nclass WorkerGrpcServer : public GrpcAsyncServer<proto::MSWorker::AsyncService> {\r\n public:\r\n  WorkerGrpcServer() : GrpcAsyncServer<proto::MSWorker::AsyncService>() {}\r\n  void EnqueueRequests() override { WorkerPredictContext::EnqueueRequest(&service_impl_, &svc_, cq_.get()); }\r\n\r\n protected:\r\n  
MSWorkerImpl service_impl_;\r\n};\r\n\r\n}  // namespace serving\r\n}  // namespace mindspore\r\n\r\n#endif  // MINDSPORE_SERVING_WORKER_WORKER_SERVER_H\r\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/inference/inference.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"worker/inference/inference.h\"\n#include <dlfcn.h>\n#include \"glog/logging.h\"\n#include \"worker/context.h\"\n\nnamespace mindspore::serving {\nnamespace {\nconstexpr const char *kMindSporeLibName = \"libmindspore.so\";\nconstexpr const char *kMindsporeLiteLibName = \"libmindspore-lite.so\";\nconstexpr const char *kServingAscendLibName = \"libserving_ascend.so\";\n}  // namespace\n\nvoid ModelContext::AppendDeviceInfo(const DeviceInfo &device_info) { (void)device_list.emplace_back(device_info); }\n\nstd::string ModelContext::AsString() const {\n  std::map<std::string, std::string> output_map;\n  if (thread_num > -1) {\n    output_map[\"thread num\"] = AsStringHelper::AsString(thread_num);\n  }\n  if (!thread_affinity_core_list.empty()) {\n    output_map[\"thread affinity core list\"] = AsStringHelper::AsString(thread_affinity_core_list);\n  }\n  if (enable_parallel > -1) {\n    output_map[\"enable parallel\"] = AsStringHelper::AsString(enable_parallel);\n  }\n  if (!device_list.empty()) {\n    output_map[\"device infos\"] = AsStringHelper::AsString(device_list);\n  }\n  return AsStringHelper::AsString(output_map);\n}\n\nInferenceLoader::InferenceLoader() {}\nInferenceLoader::~InferenceLoader() {\n  if (ms_lib_handle_ != nullptr) {\n    (void)dlclose(ms_lib_handle_);\n    ms_lib_handle_ = nullptr;\n  }\n  if 
(ms_cxx_lib_handle_ != nullptr) {\n    (void)dlclose(ms_cxx_lib_handle_);\n    ms_cxx_lib_handle_ = nullptr;\n  }\n  if (gomp_handler_ != nullptr) {\n    (void)dlclose(gomp_handler_);\n    gomp_handler_ = nullptr;\n  }\n  ms_create_handle_ = nullptr;\n}\n\nInferenceLoader &InferenceLoader::Instance() {\n  static InferenceLoader inference = InferenceLoader();\n  return inference;\n}\n\nstd::shared_ptr<InferenceBase> InferenceLoader::CreateMindSporeInfer() {\n  Status status;\n  if (ms_lib_handle_ == nullptr) {\n    status = LoadMindSporeModelWrap();\n    if (status != SUCCESS) {\n      MSI_LOG_EXCEPTION << \"Load \" << kServingAscendLibName << \" failed, error msg: \" << status.StatusMessage();\n    }\n  }\n  auto instance = ms_create_handle_();\n  if (instance == nullptr) {\n    return nullptr;\n  } else {\n    return std::shared_ptr<InferenceBase>(instance);\n  }\n}\n\nstd::vector<std::string> SplitString(const std::string &s, const std::string &delimiters = \":\") {\n  auto pos_left = s.find_first_not_of(delimiters, 0);\n  auto pos_right = s.find_first_of(delimiters, pos_left);\n  std::vector<std::string> tokens;\n  while (pos_left != std::string::npos) {\n    if (pos_right == std::string::npos) {\n      tokens.push_back(s.substr(pos_left));\n      break;\n    }\n    tokens.push_back(s.substr(pos_left, pos_right - pos_left));\n    pos_left = s.find_first_not_of(delimiters, pos_right);\n    pos_right = s.find_first_of(delimiters, pos_left);\n  }\n  return tokens;\n}\n\nStatus InferenceLoader::LoadMindSporeModelWrap() {\n  MSI_LOG_INFO << \"Start Initialize MindSpore Model Wrap so\";\n  std::vector<std::string> gomp_list = {\"libgomp.so.1\"};\n  for (auto &item : gomp_list) {\n    gomp_handler_ = dlopen(item.c_str(), RTLD_NOW | RTLD_GLOBAL);\n    if (gomp_handler_ != nullptr) {\n      MSI_LOG_INFO << \"dlopen libgomp so: \" << item << \" success\";\n    }\n  }\n  if (gomp_handler_ == nullptr) {\n    MSI_LOG_WARNING << \"dlopen libgomp library failed, try dlopen 
list: \" << gomp_list;\n  }\n\n  auto get_dlerror = []() -> std::string {\n    auto error = dlerror();\n    if (error == nullptr) {\n      return std::string();\n    }\n    return error;\n  };\n  enable_lite_ = ServableContext::Instance()->EnableLite();\n\n  auto ld_lib_path = common::GetEnv(\"LD_LIBRARY_PATH\");\n  MSI_LOG_INFO << \"Enable lite: \" << enable_lite_ << \", LD_LIBRARY_PATH: \" << ld_lib_path;\n  if (enable_lite_) {\n    ms_cxx_lib_handle_ = dlopen(kMindsporeLiteLibName, RTLD_NOW | RTLD_GLOBAL);\n    if (ms_cxx_lib_handle_ == nullptr) {\n      std::string load_error = get_dlerror();\n      std::string so_no_exist_error =\n        std::string(kMindsporeLiteLibName) + \": cannot open shared object file: No such file or directory\";\n      // libmindspore-lite.so exist but dlopen failed\n      if (load_error.find(so_no_exist_error) == std::string::npos) {\n        return INFER_STATUS_LOG_ERROR(FAILED) << \"dlopen libmindspore-lite.so failed, dlopen error: \" << load_error;\n      }\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"dlopen libmindspore-lite.so failed, if you want to use MindSpore Lite to do the inference, please \"\n                \"append \"\n                \"libmindspore-lite.so's path to LD_LIBRARY_PATH env or put it in the dynamic_library search path\"\n             << \", dlopen error: \" << load_error;\n    }\n    MSI_LOG_INFO << \"Load \" << kMindsporeLiteLibName << \" successful\";\n  } else {\n    if (!ld_lib_path.empty()) {\n      auto ms_search_path_list = SplitString(ld_lib_path, \":\");\n      MSI_LOG_INFO << \"Search \" << kMindSporeLibName << \" directory: \" << ms_search_path_list;\n      for (auto &item : ms_search_path_list) {\n        auto lib_path = item + \"/\" + kMindSporeLibName;\n        if (!common::DirOrFileExist(lib_path)) {\n          continue;\n        }\n        ms_cxx_lib_handle_ = dlopen(lib_path.c_str(), RTLD_NOW | RTLD_GLOBAL);\n        if (ms_cxx_lib_handle_ == nullptr) {\n          return 
INFER_STATUS_LOG_ERROR(FAILED) << \"dlopen libmindspore.so failed, please check whether the MindSpore \"\n                                                   \"and Ascend/GPU software package versions match\"\n                                                << \", lib path:\" << lib_path << \", dlopen error: \" << get_dlerror();\n        }\n        MSI_LOG_INFO << \"Load \" << kMindSporeLibName << \" in \" << item << \" successful\";\n        break;\n      }\n    }\n    if (ms_cxx_lib_handle_ == nullptr) {\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Failed to load libmindspore.so, please pip install MindSpore whl package for libmindspore.so\";\n    }\n  }\n  ms_lib_handle_ = dlopen(kServingAscendLibName, RTLD_NOW | RTLD_GLOBAL);\n  if (ms_lib_handle_ == nullptr) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"dlopen failed, please check whether the MindSpore and Serving versions match, lib name:\"\n           << kServingAscendLibName << \", dlopen error: \" << get_dlerror();\n  }\n  MSI_LOG_INFO << \"Load \" << kServingAscendLibName << \" successful\";\n  ms_create_handle_ = (CreateInferHandle)dlsym(ms_lib_handle_, \"ServingCreateInfer\");\n  if (ms_create_handle_ == nullptr) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"dlsym ServingCreateInfer failed, lib name:\" << kServingAscendLibName\n                                          << \", dlopen error: \" << get_dlerror();\n  }\n  return SUCCESS;\n}\n\nbool InferenceLoader::GetEnableLite() const { return enable_lite_; }\n\nDeviceType InferenceLoader::GetSupportDeviceType(DeviceType device_type, ModelType model_type) {\n  auto mindspore_infer = CreateMindSporeInfer();\n  if (mindspore_infer == nullptr) {\n    MSI_LOG_ERROR << \"Create MindSpore infer failed\";\n    return kDeviceTypeNotSpecified;\n  }\n  std::vector<ModelType> check_model_types;\n  if (model_type == kUnknownType) {\n    check_model_types = {kMindIR, kMindIR_Lite, kOM};\n  } else {\n    check_model_types = 
{model_type};\n  }\n  for (auto &model_type_item : check_model_types) {\n    if (device_type == kDeviceTypeNotSpecified) {\n      auto device_list = {kDeviceTypeAscend, kDeviceTypeGpu, kDeviceTypeCpu};\n      for (auto item : device_list) {\n        if (mindspore_infer->CheckModelSupport(item, model_type_item)) {\n          return item;\n        }\n      }\n    } else {\n      if (mindspore_infer->CheckModelSupport(device_type, model_type_item)) {\n        return device_type;\n      }\n    }\n  }\n  return kDeviceTypeNotSpecified;\n}\n\nbool InferenceLoader::SupportReuseDevice() {\n  auto mindspore_infer = CreateMindSporeInfer();\n  if (mindspore_infer == nullptr) {\n    MSI_LOG_ERROR << \"Create MindSpore infer failed\";\n    return false;\n  }\n  return mindspore_infer->SupportReuseDevice();\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/inference/inference.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_INFERENCE_H\n#define MINDSPORE_SERVING_WORKER_INFERENCE_H\n\n#include <utility>\n#include <map>\n#include <unordered_map>\n#include <memory>\n#include <vector>\n#include <string>\n#include <atomic>\n#include \"common/serving_common.h\"\n\nnamespace mindspore {\nnamespace serving {\nusing DeviceInfo = std::map<std::string, std::string>;\n\nenum DeviceType {\n  kDeviceTypeNotSpecified,\n  kDeviceTypeAscend,\n  kDeviceTypeGpu,\n  kDeviceTypeCpu,\n};\n\nenum ModelType : uint32_t {\n  kMindIR = 0,\n  kAIR = 1,\n  kOM = 2,\n  kONNX = 3,\n  kMindIR_Lite = 4,\n  // insert new data type here\n  kUnknownType = 0xFFFFFFFF\n};\n\nstruct MS_API ModelContext {\n  int32_t thread_num{-1};  // -1: unspecified\n  std::vector<int> thread_affinity_core_list;\n  int enable_parallel{-1};  // -1: unspecified, 0: false, 1: true\n  std::vector<DeviceInfo> device_list;\n  void AppendDeviceInfo(const DeviceInfo &device_info);\n  std::string AsString() const;\n};\n\nstruct TensorInfo {\n  size_t size = 0;  // -1: unspecified\n  DataType data_type = kMSI_Unknown;\n  std::vector<int64_t> shape;\n  bool is_no_batch_dim = false;\n};\n\nstruct TensorInfoOutput {\n  TensorInfo tensor_info;\n  size_t size_one_batch = 0;\n  std::vector<int64_t> shape_one_batch;\n};\n\nstatic inline LogStream &operator<<(LogStream &stream, DeviceType 
device_type) {\n  switch (device_type) {\n    case kDeviceTypeAscend:\n      stream << \"Ascend\";\n      break;\n    case kDeviceTypeGpu:\n      stream << \"Gpu\";\n      break;\n    case kDeviceTypeCpu:\n      stream << \"Cpu\";\n      break;\n    case kDeviceTypeNotSpecified:\n      stream << \"None(Default)\";\n      break;\n    default:\n      stream << \"[device type: \" << static_cast<int>(device_type) << \"]\";\n      break;\n  }\n  return stream;\n}\n\nstatic inline LogStream &operator<<(LogStream &stream, ModelType model_type) {\n  switch (model_type) {\n    case kMindIR:\n      stream << \"MindIR\";\n      break;\n    case kOM:\n      stream << \"OM\";\n      break;\n    case kONNX:\n      stream << \"ONNX\";\n      break;\n    case kAIR:\n      stream << \"AIR\";\n      break;\n    case kMindIR_Lite:\n      stream << \"MindIR_Lite\";\n      break;\n    case kUnknownType:\n    default:\n      stream << \"[model type: \" << static_cast<int>(model_type) << \"]\";\n      break;\n  }\n  return stream;\n}\n\nclass InferenceBase {\n public:\n  InferenceBase() = default;\n  virtual ~InferenceBase() = default;\n  virtual Status LoadModelFromFile(DeviceType device_type, uint32_t device_id,\n                                   const std::vector<std::string> &file_name, ModelType model_type, bool with_batch_dim,\n                                   const std::vector<int> &without_batch_dim_inputs, const ModelContext &model_context,\n                                   const std::string &dec_key, const std::string &dec_mode,\n                                   const std::string &config_file, bool enable_lite) = 0;\n  virtual Status UnloadModel() = 0;\n\n  virtual Status ExecuteModel(const RequestBase &request, ReplyBase *reply, bool return_result, uint64_t subgraph) = 0;\n  virtual Status ExecuteModel(const std::vector<TensorBasePtr> &request, std::vector<TensorBasePtr> *reply,\n                              bool return_result, uint64_t subgraph) = 0;\n\n  virtual 
std::vector<TensorInfo> GetInputInfos(uint64_t subgraph) const = 0;\n\n  virtual std::vector<TensorInfo> GetOutputInfos(uint64_t subgraph) const = 0;\n\n  virtual ssize_t GetBatchSize(uint64_t subgraph) const = 0;\n\n  virtual bool CheckModelSupport(DeviceType device_type, ModelType model_type) const = 0;\n\n  virtual uint64_t GetSubGraphNum() const = 0;\n  virtual bool SupportReuseDevice() const = 0;\n};\n\nclass MS_API InferenceLoader {\n public:\n  InferenceLoader();\n  ~InferenceLoader();\n  static InferenceLoader &Instance();\n  std::shared_ptr<InferenceBase> CreateMindSporeInfer();\n  DeviceType GetSupportDeviceType(DeviceType device_type, ModelType model_type);\n  bool SupportReuseDevice();\n  bool GetEnableLite() const;\n\n private:\n  typedef InferenceBase *(*CreateInferHandle)();\n  void *ms_lib_handle_ = nullptr;\n  void *ms_cxx_lib_handle_ = nullptr;\n  void *gomp_handler_ = nullptr;\n  CreateInferHandle ms_create_handle_ = nullptr;\n  Status LoadMindSporeModelWrap();\n  bool enable_lite_{false};\n};\n\n}  // namespace serving\n}  // namespace mindspore\n#endif  // MINDSPORE_SERVING_WORKER_INFERENCE_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/inference/mindspore_model_wrap.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/inference/mindspore_model_wrap.h\"\n#include <unistd.h>\n#include <sys/stat.h>\n#include <functional>\n#include <map>\n#include <vector>\n\nnamespace mindspore {\nnamespace serving {\nextern \"C\" {\nMS_API InferenceBase *ServingCreateInfer() {\n  auto obj = new MindSporeModelWrap();\n  return dynamic_cast<InferenceBase *>(obj);\n}\n}\n\nstd::mutex MindSporeModelWrap::infer_mutex_;\n\nmindspore::DataType TransInferDataType2ApiTypeId(DataType data_type) {\n  const std::map<DataType, mindspore::DataType> type2id_map{\n    {serving::kMSI_Unknown, mindspore::DataType::kTypeUnknown},\n    {serving::kMSI_Bool, mindspore::DataType::kNumberTypeBool},\n    {serving::kMSI_Int8, mindspore::DataType::kNumberTypeInt8},\n    {serving::kMSI_Uint8, mindspore::DataType::kNumberTypeUInt8},\n    {serving::kMSI_Int16, mindspore::DataType::kNumberTypeInt16},\n    {serving::kMSI_Uint16, mindspore::DataType::kNumberTypeUInt16},\n    {serving::kMSI_Int32, mindspore::DataType::kNumberTypeInt32},\n    {serving::kMSI_Uint32, mindspore::DataType::kNumberTypeUInt32},\n    {serving::kMSI_Int64, mindspore::DataType::kNumberTypeInt64},\n    {serving::kMSI_Uint64, mindspore::DataType::kNumberTypeUInt64},\n    {serving::kMSI_Float16, mindspore::DataType::kNumberTypeFloat16},\n    {serving::kMSI_Float32, mindspore::DataType::kNumberTypeFloat32},\n  
  {serving::kMSI_Float64, mindspore::DataType::kNumberTypeFloat64},\n  };\n  auto it = type2id_map.find(data_type);\n  if (it == type2id_map.end()) {\n    MSI_LOG_WARNING << \"Unsupported MSI data type \" << data_type;\n    return mindspore::DataType::kTypeUnknown;\n  } else {\n    return it->second;\n  }\n}\n\nDataType TransTypeId2InferDataType(mindspore::DataType type_id) {\n  const std::map<mindspore::DataType, DataType> id2type_map{\n    {mindspore::DataType::kTypeUnknown, kMSI_Unknown},       {mindspore::DataType::kNumberTypeBool, kMSI_Bool},\n    {mindspore::DataType::kNumberTypeFloat64, kMSI_Float64}, {mindspore::DataType::kNumberTypeInt8, kMSI_Int8},\n    {mindspore::DataType::kNumberTypeUInt8, kMSI_Uint8},     {mindspore::DataType::kNumberTypeInt16, kMSI_Int16},\n    {mindspore::DataType::kNumberTypeUInt16, kMSI_Uint16},   {mindspore::DataType::kNumberTypeInt32, kMSI_Int32},\n    {mindspore::DataType::kNumberTypeUInt32, kMSI_Uint32},   {mindspore::DataType::kNumberTypeInt64, kMSI_Int64},\n    {mindspore::DataType::kNumberTypeUInt64, kMSI_Uint64},   {mindspore::DataType::kNumberTypeFloat16, kMSI_Float16},\n    {mindspore::DataType::kNumberTypeFloat32, kMSI_Float32},\n  };\n  auto it = id2type_map.find(type_id);\n  if (it == id2type_map.end()) {\n    MSI_LOG_WARNING << \"Unsupported data id \" << static_cast<int>(type_id);\n    return kMSI_Unknown;\n  } else {\n    return it->second;\n  }\n}\n\nStatus MindSporeModelWrap::LoadModelFromFile(serving::DeviceType device_type, uint32_t device_id,\n                                             const std::vector<std::string> &file_names, ModelType model_type,\n                                             bool with_batch_dim, const std::vector<int> &without_batch_dim_inputs,\n                                             const ModelContext &model_context, const std::string &dec_key,\n                                             const std::string &dec_mode, const std::string &config_file,\n                               
              bool enable_lite) {\n  char path[PATH_MAX];\n  std::string current_path = getcwd(path, PATH_MAX);\n  std::string build_dir = current_path + \"/models_build_temp/\";\n  (void)mkdir(build_dir.c_str(), S_IRWXU | S_IRWXG);\n  build_dir += \"device_\" + std::to_string(device_id);\n  (void)mkdir(build_dir.c_str(), S_IRWXU | S_IRWXG);\n  auto error_no = chdir(build_dir.c_str());\n  if (error_no != 0) {\n    MSI_LOG_WARNING << \"Failed to call chdir, target build directory: \" << build_dir << \", error no: \" << error_no;\n  }\n  Status status;\n  if (enable_lite) {\n    status = LoadLiteModelFromFileInner(device_type, device_id, file_names, model_type, with_batch_dim,\n                                        without_batch_dim_inputs, model_context, config_file);\n  } else {\n    status = LoadModelFromFileInner(device_type, device_id, file_names, model_type, with_batch_dim,\n                                    without_batch_dim_inputs, model_context, dec_key, dec_mode, config_file);\n  }\n\n  error_no = chdir(current_path.c_str());\n  if (error_no != 0) {\n    MSI_LOG_WARNING << \"Failed to call chdir, target directory: \" << current_path << \", error no: \" << error_no;\n  }\n  return status;\n}\n\nStatus MindSporeModelWrap::LoadLiteModelFromFileInner(serving::DeviceType device_type, uint32_t device_id,\n                                                      const std::vector<std::string> &file_names, ModelType model_type,\n                                                      bool with_batch_dim,\n                                                      const std::vector<int> &without_batch_dim_inputs,\n                                                      const ModelContext &model_context,\n                                                      const std::string &config_file) {\n  auto ms_model_type = GetMsModelType(model_type);\n  if (ms_model_type == mindspore::kUnknownType) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Invalid model type \" << 
model_type;\n  }\n  if (file_names.size() != 1) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Load model from file failed, Multi subgraph is not support when the backend is lite, file names: \"\n           << file_names;\n  }\n  const auto &file_name = file_names[0];\n  auto model = std::make_shared<mindspore::Model>();\n  try {\n    auto context = TransformModelContext(device_type, device_id, model_context, true);\n    if (!config_file.empty()) {\n      auto load_status = model->LoadConfig(config_file);\n      if (!load_status.IsOk()) {\n        return INFER_STATUS_LOG_ERROR(FAILED)\n               << \"Load config file: \" << config_file << \" failed, error details: \" << load_status.ToString();\n      }\n    }\n    auto status = model->Build(file_name, ms_model_type, context);\n    if (!status.IsOk()) {\n      MSI_LOG_ERROR << \"Load model from file failed, model file: \" << file_name << \", device_type: '\" << device_type\n                    << \"', device_id: \" << device_id << \", model type: \" << model_type\n                    << \", model context: \" << model_context.AsString() << \", build error detail: \" << status.ToString();\n      return Status(FAILED, status.ToString());\n    }\n  } catch (std::runtime_error &ex) {\n    MSI_LOG_ERROR << \"Load model from file failed, model file: \" << file_name << \", device_type: '\" << device_type\n                  << \"', device_id: \" << device_id << \", model type: \" << model_type\n                  << \", model context: \" << model_context.AsString() << \", build error detail: \" << ex.what();\n    return Status(FAILED, ex.what());\n  }\n\n  return SetApiModelInfo(device_type, device_id, {file_name}, model_type, with_batch_dim, without_batch_dim_inputs,\n                         model_context, {model});\n}\n\nStatus MindSporeModelWrap::LoadModelFromFileInner(serving::DeviceType device_type, uint32_t device_id,\n                                                  const std::vector<std::string> 
&file_names, ModelType model_type,\n                                                  bool with_batch_dim, const std::vector<int> &without_batch_dim_inputs,\n                                                  const ModelContext &model_context, const std::string &dec_key,\n                                                  const std::string &dec_mode, const std::string &config_file) {\n  auto ms_model_type = GetMsModelType(model_type);\n  if (ms_model_type == mindspore::kUnknownType) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Invalid model type \" << model_type;\n  }\n  std::vector<std::shared_ptr<mindspore::Model>> models;\n  try {\n    std::vector<mindspore::Graph> graphs;\n    mindspore::Key key;\n    if (!dec_key.empty()) {\n      auto rt = memcpy_s(key.key, sizeof(key.key), dec_key.data(), dec_key.size());\n      if (rt != EOK) {\n        return INFER_STATUS_LOG_ERROR(FAILED) << \"Load model from file failed, dec key size \" << dec_key.size()\n                                              << \" should less than \" << key.max_key_len;\n      }\n      key.len = dec_key.size();\n    } else {\n      key.len = 0;\n    }\n\n    mindspore::Status ms_status;\n    if (file_names.size() > 1) {\n      ms_status = mindspore::Serialization::Load(file_names, ms_model_type, &graphs, key, dec_mode);\n    } else {\n      (void)graphs.emplace_back(mindspore::Graph());\n      ms_status = mindspore::Serialization::Load(file_names[0], ms_model_type, &graphs[0], key, dec_mode);\n    }\n\n    (void)memset_s(key.key, sizeof(key.key), 0, key.max_key_len);\n    if (!ms_status.IsOk()) {\n      MSI_LOG_ERROR << \"Load model from file failed, model file: \" << file_names << \", device_type: '\" << device_type\n                    << \"', device_id: \" << device_id << \", model type: \" << model_type\n                    << \", model context: \" << model_context.AsString() << \", dec mode: \" << dec_mode\n                    << \", load error detail: \" << ms_status.ToString();\n      
return Status(FAILED, ms_status.ToString());\n    }\n    if (file_names.size() > 1 && graphs.size() != file_names.size()) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Load model from file failed, generate graphs size \" << graphs.size()\n                                            << \" should equal to \" << file_names.size();\n    }\n    auto context = TransformModelContext(device_type, device_id, model_context, false);\n    for (size_t i = 0; i < file_names.size(); i++) {\n      auto model = std::make_shared<mindspore::Model>();\n      if (!config_file.empty()) {\n        auto load_status = model->LoadConfig(config_file);\n        if (!load_status.IsOk()) {\n          return INFER_STATUS_LOG_ERROR(FAILED)\n                 << \"Load config file: \" << config_file << \" failed, error details: \" << load_status.ToString();\n        }\n      }\n      mindspore::Status status;\n      status = model->Build(mindspore::GraphCell(graphs[i]), context);\n      if (!status.IsOk()) {\n        MSI_LOG_ERROR << \"Load model from file failed, model file: \" << file_names[i] << \", device_type: '\"\n                      << device_type << \"', device_id: \" << device_id << \", model type: \" << model_type\n                      << \", model context: \" << model_context.AsString()\n                      << \", build error detail: \" << status.ToString();\n        return Status(FAILED, status.ToString());\n      }\n      models.push_back(model);\n    }\n  } catch (std::runtime_error &ex) {\n    MSI_LOG_ERROR << \"Load model from file failed, model file: \" << file_names << \", device_type: '\" << device_type\n                  << \"', device_id: \" << device_id << \", model type: \" << model_type\n                  << \", model context: \" << model_context.AsString() << \", build error detail: \" << ex.what();\n    return Status(FAILED, ex.what());\n  }\n  auto ret = SetApiModelInfo(device_type, device_id, file_names, model_type, with_batch_dim, without_batch_dim_inputs,\n  
                           model_context, models);\n  if (ret != SUCCESS) {\n    return ret;\n  }\n  return BuildOnPredict();\n}\n\nStatus MindSporeModelWrap::SetApiModelInfo(serving::DeviceType device_type, uint32_t device_id,\n                                           const std::vector<std::string> &file_names, ModelType model_type,\n                                           bool with_batch_dim, const std::vector<int> &without_batch_dim_inputs,\n                                           const ModelContext &model_context,\n                                           const std::vector<std::shared_ptr<mindspore::Model>> &models) {\n  uint64_t last_batch_size = 0;\n  common_model_info_.device_type = device_type;\n  common_model_info_.device_id = device_id;\n  common_model_info_.with_batch_dim = with_batch_dim;\n  common_model_info_.without_batch_dim_inputs = without_batch_dim_inputs;\n  for (size_t i = 0; i < file_names.size(); i++) {\n    ApiModelInfo api_model_info;\n    api_model_info.model = models[i];\n    auto st = GetModelInfos(&api_model_info);\n    if (st != SUCCESS) {\n      return st;\n    }\n\n    MSI_LOG_INFO << \"Print model info, model file: '\" << file_names[i] << \"', subgraph \" << i;\n    MSI_LOG_INFO << \"Model input infos: count \" << api_model_info.input_tensor_infos.size();\n    for (auto &item : api_model_info.input_tensor_infos) {\n      MSI_LOG_INFO << item.shape << \", \" << item.data_type << \", \" << item.size;\n    }\n    MSI_LOG_INFO << \"Model output infos: count \" << api_model_info.output_tensor_infos.size();\n    for (auto &item : api_model_info.output_tensor_infos) {\n      MSI_LOG_INFO << item.shape << \", \" << item.data_type << \", \" << item.size;\n    }\n\n    auto status = CalculateBatchSize(&api_model_info);\n    if (status != SUCCESS) {\n      MSI_LOG_ERROR << \"Calculate batch size failed, model file: \" << file_names[i] << \", subgraph: \" << i;\n      return status;\n    }\n    if (last_batch_size != 0 && 
last_batch_size != common_model_info_.batch_size) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Expect batch size to be same, last batch size: \" << last_batch_size\n                                            << \", subgraph \" << i << \" batch size: \" << common_model_info_.batch_size;\n    }\n    last_batch_size = common_model_info_.batch_size;\n    models_.push_back(api_model_info);\n  }\n  MSI_LOG_INFO << \"Load model from file success, model file: \" << file_names << \", device_type: '\" << device_type\n               << \"', device_id: \" << device_id << \", model type: \" << model_type\n               << \", model context: \" << model_context.AsString();\n  return SUCCESS;\n}\n\nStatus MindSporeModelWrap::BuildOnPredict() {\n  for (size_t i = 0; i < models_.size(); i++) {\n    auto &inputs_info = models_[i].input_tensor_infos;\n    std::vector<TensorBasePtr> request;\n    for (auto &info : inputs_info) {\n      auto tensor = std::make_shared<Tensor>();\n      tensor->set_data_type(info.data_type);\n      tensor->set_shape(info.shape);\n      tensor->resize_data(info.size);\n      request.push_back(tensor);\n    }\n    std::vector<TensorBasePtr> reply;\n    auto ret = ExecuteModel(request, &reply, false, i);\n    if (ret != SUCCESS) {\n      MSI_LOG_ERROR << \"Failed to execute model when warmup, subgraph \" << i;\n      return ret;\n    }\n  }\n  return SUCCESS;\n}\n\nstd::shared_ptr<DeviceInfoContext> MindSporeModelWrap::TransformAscendModelContext(uint32_t device_id,\n                                                                                   const DeviceInfo &device_info) {\n  auto context_info = std::make_shared<AscendDeviceInfo>();\n  context_info->SetDeviceID(device_id);\n\n  using ContextStrFun = std::function<void(const std::string &)>;\n  ContextStrFun set_output_type = [context_info](const std::string &val) {\n    // \"FP32\", \"FP16\", \"UINT8\"\n    if (val == \"FP32\") {\n      
context_info->SetOutputType(mindspore::DataType::kNumberTypeFloat32);\n    } else if (val == \"FP16\") {\n      context_info->SetOutputType(mindspore::DataType::kNumberTypeFloat16);\n    } else if (val == \"UINT8\") {\n      context_info->SetOutputType(mindspore::DataType::kNumberTypeUInt8);\n    } else {\n      MSI_LOG_ERROR << \"Set model context output type failed, unknown data type \" << val;\n    }\n  };\n\n  for (auto &item : device_info) {\n    const auto &key = item.first;\n    const auto &value = item.second;\n    if (key == \"insert_op_cfg_path\") {\n      context_info->SetInsertOpConfigPath(value);\n    } else if (key == \"input_format\") {\n      context_info->SetInputFormat(value);\n    } else if (key == \"input_shape\") {\n      context_info->SetInputShape(value);\n    } else if (key == \"output_type\") {\n      set_output_type(value);\n    } else if (key == \"precision_mode\") {\n      context_info->SetPrecisionMode(value);\n    } else if (key == \"op_select_impl_mode\") {\n      context_info->SetOpSelectImplMode(value);\n    } else if (key == \"fusion_switch_config_path\") {\n      context_info->SetFusionSwitchConfigPath(value);\n    } else if (key == \"buffer_optimize_mode\") {\n      context_info->SetBufferOptimizeMode(value);\n    }\n  }\n  return context_info;\n}\n\nstd::shared_ptr<DeviceInfoContext> MindSporeModelWrap::TransformNvidiaGPUModelContext(uint32_t device_id,\n                                                                                      const DeviceInfo &device_info) {\n  auto context_info = std::make_shared<GPUDeviceInfo>();\n  context_info->SetDeviceID(device_id);\n\n  for (auto &item : device_info) {\n    const auto &key = item.first;\n    const auto &value = item.second;\n    if (key == \"precision_mode\") {\n      context_info->SetPrecisionMode(value);\n      context_info->SetEnableFP16(value == \"fp16\");\n    }\n  }\n  return context_info;\n}\n\nstd::shared_ptr<DeviceInfoContext> 
MindSporeModelWrap::TransformCPUModelContext(const DeviceInfo &device_info) {\n  auto context_info = std::make_shared<CPUDeviceInfo>();\n  for (auto &item : device_info) {\n    const auto &key = item.first;\n    const auto &value = item.second;\n    if (key == \"precision_mode\") {\n      context_info->SetEnableFP16(value == \"fp16\");\n    }\n  }\n  return context_info;\n}\n\nstd::string MindSporeModelWrap::DeviceTypeToString(serving::DeviceType device_type) {\n  switch (device_type) {\n    case kDeviceTypeGpu:\n      return \"gpu\";\n    case kDeviceTypeCpu:\n      return \"cpu\";\n    case kDeviceTypeAscend:\n      return \"ascend\";\n    case kDeviceTypeNotSpecified:\n    default:\n      return \"not_specified\";\n  }\n}\n\nDeviceInfo MindSporeModelWrap::GetDeviceInfo(const std::vector<DeviceInfo> &device_list,\n                                             serving::DeviceType device_type) {\n  DeviceInfo device_info;\n  for (auto &item : device_list) {\n    if (item.at(\"device_type\") == DeviceTypeToString(device_type)) {\n      device_info = item;\n      break;\n    }\n  }\n  return device_info;\n}\n\nstd::shared_ptr<Context> MindSporeModelWrap::TransformModelContext(serving::DeviceType device_type, uint32_t device_id,\n                                                                   const ModelContext &model_context,\n                                                                   bool enable_lite) {\n  auto context = std::make_shared<mindspore::Context>();\n  if (model_context.thread_num != -1) {\n    context->SetThreadNum(model_context.thread_num);\n  }\n  if (model_context.enable_parallel != -1) {\n    context->SetEnableParallel(model_context.enable_parallel != 0);\n  }\n  if (!model_context.thread_affinity_core_list.empty()) {\n    context->SetThreadAffinity(model_context.thread_affinity_core_list);\n  }\n\n  std::shared_ptr<mindspore::DeviceInfoContext> context_info = nullptr;\n\n  auto device_info = GetDeviceInfo(model_context.device_list, 
device_type);\n  if (device_type == kDeviceTypeAscend) {\n    context_info = TransformAscendModelContext(device_id, device_info);\n  } else if (device_type == kDeviceTypeCpu) {\n    context_info = TransformCPUModelContext(device_info);\n  } else if (device_type == kDeviceTypeGpu) {\n    context_info = TransformNvidiaGPUModelContext(device_id, device_info);\n  }\n  if (context_info != nullptr) {\n    context->MutableDeviceInfo().push_back(context_info);\n  }\n\n  if (enable_lite && device_type != kDeviceTypeCpu) {\n    auto cpu_device_info = GetDeviceInfo(model_context.device_list, kDeviceTypeCpu);\n    context->MutableDeviceInfo().push_back(TransformCPUModelContext(cpu_device_info));\n  }\n  return context;\n}\n\nStatus MindSporeModelWrap::GetModelInfos(ApiModelInfo *api_model_info) {\n  MSI_EXCEPTION_IF_NULL(api_model_info);\n  auto model = api_model_info->model;\n\n  auto get_tensor_info_from_tensor = [](const mindspore::MSTensor &ms_tensor) {\n    serving::TensorInfo tensor_info;\n    tensor_info.shape = ms_tensor.Shape();\n    tensor_info.data_type = TransTypeId2InferDataType(ms_tensor.DataType());\n    tensor_info.size = ms_tensor.DataSize();\n    if (tensor_info.size == 0) {\n      auto &shape = tensor_info.shape;\n      int64_t elements_nums = std::accumulate(shape.begin(), shape.end(), 1LL, std::multiplies<int64_t>());\n      if (elements_nums <= 0) {\n        MSI_LOG_ERROR << \"Invalid tensor shape \" << shape;\n        return serving::TensorInfo();\n      }\n      tensor_info.size = TensorBase::GetTypeSize(tensor_info.data_type) * static_cast<size_t>(elements_nums);\n    }\n    return tensor_info;\n  };\n  {  // input infos\n    auto input_infos = model->GetInputs();\n    for (size_t i = 0; i < input_infos.size(); i++) {\n      auto &info = input_infos[i];\n      auto tensor_info = get_tensor_info_from_tensor(info);\n      if (tensor_info.data_type == kMSI_Unknown) {\n        return INFER_STATUS_LOG_ERROR(FAILED)\n               << \"Unknown input 
mindspore data type \" << static_cast<int>(info.DataType());\n      }\n      api_model_info->input_tensor_infos.push_back(tensor_info);\n      api_model_info->input_names.push_back(info.Name());\n    }\n  }\n  {  // output infos\n    auto output_infos = model->GetOutputs();\n    for (auto &info : output_infos) {\n      auto tensor_info = get_tensor_info_from_tensor(info);\n      if (tensor_info.data_type == kMSI_Unknown) {\n        return INFER_STATUS_LOG_ERROR(FAILED)\n               << \"Unknown output mindspore data type \" << static_cast<int>(info.DataType());\n      }\n      api_model_info->output_tensor_infos.push_back(tensor_info);\n      api_model_info->output_names.push_back(info.Name());\n    }\n  }\n  return SUCCESS;\n}\n\nStatus MindSporeModelWrap::CalculateBatchSize(ApiModelInfo *api_model_info) {\n  auto &input_infos = api_model_info->input_tensor_infos;\n  auto &output_infos = api_model_info->output_tensor_infos;\n  if (!common_model_info_.with_batch_dim) {\n    common_model_info_.batch_size = 1;\n    for (auto &input : input_infos) {\n      input.is_no_batch_dim = true;\n    }\n    for (auto &output : output_infos) {\n      output.is_no_batch_dim = true;\n    }\n    return SUCCESS;\n  }\n  const auto &list = common_model_info_.without_batch_dim_inputs;\n  uint32_t cur_batch_size = 0;\n  for (size_t i = 0; i < input_infos.size(); i++) {\n    auto &input = input_infos[i];\n    if (std::find(list.begin(), list.end(), i) != list.end()) {\n      input.is_no_batch_dim = true;\n      continue;\n    }\n    if (input.shape.empty()) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"The shape of model input \" << i << \" cannot be empty, \"\n                                            << \"when with_batch_dim is true and without_batch_dim_inputs is \" << list;\n    }\n    if (input.shape[0] <= 0) {\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"The shape of model input \" << i << \" is invalid, shape: \" << input.shape;\n    }\n    if 
(cur_batch_size == 0) {\n      cur_batch_size = static_cast<uint32_t>(input.shape[0]);\n      continue;\n    }\n    if (input.shape[0] != cur_batch_size) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"The shape \" << input.shape << \" of model input \" << i\n                                            << \" does not match current batch size \" << cur_batch_size;\n    }\n  }\n  for (size_t i = 0; i < output_infos.size(); i++) {\n    auto &output = output_infos[i];\n    if (output.shape.empty()) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"The shape of model output \" << i << \" cannot be empty\";\n    }\n    if (output.shape[0] <= 0) {\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"The shape of model output \" << i << \" is invalid, shape: \" << output.shape;\n    }\n    if (cur_batch_size == 0) {\n      cur_batch_size = static_cast<uint32_t>(output.shape[0]);\n      continue;\n    }\n    if (output.shape[0] != cur_batch_size) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"The shape \" << output.shape << \" of model output \" << i\n                                            << \" does not match current batch size \" << cur_batch_size;\n    }\n  }\n  if (cur_batch_size == 0) {\n    cur_batch_size = 1;\n  }\n  common_model_info_.batch_size = cur_batch_size;\n  return SUCCESS;\n}\n\nStatus MindSporeModelWrap::UnloadModel() {\n  for (auto &iter : models_) {\n    iter.model = nullptr;\n  }\n  return SUCCESS;\n}\n\nStatus MindSporeModelWrap::ExecuteModel(const RequestBase &request, serving::ReplyBase *reply, bool return_result,\n                                        uint64_t subgraph) {\n  MSI_EXCEPTION_IF_NULL(reply);\n  FuncMakeInBuffer func_in = [&request](size_t index, const std::string &name) {\n    auto input_tensor = request[index];\n    if (input_tensor == nullptr || input_tensor->data() == nullptr) {\n      MSI_LOG_EXCEPTION << \"Input tensor data cannot be nullptr, index \" << index;\n    }\n    return 
mindspore::MSTensor::CreateRefTensor(name, TransInferDataType2ApiTypeId(input_tensor->data_type()),\n                                                input_tensor->shape(), const_cast<uint8_t *>(input_tensor->data()),\n                                                input_tensor->data_size(), false);\n  };\n\n  FuncMakeOutTensor func_out = [&reply](const mindspore::MSTensor &result_tensor, DataType data_type,\n                                        const std::vector<int64_t> &shape) {\n    if (result_tensor.IsDevice()) {\n      MSI_LOG_EXCEPTION << \"Can not support device type tensor\";\n    }\n    auto tensor = reply->add();\n    MSI_EXCEPTION_IF_NULL(tensor);\n    (void)tensor->set_data(result_tensor.Data().get(), result_tensor.DataSize());\n    tensor->set_data_type(data_type);\n    tensor->set_shape(shape);\n  };\n  return ExecuteModelCommon(request.size(), func_in, func_out, return_result, subgraph);\n}\n\nStatus MindSporeModelWrap::ExecuteModel(const std::vector<TensorBasePtr> &request, std::vector<TensorBasePtr> *reply,\n                                        bool return_result, uint64_t subgraph) {\n  if (subgraph >= models_.size()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Inputs subgraph label error, subgraph label is \" << subgraph\n                                          << \", total graph number is \" << models_.size();\n  }\n  MSI_EXCEPTION_IF_NULL(reply);\n  FuncMakeInBuffer func_in = [&request](size_t index, const std::string &name) {\n    auto &input_tensor = request[index];\n    return mindspore::MSTensor::CreateRefTensor(name, TransInferDataType2ApiTypeId(input_tensor->data_type()),\n                                                input_tensor->shape(), const_cast<uint8_t *>(input_tensor->data()),\n                                                input_tensor->data_size(), false);\n  };\n  FuncMakeOutTensor func_out = [&reply](const mindspore::MSTensor &result_tensor, DataType data_type,\n                                        const 
std::vector<int64_t> &shape) {\n    if (result_tensor.IsDevice()) {\n      MSI_LOG_EXCEPTION << \"Can not support device type tensor\";\n    }\n    TensorBasePtr tensor = nullptr;\n    // lite backend, output tensor result in all predict\n    if (InferenceLoader::Instance().GetEnableLite()) {\n      tensor = std::make_shared<Tensor>(data_type, shape, result_tensor.Data().get(), result_tensor.DataSize());\n    } else {\n      tensor = std::make_shared<ApiBufferTensorWrap>(data_type, shape, result_tensor);\n    }\n    reply->push_back(tensor);\n  };\n  return ExecuteModelCommon(request.size(), func_in, func_out, return_result, subgraph);\n}\n\nStatus MindSporeModelWrap::ExecuteModelCommon(size_t request_size, const FuncMakeInBuffer &in_func,\n                                              const FuncMakeOutTensor &out_func, bool return_result,\n                                              uint64_t subgraph) {\n  if (models_[subgraph].model == nullptr) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Model is not loaded\";\n  }\n  auto &model_info = models_[subgraph];\n  auto model = model_info.model;\n  auto &input_names = model_info.input_names;\n  auto &output_names = model_info.output_names;\n  if (input_names.size() != request_size) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Inputs size not match, request inputs size \" << request_size\n                                          << \", model inputs size \" << input_names.size();\n  }\n  std::vector<mindspore::MSTensor> inputs;\n  for (size_t i = 0; i < input_names.size(); i++) {\n    auto tensor = in_func(i, input_names[i]);\n    if (tensor == nullptr) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Failed to create input \" << i << \" MSTensor\";\n    }\n    inputs.push_back(*tensor);\n    mindspore::MSTensor::DestroyTensorPtr(tensor);\n  }\n  std::vector<mindspore::MSTensor> outputs;\n  mindspore::Status status;\n  if (SupportMultiThreads()) {\n    status = model->Predict(inputs, &outputs);\n  } else 
{  // vm backend\n    std::unique_lock<std::mutex> lock(infer_mutex_);\n    status = model->Predict(inputs, &outputs);\n  }\n  if (!status.IsOk()) {\n    MSI_LOG_ERROR << \"Predict failed: \" << status.ToString();\n    return Status(FAILED, \"Predict Failed\");\n  }\n  if (outputs.size() != output_names.size()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Outputs size not match, predict outputs size \" << outputs.size()\n                                          << \", model outputs size \" << output_names.size();\n  }\n  if (return_result) {\n    auto &output_infos = model_info.output_tensor_infos;\n    for (size_t i = 0; i < output_names.size(); i++) {\n      auto &result_tensor = outputs[i];\n      auto &output_info = output_infos[i];\n      if (result_tensor.DataSize() != output_info.size) {\n        return INFER_STATUS_LOG_ERROR(FAILED)\n               << \"Get output failed, predict output data size \" << result_tensor.DataSize()\n               << \" not match model info data size \" << output_info.size << \", output_name \" << output_names[i];\n      }\n      out_func(result_tensor, output_info.data_type, output_info.shape);\n    }\n  }\n  return SUCCESS;\n}\n\nstd::vector<serving::TensorInfo> MindSporeModelWrap::GetInputInfos(uint64_t subgraph) const {\n  return models_[subgraph].input_tensor_infos;\n}\n\nstd::vector<serving::TensorInfo> MindSporeModelWrap::GetOutputInfos(uint64_t subgraph) const {\n  return models_[subgraph].output_tensor_infos;\n}\n\nssize_t MindSporeModelWrap::GetBatchSize(uint64_t) const { return common_model_info_.batch_size; }\n\nuint64_t MindSporeModelWrap::GetSubGraphNum() const { return models_.size(); }\n\nbool MindSporeModelWrap::SupportReuseDevice() const {\n  static bool support_reuse_device = false;\n  static bool value_set = false;\n  if (!value_set) {\n    value_set = true;\n    auto is_device_910 = mindspore::Model::CheckModelSupport(mindspore::kAscend910, mindspore::kMindIR);\n    support_reuse_device = 
!is_device_910;\n  }\n  return support_reuse_device;\n}\n\nbool MindSporeModelWrap::SupportMultiThreads() const {\n  static bool support_multi_thread = false;\n  static bool value_set = false;\n  if (!value_set) {\n    value_set = true;\n    if (InferenceLoader::Instance().GetEnableLite()) {\n      support_multi_thread = true;\n    } else if (mindspore::Model::CheckModelSupport(mindspore::kAscend910, mindspore::kMindIR)) {\n      support_multi_thread = false;\n    } else if (mindspore::Model::CheckModelSupport(mindspore::kGPU, mindspore::kMindIR)) {\n      support_multi_thread = false;\n    } else {\n      support_multi_thread = true;\n    }\n  }\n  return support_multi_thread;\n}\n\nbool MindSporeModelWrap::CheckModelSupport(DeviceType device_type, ModelType model_type) const {\n  auto ms_device_type = GetMsDeviceType(device_type);\n  if (ms_device_type == mindspore::kInvalidDeviceType) {\n    return false;\n  }\n  auto ms_model_type = GetMsModelType(model_type);\n  if (ms_model_type == mindspore::kUnknownType) {\n    return false;\n  }\n  return mindspore::Model::CheckModelSupport(ms_device_type, ms_model_type);\n}\n\nmindspore::ModelType MindSporeModelWrap::GetMsModelType(serving::ModelType model_type) {\n  mindspore::ModelType ms_model_type;\n  switch (model_type) {\n    case kMindIR:\n      ms_model_type = mindspore::kMindIR;\n      break;\n    case kMindIR_Lite:\n      ms_model_type = mindspore::kMindIR_Lite;\n      break;\n    case kAIR:\n      ms_model_type = mindspore::kAIR;\n      break;\n    case kOM:\n      ms_model_type = mindspore::kOM;\n      break;\n    case kONNX:\n      ms_model_type = mindspore::kONNX;\n      break;\n    case kUnknownType:\n    default:\n      ms_model_type = mindspore::kUnknownType;\n  }\n  return ms_model_type;\n}\n\nmindspore::DeviceType MindSporeModelWrap::GetMsDeviceType(serving::DeviceType device_type) {\n  mindspore::DeviceType ms_device_type = mindspore::DeviceType::kInvalidDeviceType;\n  switch (device_type) {\n    case 
kDeviceTypeAscend:\n      ms_device_type = mindspore::DeviceType::kAscend;\n      break;\n    case kDeviceTypeGpu:\n      ms_device_type = mindspore::DeviceType::kGPU;\n      break;\n    case kDeviceTypeCpu:\n      ms_device_type = mindspore::DeviceType::kCPU;\n      break;\n    case kDeviceTypeNotSpecified:\n    default:\n      break;\n  }\n  return ms_device_type;\n}\n\nApiBufferTensorWrap::ApiBufferTensorWrap() = default;\n\nApiBufferTensorWrap::ApiBufferTensorWrap(DataType type, const std::vector<int64_t> &shape,\n                                         const mindspore::MSTensor &tensor)\n    : type_(type), shape_(shape), tensor_(tensor) {}\n\nApiBufferTensorWrap::~ApiBufferTensorWrap() = default;\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/inference/mindspore_model_wrap.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WROERK_MODEL_WRAP_H\n#define MINDSPORE_SERVING_WROERK_MODEL_WRAP_H\n\n#include <unordered_map>\n#include <string>\n#include <memory>\n#include <vector>\n#include <map>\n#include <mutex>\n#include \"common/serving_common.h\"\n#include \"worker/inference/inference.h\"\n#include \"include/api/model.h\"\n#include \"include/api/types.h\"\n#include \"include/api/data_type.h\"\n#include \"include/api/serialization.h\"\n#include \"include/api/context.h\"\n\nnamespace mindspore {\nnamespace serving {\nstruct ApiModelInfo {\n  std::vector<std::string> input_names;\n  std::vector<serving::TensorInfo> input_tensor_infos;\n  std::vector<std::string> output_names;\n  std::vector<serving::TensorInfo> output_tensor_infos;\n  std::shared_ptr<mindspore::Model> model = nullptr;\n};\n\nstruct ApiCommonModelInfo {\n  uint32_t batch_size = 0;\n  serving::DeviceType device_type;\n  uint32_t device_id = 0;\n  bool with_batch_dim = false;\n  std::vector<int> without_batch_dim_inputs;\n};\n\nclass MindSporeModelWrap : public InferenceBase {\n public:\n  MindSporeModelWrap() = default;\n\n  ~MindSporeModelWrap() = default;\n\n  Status LoadModelFromFile(serving::DeviceType device_type, uint32_t device_id,\n                           const std::vector<std::string> &file_names, ModelType model_type, bool with_batch_dim,\n               
            const std::vector<int> &without_batch_dim_inputs, const ModelContext &model_context,\n                           const std::string &dec_key, const std::string &dec_mode, const std::string &config_file,\n                           bool enable_lite) override;\n\n  Status UnloadModel() override;\n  Status ExecuteModel(const RequestBase &request, ReplyBase *reply, bool return_result, uint64_t subgraph) override;\n  Status ExecuteModel(const std::vector<TensorBasePtr> &request, std::vector<TensorBasePtr> *reply, bool return_result,\n                      uint64_t subgraph) override;\n\n  std::vector<serving::TensorInfo> GetInputInfos(uint64_t subgraph) const override;\n\n  std::vector<serving::TensorInfo> GetOutputInfos(uint64_t subgraph) const override;\n\n  ssize_t GetBatchSize(uint64_t subgraph) const override;\n\n  bool CheckModelSupport(DeviceType device_type, ModelType model_type) const override;\n\n  uint64_t GetSubGraphNum() const override;\n  bool SupportReuseDevice() const override;\n  bool SupportMultiThreads() const;\n\n private:\n  ApiCommonModelInfo common_model_info_;\n  std::vector<ApiModelInfo> models_;\n  static std::mutex infer_mutex_;\n\n  using FuncMakeInBuffer = std::function<mindspore::MSTensor *(size_t index, const std::string &name)>;\n  using FuncMakeOutTensor =\n    std::function<void(const mindspore::MSTensor &, DataType data_type, const std::vector<int64_t> &shape)>;\n  Status ExecuteModelCommon(size_t request_size, const FuncMakeInBuffer &in_func, const FuncMakeOutTensor &out_func,\n                            bool return_result, uint64_t subgraph);\n  Status GetModelInfos(ApiModelInfo *model_info);\n  Status SetApiModelInfo(serving::DeviceType device_type, uint32_t device_id,\n                         const std::vector<std::string> &file_names, ModelType model_type, bool with_batch_dim,\n                         const std::vector<int> &without_batch_dim_inputs, const ModelContext &model_context,\n                         const 
std::vector<std::shared_ptr<mindspore::Model>> &models);\n  Status LoadLiteModelFromFileInner(serving::DeviceType device_type, uint32_t device_id,\n                                    const std::vector<std::string> &file_names, ModelType model_type,\n                                    bool with_batch_dim, const std::vector<int> &without_batch_dim_inputs,\n                                    const ModelContext &model_context, const std::string &config_file);\n  Status LoadModelFromFileInner(serving::DeviceType device_type, uint32_t device_id,\n                                const std::vector<std::string> &file_names, ModelType model_type, bool with_batch_dim,\n                                const std::vector<int> &without_batch_dim_inputs, const ModelContext &model_context,\n                                const std::string &dec_key, const std::string &dec_mode,\n                                const std::string &config_file);\n  std::shared_ptr<Context> TransformModelContext(serving::DeviceType device_type, uint32_t device_id,\n                                                 const ModelContext &model_context, bool enable_lite);\n\n  std::shared_ptr<DeviceInfoContext> TransformAscendModelContext(uint32_t device_id, const DeviceInfo &device_info);\n  std::shared_ptr<DeviceInfoContext> TransformNvidiaGPUModelContext(uint32_t device_id, const DeviceInfo &device_info);\n  std::shared_ptr<DeviceInfoContext> TransformCPUModelContext(const DeviceInfo &device_info);\n  DeviceInfo GetDeviceInfo(const std::vector<DeviceInfo> &device_list, serving::DeviceType device_type);\n  Status BuildOnPredict();\n\n  Status CalculateBatchSize(ApiModelInfo *api_model_info);\n  static mindspore::ModelType GetMsModelType(serving::ModelType model_type);\n  static mindspore::DeviceType GetMsDeviceType(serving::DeviceType device_type);\n  static std::string DeviceTypeToString(serving::DeviceType device_type);\n};\n\nclass ApiBufferTensorWrap : public TensorBase {\n public:\n  
ApiBufferTensorWrap();\n  ApiBufferTensorWrap(DataType type, const std::vector<int64_t> &shape, const mindspore::MSTensor &buffer);\n  ~ApiBufferTensorWrap() override;\n\n  void set_data_type(DataType type) override { type_ = type; }\n  DataType data_type() const override { return type_; }\n\n  void set_shape(const std::vector<int64_t> &shape) override { shape_ = shape; }\n  std::vector<int64_t> shape() const override { return shape_; }\n\n  const uint8_t *data() const override { return static_cast<const uint8_t *>(tensor_.Data().get()); }\n  size_t data_size() const override { return tensor_.DataSize(); }\n\n  bool resize_data(size_t) override { MSI_LOG_EXCEPTION << \"ApiBufferTensorWrap not support resize data\"; }\n  uint8_t *mutable_data() override { return static_cast<uint8_t *>(tensor_.MutableData()); }\n\n  // For kMSI_String and kMSI_Bytes\n  void clear_bytes_data() override { MSI_LOG_EXCEPTION << \"Not support for mindspore::Buffer Tensor\"; }\n  void add_bytes_data(const uint8_t *, size_t) override {\n    MSI_LOG_EXCEPTION << \"Not support for mindspore::MSTensor Tensor\";\n  }\n  size_t bytes_data_size() const override { MSI_LOG_EXCEPTION << \"Not support for mindspore::Buffer Tensor\"; }\n  void get_bytes_data(size_t, const uint8_t **, size_t *) const override {\n    MSI_LOG_EXCEPTION << \"Not support for mindspore::MSTensor Tensor\";\n  }\n\n private:\n  DataType type_ = kMSI_Unknown;\n  std::vector<int64_t> shape_;\n  mindspore::MSTensor tensor_;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_WROERK_MODEL_WRAP_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/local_servable/local_model_loader.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/local_servable/local_model_loader.h\"\n#include <vector>\n#include <string>\n#include \"common/tensor.h\"\n#include \"worker/context.h\"\n#include \"worker/servable_register.h\"\n\nnamespace mindspore::serving {\nLocalModelLoader::~LocalModelLoader() noexcept { Clear(); }\n\nuint64_t LocalModelLoader::GetGraphNum() const {\n  if (!model_session_) {\n    MSI_LOG_EXCEPTION << \"Model '\" << GetModelKey() << \"' has not been loaded\";\n  }\n  return graph_num_;\n}\n\nStatus LocalModelLoader::Predict(const std::vector<TensorBasePtr> &input, std::vector<TensorBasePtr> *output,\n                                 uint64_t subgraph) {\n  if (!model_session_) {\n    MSI_LOG_EXCEPTION << \"Model '\" << GetModelKey() << \"' has not been loaded\";\n  }\n  return model_session_->ExecuteModel(input, output, true, subgraph);\n}\n\nstd::vector<TensorInfo> LocalModelLoader::GetInputInfos(uint64_t subgraph) const {\n  if (!model_session_) {\n    MSI_LOG_EXCEPTION << \"Model '\" << GetModelKey() << \"' has not been loaded\";\n  }\n  return model_session_->GetInputInfos(subgraph);\n}\n\nstd::vector<TensorInfo> LocalModelLoader::GetOutputInfos(uint64_t subgraph) const {\n  if (!model_session_) {\n    MSI_LOG_EXCEPTION << \"Model '\" << GetModelKey() << \"' has not been loaded\";\n  }\n  return 
model_session_->GetOutputInfos(subgraph);\n}\n\nuint64_t LocalModelLoader::GetBatchSize() const {\n  if (!model_session_) {\n    MSI_LOG_EXCEPTION << \"Model '\" << GetModelKey() << \"' has not been loaded\";\n  }\n  auto batch_size = model_session_->GetBatchSize(0);\n  if (batch_size < 0) {\n    MSI_LOG_EXCEPTION << \"Invalid batch size \" << batch_size << \", model: '\" << GetModelKey() << \"'\";\n  }\n  return static_cast<uint64_t>(batch_size);\n}\n\nStatus LocalModelLoader::LoadModel(const std::string &servable_directory, const std::string &servable_name,\n                                   uint64_t version_number, const ModelMeta &model_meta, const std::string &dec_key,\n                                   const std::string &dec_mode) {\n  if (model_loaded_) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Model has loaded\";\n  }\n  base_spec_.servable_directory = servable_directory;\n  base_spec_.servable_name = servable_name;\n  base_spec_.version_number = version_number;\n  model_meta_ = model_meta;\n\n  Status status;\n  const ServableSignature &signature = ServableRegister::Instance().GetServableSignature();\n  if (signature.servable_name != servable_name) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Servable '\" << servable_name << \"' has not been registered\";\n  }\n  if (signature.servable_type != kServableTypeLocal) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Servable '\" << servable_name << \"' is not registered as local servable\";\n  }\n  status = InitDevice(model_meta.local_meta.model_format);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Init env failed\";\n    return status;\n  }\n  status = LoadModel(version_number, dec_key, dec_mode);\n  if (status != SUCCESS) {\n    return status;\n  }\n  model_loaded_ = true;\n  return SUCCESS;\n}\n\nStatus LocalModelLoader::InitDevice(ModelType model_type) {\n  auto context = ServableContext::Instance();\n  auto device_type = context->GetDeviceType();\n  auto lite_backend = 
InferenceLoader::Instance().GetEnableLite();\n  auto support_device_type = InferenceLoader::Instance().GetSupportDeviceType(device_type, model_type);\n  if (support_device_type == kDeviceTypeNotSpecified ||\n      (lite_backend && model_type != kMindIR_Lite && model_type != kMindIR)) {\n    std::string inference_package = lite_backend ? \"MindSpore Lite\" : \"MindSpore\";\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Not support device type \" << device_type << \" and model type \" << model_type\n           << \". Current inference backend: \" << inference_package\n           << \". When the inference backend is MindSpore, Ascend 910 and GPU supports MindIR \"\n           << \"model. When the inference backend is MindSpore Lite, \"\n           << \"Ascend 310/310P, GPU and CPU support MindIR and MindIR_Lite model converted by Lite converter tool.\";\n  }\n  context->SetDeviceType(support_device_type);\n  return SUCCESS;\n}\n\nStatus LocalModelLoader::LoadModel(uint64_t version_number, const std::string &dec_key, const std::string &dec_mode) {\n  const auto &model_meta = model_meta_;\n  auto context = ServableContext::Instance();\n  std::string model_dir =\n    base_spec_.servable_directory + \"/\" + base_spec_.servable_name + \"/\" + std::to_string(version_number);\n  if (!common::DirOrFileExist(model_dir)) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Start servable failed: There is no specified version directory of models, specified version number: \"\n           << version_number << \", servable directory: '\" << base_spec_.servable_directory << \"', servable name: '\"\n           << base_spec_.servable_name << \"'\";\n  }\n  const auto &common_meta = model_meta.common_meta;\n  const auto &local_meta = model_meta.local_meta;\n  std::vector<std::string> model_file_names;\n  for (auto &file : local_meta.model_files) {\n    std::string model_file_name = model_dir + \"/\" + file;\n    model_file_names.push_back(model_file_name);\n  }\n  
auto session = InferenceLoader::Instance().CreateMindSporeInfer();\n  if (session == nullptr) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Create MindSpore infer failed\";\n  }\n  std::string config_file_path;\n  if (!local_meta.config_file.empty()) {\n    if (local_meta.config_file[0] == '/') {\n      config_file_path = local_meta.config_file;\n    } else {\n      config_file_path = base_spec_.servable_directory + \"/\" + base_spec_.servable_name + \"/\" + local_meta.config_file;\n    }\n  }\n  auto enable_lite = InferenceLoader::Instance().GetEnableLite();\n  Status status = session->LoadModelFromFile(context->GetDeviceType(), context->GetDeviceId(), model_file_names,\n                                             local_meta.model_format, common_meta.with_batch_dim,\n                                             common_meta.without_batch_dim_inputs, model_meta.local_meta.model_context,\n                                             dec_key, dec_mode, config_file_path, enable_lite);\n  if (status != SUCCESS) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Load model failed, servable directory: '\" << base_spec_.servable_directory << \"', servable name: '\"\n           << base_spec_.servable_name << \"', model file: '\" << local_meta.model_files << \"', version number \"\n           << version_number << \", model context: \" << local_meta.model_context.AsString()\n           << \", load error details: \" << status.StatusMessage();\n  }\n  model_session_ = session;\n  graph_num_ = model_file_names.size();\n\n  MSI_LOG_INFO << \"Load model success, servable directory: '\" << base_spec_.servable_directory << \"', servable name: '\"\n               << base_spec_.servable_name << \"', model file: '\" << local_meta.model_files << \"', version number \"\n               << version_number << \", context \" << local_meta.model_context.AsString();\n  return SUCCESS;\n}\n\nvoid LocalModelLoader::Clear() {\n  if (model_session_ != nullptr) {\n    
(void)model_session_->UnloadModel();\n    model_session_ = nullptr;\n  }\n  model_loaded_ = false;\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/local_servable/local_model_loader.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_ASCEND_SERVABLE_H\n#define MINDSPORE_SERVING_WORKER_ASCEND_SERVABLE_H\n\n#include <memory>\n#include <vector>\n#include <string>\n#include <map>\n\n#include \"common/serving_common.h\"\n#include \"common/instance.h\"\n#include \"common/servable.h\"\n#include \"mindspore_serving/ccsrc/worker/model_loader_base.h\"\n#include \"worker/inference/inference.h\"\n\nnamespace mindspore::serving {\nclass MS_API LocalModelLoader final : public DirectModelLoaderBase {\n public:\n  LocalModelLoader() = default;\n  ~LocalModelLoader() noexcept override;\n\n  Status Predict(const std::vector<TensorBasePtr> &input, std::vector<TensorBasePtr> *output,\n                 uint64_t subgraph) override;\n\n  std::vector<TensorInfo> GetInputInfos(uint64_t subgraph) const override;\n  std::vector<TensorInfo> GetOutputInfos(uint64_t subgraph) const override;\n  uint64_t GetBatchSize() const override;\n  uint64_t GetGraphNum() const override;\n\n  Status LoadModel(const std::string &servable_directory, const std::string &servable_name, uint64_t version_number,\n                   const ModelMeta &model_meta, const std::string &dec_key, const std::string &dec_mode);\n  Status InitDevice(ModelType model_type);\n  void Clear() override;\n\n  std::string GetModelKey() const { return model_meta_.common_meta.model_key; }\n\n 
private:\n  ServableLoadSpec base_spec_;\n  ModelMeta model_meta_;\n  uint64_t graph_num_ = 0;\n  std::shared_ptr<InferenceBase> model_session_ = nullptr;\n\n  bool model_loaded_ = false;\n\n  Status LoadModel(uint64_t version, const std::string &dec_key, const std::string &dec_mode);\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_WORKER_ASCEND_SERVABLE_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/model_loader_base.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/model_loader_base.h\"\n#include \"common/buffer_tensor.h\"\n\nnamespace mindspore::serving {\nStatus DirectModelLoaderBase::Predict(const std::vector<InstanceData> &inputs, std::vector<ResultInstance> *outputs,\n                                      uint64_t subgraph) {\n  MSI_EXCEPTION_IF_NULL(outputs);\n  if (subgraph >= model_info_.sub_graph_infos.size()) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Invalid input subgraph index \" << subgraph << \", model info: \" << model_key_\n           << \", subgraph count: \" << model_info_.sub_graph_infos.size();\n  }\n  Status status;\n  std::vector<TensorBasePtr> predict_outputs;\n  auto &subgraph_info = model_info_.sub_graph_infos[subgraph];\n  status = PrePredict(subgraph_info, model_info_.batch_size, inputs);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Call Pre Predict failed, model info \" << model_key_;\n    return status;\n  }\n  status = Predict(subgraph_info.input_buffers, &predict_outputs, subgraph);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Predict failed, model info \" << model_key_;\n    return status;\n  }\n  status = PostPredict(subgraph_info, model_info_.batch_size, inputs, predict_outputs, outputs);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Call Post Predict failed, model info \" << model_key_;\n    return 
status;\n  }\n  return SUCCESS;\n}\n\nStatus DirectModelLoaderBase::PrePredict(const ModelExecutorSubgraphInfo &subgraph_info, uint64_t model_batch_size,\n                                         const std::vector<InstanceData> &instances) {\n  auto input_batch_size = instances.size();\n  if (input_batch_size == 0 || input_batch_size > model_batch_size) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n           << \"Invalid input batch size \" << input_batch_size << \", model batch size \" << model_batch_size;\n  }\n  auto &input_infos = subgraph_info.input_infos;\n  auto &input_buffers = subgraph_info.input_buffers;\n\n  for (size_t i = 0; i < input_infos.size(); i++) {\n    auto &tensor = input_buffers[i];\n    auto data_size = tensor->data_size();\n    auto dst_buffer = reinterpret_cast<uint8_t *>(tensor->mutable_data());\n    if (input_infos[i].is_no_batch_dim) {\n      if (data_size != instances[0][i]->data_size()) {\n        return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Input \" << i << \" data size \" << instances[0][i]->data_size()\n                                                    << \" does not match size \" << data_size << \" defined in model\";\n      }\n      (void)memcpy_s(dst_buffer, data_size, instances[0][i]->data(), data_size);\n      continue;\n    }\n    auto item_size = data_size / model_batch_size;\n    for (size_t k = 0; k < input_batch_size; k++) {\n      if (i >= instances[k].size()) {\n        return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \" Batch index \" << k << \" does not have input \" << i;\n      }\n      if (item_size != instances[k][i]->data_size()) {\n        return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n               << \"Input \" << i << \" Batch index \" << k << \" input data size \" << instances[k][i]->data_size()\n               << \" does not match size \" << item_size << \" defined in model\";\n      }\n      (void)memcpy_s(dst_buffer + k * item_size, data_size - k * item_size, instances[k][i]->data(), item_size);\n 
   }\n    for (size_t k = input_batch_size; k < model_batch_size; k++) {\n      (void)memcpy_s(dst_buffer + k * item_size, data_size - k * item_size, instances[0][i]->data(), item_size);\n    }\n  }\n  return SUCCESS;\n}\n\nStatus DirectModelLoaderBase::PostPredict(const ModelExecutorSubgraphInfo &subgraph_info, uint64_t model_batch_size,\n                                          const std::vector<InstanceData> &instances,\n                                          const std::vector<TensorBasePtr> &predict_result,\n                                          std::vector<ResultInstance> *instance_result) {\n  auto input_batch_size = instances.size();\n  if (input_batch_size == 0 || input_batch_size > model_batch_size) {\n    MSI_LOG_ERROR << \"Input batch size \" << input_batch_size << \" invalid, model batch size \" << model_batch_size;\n    return SYSTEM_ERROR;\n  }\n  if (predict_result.size() != subgraph_info.output_infos.size()) {\n    MSI_LOG_ERROR << \"Output result count \" << predict_result.size() << \" not equal to outputs count \"\n                  << subgraph_info.output_infos.size();\n    return SYSTEM_ERROR;\n  }\n  std::vector<ResultInstance> results_data(input_batch_size);\n  auto &output = subgraph_info.output_infos;\n  for (size_t i = 0; i < predict_result.size(); i++) {\n    auto &item = predict_result[i];\n    auto &output_info = output[i];\n    if (item->data_size() != output_info.tensor_info.size) {\n      MSI_LOG_ERROR << \"Output result \" << i << \" data size \" << item->data_size() << \" not equal to size \"\n                    << output_info.tensor_info.size << \" in output_infos_ \";\n      return SYSTEM_ERROR;\n    }\n    auto item_size = output_info.size_one_batch;\n    auto shape = output_info.shape_one_batch;\n    auto data_type = output_info.tensor_info.data_type;\n    auto src_buffer = const_cast<uint8_t *>(item->data());\n    for (size_t k = 0; k < input_batch_size; k++) {\n      auto tensor =\n        
std::make_shared<BufferTensorWithOwner>(item, data_type, shape, src_buffer + item_size * k, item_size, true);\n      results_data[k].data.push_back(tensor);\n    }\n  }\n  *instance_result = results_data;\n  return SUCCESS;\n}\n\nStatus DirectModelLoaderBase::AfterLoadModel() {\n  InitModelExecuteInfo();\n  return SUCCESS;\n}\n\nvoid DirectModelLoaderBase::InitModelExecuteInfo() {\n  auto graph_num = GetGraphNum();\n  model_info_.sub_graph_infos.resize(graph_num);\n  model_info_.batch_size = GetBatchSize();\n\n  for (uint64_t i = 0; i < graph_num; i++) {\n    auto input_infos = GetInputInfos(i);\n    auto output_infos = GetOutputInfos(i);\n    auto &subgraph_info = model_info_.sub_graph_infos[i];\n    subgraph_info.input_infos = input_infos;\n    for (auto &item : output_infos) {\n      TensorInfoOutput info;\n      info.tensor_info = item;\n      if (item.is_no_batch_dim) {\n        info.shape_one_batch = item.shape;\n        info.size_one_batch = item.size;\n      } else {\n        info.shape_one_batch = item.shape;\n        (void)info.shape_one_batch.erase(info.shape_one_batch.begin());\n        // the batch size has been checked in WorkerExecutor\n        info.size_one_batch = item.size / model_info_.batch_size;\n      }\n      subgraph_info.output_infos.push_back(info);\n    }\n    // init input buffer\n    subgraph_info.input_buffers.clear();\n    for (auto &input_info : subgraph_info.input_infos) {\n      auto tensor = std::make_shared<Tensor>();\n      tensor->set_data_type(input_info.data_type);\n      tensor->set_shape(input_info.shape);\n      (void)tensor->resize_data(input_info.size);\n      subgraph_info.input_buffers.push_back(tensor);\n    }\n  }\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/model_loader_base.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_SERVABLE_BASE_H\n#define MINDSPORE_SERVING_WORKER_SERVABLE_BASE_H\n\n#include <memory>\n#include <unordered_map>\n#include <vector>\n#include <string>\n\n#include \"common/serving_common.h\"\n#include \"common/instance_data.h\"\n#include \"common/servable.h\"\n#include \"worker/inference/inference.h\"\n\nnamespace mindspore::serving {\nclass ModelLoaderBase {\n public:\n  ModelLoaderBase() = default;\n  virtual ~ModelLoaderBase() = default;\n\n  virtual std::vector<TensorInfo> GetInputInfos(uint64_t subgraph) const = 0;\n  virtual std::vector<TensorInfo> GetOutputInfos(uint64_t subgraph) const = 0;\n  virtual uint64_t GetBatchSize() const = 0;\n  virtual uint64_t GetGraphNum() const = 0;\n  virtual void Clear() = 0;\n\n  virtual Status Predict(const std::vector<InstanceData> &inputs, std::vector<ResultInstance> *outputs,\n                         uint64_t subgraph) = 0;\n  virtual Status AfterLoadModel() = 0;\n  virtual bool OwnDevice() const = 0;\n};\n\nstruct ModelExecutorSubgraphInfo {\n  std::vector<TensorInfo> input_infos;\n  std::vector<TensorInfoOutput> output_infos;\n  std::vector<TensorBasePtr> input_buffers;\n};\n\nstruct ModelExecutorInfo {\n  std::vector<ModelExecutorSubgraphInfo> sub_graph_infos;\n  uint64_t batch_size = 0;\n};\n\nclass MS_API DirectModelLoaderBase : public 
ModelLoaderBase {\n public:\n  virtual Status Predict(const std::vector<TensorBasePtr> &input, std::vector<TensorBasePtr> *output,\n                         uint64_t subgraph) = 0;\n\n  Status Predict(const std::vector<InstanceData> &inputs, std::vector<ResultInstance> *outputs,\n                 uint64_t subgraph) override;\n\n  Status AfterLoadModel() override;\n  bool OwnDevice() const override { return true; }\n\n private:\n  std::string model_key_;\n  ModelExecutorInfo model_info_;\n\n  void InitModelExecuteInfo();\n  Status PrePredict(const ModelExecutorSubgraphInfo &subgraph_info, uint64_t model_batch_size,\n                    const std::vector<InstanceData> &instances);\n  Status PostPredict(const ModelExecutorSubgraphInfo &subgraph_info, uint64_t model_batch_size,\n                     const std::vector<InstanceData> &instances, const std::vector<TensorBasePtr> &predict_result,\n                     std::vector<ResultInstance> *instance_result);\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_WORKER_SERVABLE_BASE_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/notfiy_master/base_notify.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_BASE_NOTIFY_H\n#define MINDSPORE_SERVING_WORKER_BASE_NOTIFY_H\n#include <vector>\n#include \"common/serving_common.h\"\n#include \"common/servable.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API BaseNotifyMaster {\n public:\n  BaseNotifyMaster() = default;\n  virtual ~BaseNotifyMaster() = default;\n  virtual Status Register(const WorkerRegSpec &worker_spec) = 0;\n  virtual Status Unregister() = 0;\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_WORKER_BASE_NOTIFY_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/notfiy_master/grpc_notify.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"worker/notfiy_master/grpc_notify.h\"\n#include <unistd.h>\n#include <grpcpp/grpcpp.h>\n#include <grpcpp/health_check_service_interface.h>\n#include <grpcpp/ext/proto_server_reflection_plugin.h>\n#include <thread>\n#include \"common/grpc_server.h\"\n#include \"worker/servable_register.h\"\n#include \"common/shared_memory.h\"\n#include \"common/proto_tensor.h\"\n\nnamespace mindspore {\nnamespace serving {\nGrpcNotifyMaster::GrpcNotifyMaster(const std::string &master_address, const std::string &worker_address)\n    : master_address_(master_address), worker_address_(worker_address) {\n  auto channel = GrpcServer::CreateChannel(master_address_);\n  stub_ = proto::MSMaster::NewStub(channel);\n}\n\nGrpcNotifyMaster::~GrpcNotifyMaster() = default;\n\nStatus GrpcNotifyMaster::Register(const WorkerRegSpec &worker_spec) {\n  proto::RegisterRequest request;\n  GrpcTensorHelper::ConvertWorkerSpec(worker_spec, &request);\n\n  MSI_LOG(INFO) << \"Register to \" << master_address_;\n  proto::RegisterReply reply;\n  grpc::ClientContext context;\n  const int32_t TIME_OUT = 1;\n  std::chrono::system_clock::time_point deadline = std::chrono::system_clock::now() + std::chrono::seconds(TIME_OUT);\n  context.set_deadline(deadline);\n  grpc::Status status = stub_->Register(&context, request, &reply);\n  if (status.ok()) {\n    MSI_LOG(INFO) << 
\"Register SUCCESS \";\n    is_running_ = true;\n    return SUCCESS;\n  }\n  return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n         << \"Register failed, Grpc message: \" << status.error_code() << \", \" << status.error_message();\n}\n\nStatus GrpcNotifyMaster::Unregister() {\n  if (!is_running_) {\n    return SUCCESS;\n  }\n  is_running_ = false;\n  proto::ExitRequest request;\n  request.set_address(worker_address_);\n  MSI_LOG(INFO) << \"Unregister to \" << master_address_;\n  proto::ExitReply reply;\n  grpc::ClientContext context;\n  const int32_t TIME_OUT = 1;\n  std::chrono::system_clock::time_point deadline = std::chrono::system_clock::now() + std::chrono::seconds(TIME_OUT);\n  context.set_deadline(deadline);\n  grpc::Status status = stub_->Exit(&context, request, &reply);\n  if (status.ok()) {\n    MSI_LOG(INFO) << \"Exit SUCCESS \";\n    return SUCCESS;\n  }\n  return INFER_STATUS_LOG_WARNING(FAILED)\n         << \"Exit Failed, master may have exited, Grpc message: \" << status.error_code() << \", \"\n         << status.error_message();\n}\n\nStatus GrpcNotifyMaster::NotifyFailed(const std::string &master_address, const std::string &error_msg) {\n  proto::NotifyFailedRequest request;\n  request.set_worker_pid(getpid());\n  request.set_error_msg(error_msg);\n  auto channel = GrpcServer::CreateChannel(master_address);\n  auto stub = proto::MSMaster::NewStub(channel);\n\n  proto::NotifyFailedReply reply;\n  grpc::ClientContext context;\n  grpc::Status status = stub->NotifyFailed(&context, request, &reply);\n  if (status.ok()) {\n    MSI_LOG(INFO) << \"Success to notify master \" << master_address << \" error message of worker: \" << error_msg;\n    return SUCCESS;\n  }\n  MSI_LOG_WARNING << \"Failed to notify master \" << master_address << \" error message of worker: \" << error_msg\n                  << \", grpc error: \" << status.error_message();\n  return FAILED;\n}\n\nStatus GrpcNotifyMaster::GetModelInfos(const std::string &master_address, const 
std::string &servable_name,\n                                       uint32_t version_number, proto::GetModelInfoReply *reply) {\n  proto::GetModelInfoRequest request;\n  request.set_servable_name(servable_name);\n  request.set_version_number(version_number);\n  auto channel = GrpcServer::CreateChannel(master_address);\n  auto stub = proto::MSMaster::NewStub(channel);\n\n  grpc::ClientContext context;\n  grpc::Status grpc_status = stub->GetModelInfo(&context, request, reply);\n  if (!grpc_status.ok()) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n           << \"Get model infos failed, master address:\" << master_address\n           << \", Grpc message: \" << grpc_status.error_code() << \", \" << grpc_status.error_message();\n  }\n  return SUCCESS;\n}\n\nStatus GrpcNotifyMaster::CreateRequestShmInstance(const RemoteCallModelContext &model_context,\n                                                  const InstanceData &instance, proto::Instance *proto_instance,\n                                                  std::vector<SharedMemoryItem> *alloc_shm_request) {\n  Status status;\n  auto &memory_instance = SharedMemoryAllocator::Instance();\n  auto &proto_items = *(proto_instance->mutable_items());\n  for (size_t i = 0; i < instance.size(); i++) {\n    auto &input = instance[i];\n    auto &memory_key = model_context.request_memory[i];\n    SharedMemoryItem memory_item;\n    status = memory_instance.AllocMemoryItem(memory_key, &memory_item);\n    if (status != SUCCESS) {\n      MSI_LOG_ERROR << \"Alloc request memory failed, memory: \" << memory_key;\n      return status;\n    }\n    alloc_shm_request->push_back(memory_item);\n    auto &proto_tensor = proto_items[\"x\" + std::to_string(i)];  // input: x0, x1, x2,...\n    ProtoTensor tensor(&proto_tensor);\n    tensor.set_data_type(input->data_type());\n    tensor.set_shape(input->shape());\n    auto proto_shm_data = proto_tensor.mutable_shm_data();\n    proto_shm_data->set_memory_key(memory_item.memory_key);\n    
proto_shm_data->set_bytes_size(memory_item.bytes_size);\n    proto_shm_data->set_data_size(memory_item.size);\n    proto_shm_data->set_data_offset(memory_item.offset);\n    auto ret = memcpy_s(memory_item.offset_address, memory_item.size, input->data(), input->data_size());\n    if (ret != EOK) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Copy tensor to shared memory failed, dst size: \" << memory_item.size\n                                            << \", src size: \" << input->data_size();\n    }\n  }\n  return SUCCESS;\n}\n\nStatus GrpcNotifyMaster::CreateResultShmInstance(const RemoteCallModelContext &model_context,\n                                                 ResultInstance *result_instance, proto::Instance *proto_instance) {\n  Status status;\n  auto &memory_instance = SharedMemoryAllocator::Instance();\n  auto &proto_reply_items = *(proto_instance->mutable_output_buffers());\n  for (size_t i = 0; i < model_context.output_infos.size(); i++) {\n    auto &output_info = model_context.output_infos[i];\n    auto &memory_key = model_context.reply_memory[i];\n    SharedMemoryItem memory_item;\n    status = memory_instance.AllocMemoryItem(memory_key, &memory_item);\n    if (status != SUCCESS) {\n      MSI_LOG_ERROR << \"Alloc request memory failed, memory: \" << memory_key;\n      return status;\n    }\n    auto &proto_output = proto_reply_items[\"y\" + std::to_string(i)];\n    proto_output.set_memory_key(memory_item.memory_key);\n    proto_output.set_bytes_size(memory_item.bytes_size);\n    proto_output.set_data_size(memory_item.size);\n    proto_output.set_data_offset(memory_item.offset);\n    auto result_tensor =\n      std::make_shared<ShmTensor>(output_info.tensor_info.data_type, output_info.shape_one_batch, memory_item);\n    result_instance->data.push_back(result_tensor);\n  }\n  return SUCCESS;\n}\n\nStatus GrpcNotifyMaster::CallModelInner(const RemoteCallModelContext &model_context,\n                                        const 
std::vector<InstanceData> &request, std::vector<ResultInstance> *reply,\n                                        std::vector<SharedMemoryItem> *alloc_shm_request) {\n  proto::PredictRequest proto_request;\n  auto servable_spec = proto_request.mutable_servable_spec();\n  servable_spec->set_name(ServableRegister::Instance().GetServableSignature().servable_name);\n  servable_spec->set_method_name(\n    ServableRegister::GetCallModelMethodName(model_context.model_name, model_context.subgraph));\n  servable_spec->set_version_number(model_context.version_number);\n  auto proto_instances = proto_request.mutable_instances();\n  Status status;\n  std::vector<ResultInstance> result_instances;\n  for (auto &instance : request) {\n    auto proto_instance = proto_instances->Add();\n    status = CreateRequestShmInstance(model_context, instance, proto_instance, alloc_shm_request);\n    if (status != SUCCESS) {\n      return status;\n    }\n    ResultInstance result_instance;\n    status = CreateResultShmInstance(model_context, &result_instance, proto_instance);\n    if (status != SUCCESS) {\n      return status;\n    }\n    result_instances.push_back(result_instance);\n  }\n  proto::PredictReply proto_reply;\n  MSI_TIME_STAMP_START(CallModel)\n  grpc::ClientContext context;\n  grpc::Status grpc_status = stub_->CallModel(&context, proto_request, &proto_reply);\n  MSI_TIME_STAMP_END_EXTRA(CallModel, \"Request count \" + std::to_string(request.size()))\n  if (!grpc_status.ok()) {\n    return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n           << \"Remote call model failed, master address:\" << master_address_\n           << \", Grpc message: \" << grpc_status.error_code() << \", \" << grpc_status.error_message();\n  }\n  auto &error_msgs = proto_reply.error_msg();\n  auto &reply_instances = proto_reply.instances();\n  if (error_msgs.size() == 1 && error_msgs[0].error_code() != 0) {\n    if (error_msgs[0].error_code() == SERVABLE_UNAVAILABLE) {\n      return 
INFER_STATUS_LOG_ERROR(FAILED) << \"There are no available inference processes that occupy devices\";\n    }\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Remote call model failed: \" << error_msgs[0].error_msg();\n  }\n  if (!reply_instances.empty() && static_cast<size_t>(reply_instances.size()) != request.size()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Remote call model failed, reply instances size \" << reply_instances.size()\n                                          << \" is not equal to request instances size \" << request.size();\n  }\n  for (int i = 0; i < reply_instances.size(); i++) {\n    ResultInstance result_instance;\n    if (i < error_msgs.size() && error_msgs[i].error_code() != 0) {\n      result_instance.error_msg = INFER_STATUS_LOG_ERROR(FAILED)\n                                  << \"Result instance \" << i << \" failed: \" << error_msgs[i].error_msg();\n    } else {\n      auto &proto_instance = reply_instances[i];\n      auto &proto_items = proto_instance.items();\n      for (auto &output : proto_items) {\n        if (!output.second.has_shm_data()) {\n          return INFER_STATUS_LOG_ERROR(FAILED) << \"Result instance \" << i << \" invalid, there no shared memory data\";\n        }\n      }\n      result_instance.data = result_instances[i].data;\n    }\n    reply->push_back(result_instance);\n  }\n  return SUCCESS;\n}\n\nStatus GrpcNotifyMaster::CallModel(const RemoteCallModelContext &model_context,\n                                   const std::vector<InstanceData> &request, std::vector<ResultInstance> *reply) {\n  std::vector<SharedMemoryItem> alloc_shm_request;\n  auto status = CallModelInner(model_context, request, reply, &alloc_shm_request);\n  auto &memory_instance = SharedMemoryAllocator::Instance();\n  for (auto &item : alloc_shm_request) {\n    memory_instance.ReleaseMemoryItem(item);\n  }\n  return status;\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/notfiy_master/grpc_notify.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_GRPC_NOTIFY_H\n#define MINDSPORE_SERVING_WORKER_GRPC_NOTIFY_H\n#include <vector>\n#include <string>\n#include <memory>\n#include \"worker/notfiy_master/base_notify.h\"\n#include \"common/instance_data.h\"\n#include \"common/shared_memory.h\"\n#include \"proto/ms_master.pb.h\"\n#include \"proto/ms_master.grpc.pb.h\"\n#include \"worker/extra_worker/remote_call_model.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API GrpcNotifyMaster : public BaseNotifyMaster {\n public:\n  GrpcNotifyMaster(const std::string &master_address, const std::string &worker_address);\n  ~GrpcNotifyMaster() override;\n  Status Register(const WorkerRegSpec &worker_spec) override;\n  Status Unregister() override;\n  static Status NotifyFailed(const std::string &master_address, const std::string &error_msg);\n\n  Status CallModel(const RemoteCallModelContext &model_context, const std::vector<InstanceData> &request,\n                   std::vector<ResultInstance> *reply);\n  static Status GetModelInfos(const std::string &master_address, const std::string &servable_name,\n                              uint32_t version_number, proto::GetModelInfoReply *reply);\n\n private:\n  std::string master_address_;\n  std::string worker_address_;\n\n  std::atomic<bool> is_running_ = false;\n  std::unique_ptr<proto::MSMaster::Stub> 
stub_;\n\n  Status CallModelInner(const RemoteCallModelContext &model_context, const std::vector<InstanceData> &request,\n                        std::vector<ResultInstance> *reply, std::vector<SharedMemoryItem> *alloc_shm_request);\n\n  Status CreateRequestShmInstance(const RemoteCallModelContext &model_context, const InstanceData &instance,\n                                  proto::Instance *proto_instance, std::vector<SharedMemoryItem> *alloc_shm_request);\n  Status CreateResultShmInstance(const RemoteCallModelContext &model_context, ResultInstance *result_instance,\n                                 proto::Instance *proto_instance);\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_WORKER_GRPC_NOTIFY_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/predict_thread.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/predict_thread.h\"\n#include <vector>\n#include <memory>\n#include <string>\n#include \"worker/task_queue.h\"\n#include \"worker/stage_function.h\"\n#include \"common/buffer_tensor.h\"\n#include \"distributed_worker/distributed_model_loader.h\"\n\nnamespace mindspore::serving {\nserving::PredictThread::PredictThread() {}\nPredictThread::~PredictThread() noexcept { Stop(); }\n\nvoid PredictThread::PushPredictTask(const MethodStage &stage, const std::vector<InstancePtr> &inputs) {\n  // create input for predict, and check\n  std::vector<InstancePtr> valid_instances;\n  for (auto &instance : inputs) {\n    auto status = CheckPredictInput(stage.subgraph, instance);\n    if (status != SUCCESS) {\n      task_que_.PushTaskResult({instance}, status);\n      continue;\n    }\n    valid_instances.push_back(instance);\n  }\n  if (!valid_instances.empty()) {\n    auto group_name = AsGroupName(stage.stage_key, stage.subgraph);\n    task_que_.PushTask(group_name, 0, valid_instances);\n  }\n}\n\nvoid PredictThread::ThreadFunc(PredictThread *queue) { queue->Predict(); }\n\nvoid PredictThread::Predict() {\n  while (true) {\n    TaskItem task_item;\n    task_que_.PopTask(&task_item);\n    if (task_item.has_stopped) {\n      MSI_LOG_INFO << \"Predict task has stopped, exit predict thread\";\n      break;\n    }\n    
MSI_TIME_STAMP_START(InvokePredict)\n    PredictHandle(task_item.task_info, task_item.instance_list);\n    MSI_TIME_STAMP_END_EXTRA(InvokePredict, task_item.task_info.tag)\n  }\n}\n\nvoid PredictThread::Stop() {\n  task_que_.Stop();\n  for (auto &predict_thread : predict_threads_) {\n    if (predict_thread.joinable()) {\n      try {\n        predict_thread.join();\n      } catch (const std::system_error &) {\n      } catch (...) {\n      }\n    }\n  }\n}\n\nstd::string PredictThread::AsGroupName(const std::string &model_key, uint64_t subgraph) const {\n  return model_key + \"_subgraph\" + std::to_string(subgraph);\n}\n\nvoid PredictThread::Start(const std::string &que_name, const std::shared_ptr<ModelLoaderBase> &model_loader,\n                          const ModelMeta &model_meta, const TaskCallBack &task_callback) {\n  MSI_EXCEPTION_IF_NULL(model_loader);\n  MSI_EXCEPTION_IF_NULL(task_callback);\n  model_loader_ = model_loader;\n  model_meta_ = model_meta;\n  auto &model_key = model_meta.common_meta.model_key;\n  auto graph_num = model_loader_->GetGraphNum();\n\n  auto batch_size = model_loader->GetBatchSize();\n  // init executor info\n  executor_info_.sub_graph_infos.resize(graph_num);\n  executor_info_.batch_size = batch_size;\n  for (uint64_t i = 0; i < graph_num; i++) {\n    auto input_infos = model_loader_->GetInputInfos(i);\n    auto &subgraph_info = executor_info_.sub_graph_infos[i];\n    subgraph_info.input_infos = input_infos;\n  }\n  // init task infos\n  std::vector<TaskInfo> task_infos;\n  for (uint64_t i = 0; i < graph_num; i++) {\n    TaskInfo info;\n    info.group_name = AsGroupName(model_key, i);\n    info.subgraph = i;\n    info.task_name = info.group_name;\n    info.priority = 0;\n    info.batch_size = batch_size;\n    info.tag = \"Model \" + model_key + (graph_num > 1 ? 
\" subgraph \" + std::to_string(i) : \"\");\n    task_infos.push_back(info);\n  }\n  task_que_.Start(que_name, task_infos, task_callback);  // start before predict_thread_ start\n  bool support_pipeline_infer = model_meta.distributed_meta.enable_pipeline_infer &&\n                                (std::dynamic_pointer_cast<DistributedModelLoader>(model_loader) != nullptr);\n  size_t thread_num = support_pipeline_infer ? model_meta.distributed_meta.stage_size : 1;\n  for (size_t i = 0; i < thread_num; i++) {\n    predict_threads_.emplace_back(ThreadFunc, this);\n  }\n}\n\nvoid PredictThread::PredictHandle(const TaskInfo &task_info, const std::vector<InstancePtr> &instances) {\n  Status status;\n  try {\n    std::vector<ResultInstance> instance_result;\n    status = PredictInner(task_info, instances, &instance_result);\n    if (status != SUCCESS) {\n      task_que_.PushTaskResult(instances, status);\n      return;\n    }\n    task_que_.PushTaskResult(instances, instance_result);\n    return;\n  } catch (const std::bad_alloc &ex) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: malloc memory failed\";\n  } catch (const std::runtime_error &ex) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: runtime error occurred: \" << ex.what();\n  } catch (const std::exception &ex) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: exception occurred: \" << ex.what();\n  } catch (...) 
{\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: exception occurred\";\n  }\n  task_que_.PushTaskResult(instances, status);\n}\n\nStatus PredictThread::PredictInner(const TaskInfo &task_info, const std::vector<InstancePtr> &instances,\n                                   std::vector<ResultInstance> *instance_result) {\n  Status status;\n  std::vector<InstanceData> inputs;\n  for (auto &item : instances) {\n    // cppcheck-suppress useStlAlgorithm\n    inputs.push_back(item->data);\n  }\n  status = model_loader_->Predict(inputs, instance_result, task_info.subgraph);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Predict failed, model info \" << model_meta_.common_meta.model_key;\n    return status;\n  }\n  return SUCCESS;\n}\n\nStatus PredictThread::CheckPredictInput(uint64_t subgraph, const InstancePtr &instance) {\n  const auto &inputs_info = executor_info_.sub_graph_infos[subgraph].input_infos;\n  if (instance->data.size() < inputs_info.size()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"Given model inputs size \" << instance->data.size()\n                                                  << \" less than model inputs size \" << inputs_info.size();\n  }\n  for (size_t i = 0; i < instance->data.size(); i++) {\n    auto input_data = instance->data[i];\n    if (inputs_info[i].is_no_batch_dim) {\n      if (static_cast<size_t>(inputs_info[i].size) != input_data->data_size()) {\n        return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n               << \"Given model input \" << i << \" size \" << input_data->data_size() << \" not match the size \"\n               << inputs_info[i].size << \" defined in model\";\n      }\n    } else if (static_cast<size_t>(inputs_info[i].size / executor_info_.batch_size) != input_data->data_size()) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << \"Given model input \" << i << \" size \" << input_data->data_size() << \" not match the size \"\n             << 
inputs_info[i].size / executor_info_.batch_size << \" defined in model\";\n    }\n    if (inputs_info[i].data_type != input_data->data_type()) {\n      return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n             << \"Given model input \" << i << \" data type \" << input_data->data_type() << \" not match the data type \"\n             << inputs_info[i].data_type << \" defined in model\";\n    }\n  }\n  return SUCCESS;\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/predict_thread.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_PREDICT_THREAD_H\n#define MINDSPORE_SERVING_WORKER_PREDICT_THREAD_H\n\n#include <queue>\n#include <mutex>\n#include <condition_variable>\n#include <atomic>\n#include <vector>\n#include <map>\n#include <memory>\n#include <string>\n#include \"common/instance.h\"\n#include \"worker/inference/inference.h\"\n#include \"worker/task_queue.h\"\n#include \"worker/model_loader_base.h\"\n\nnamespace mindspore::serving {\nstruct PredictSubgraphInfo {\n  std::vector<TensorInfo> input_infos;\n};\n\nstruct PredictModelInfo {\n  std::vector<PredictSubgraphInfo> sub_graph_infos;\n  uint64_t batch_size = 0;\n};\n\nclass PredictThread {\n public:\n  PredictThread();\n  ~PredictThread() noexcept;\n\n  void PushPredictTask(const MethodStage &stage, const std::vector<InstancePtr> &inputs);\n  void Start(const std::string &que_name, const std::shared_ptr<ModelLoaderBase> &model_loader,\n             const ModelMeta &model_meta, const TaskCallBack &task_callback);\n  void Stop();\n\n  uint64_t GetBatchSize() const { return executor_info_.batch_size; }\n\n private:\n  TaskQueue task_que_;\n  std::vector<std::thread> predict_threads_;\n  ModelMeta model_meta_;\n  std::shared_ptr<ModelLoaderBase> model_loader_ = nullptr;\n  PredictModelInfo executor_info_;\n\n  static void ThreadFunc(PredictThread *queue);\n  void 
Predict();\n\n  void PredictHandle(const TaskInfo &task_info, const std::vector<InstancePtr> &instances);\n  Status PredictInner(const TaskInfo &task_info, const std::vector<InstancePtr> &instances,\n                      std::vector<ResultInstance> *instance_result);\n  Status CheckPredictInput(uint64_t subgraph, const InstancePtr &instance);\n  std::string AsGroupName(const std::string &model_key, uint64_t subgraph) const;\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_WORKER_PREDICT_THREAD_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/register/argmax.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/stage_function.h\"\n#include \"mindspore_serving/ccsrc/common/tensor.h\"\n\nnamespace mindspore::serving {\nclass ArgmaxStageFunc : public CppStageFunctionBase {\n public:\n  template <typename DT>\n  void ArgmaxImp(const void *input, size_t *output, size_t data_size, size_t elemsize) {\n    auto count = data_size / elemsize;\n    auto data = reinterpret_cast<const DT *>(input);\n    *output = 0;\n    for (size_t i = 1; i < count; i++) {\n      if (data[i] > data[*output]) {\n        *output = i;\n      }\n    }\n  }\n\n  Status Call(const std::string &, const InstanceData &input, InstanceData *output) override {\n    MSI_EXCEPTION_IF_NULL(output);\n    auto input_x = input[0];\n    auto x_data = input_x->data();\n    auto out_tensor = std::make_shared<Tensor>();\n    out_tensor->set_data_type(serving::kMSI_Int64);\n    (void)out_tensor->resize_data(sizeof(size_t));\n    out_tensor->set_shape({});\n    output->push_back(out_tensor);\n    auto y_data = reinterpret_cast<size_t *>(out_tensor->mutable_data());\n    switch (input_x->data_type()) {\n      case kMSI_Float32:\n        ArgmaxImp<float>(x_data, y_data, input_x->data_size(), input_x->itemsize());\n        break;\n      case kMSI_Float64:\n        ArgmaxImp<double>(x_data, y_data, input_x->data_size(), input_x->itemsize());\n        break;\n      case 
kMSI_Int8:\n        ArgmaxImp<int8_t>(x_data, y_data, input_x->data_size(), input_x->itemsize());\n        break;\n      case kMSI_Uint8:\n        ArgmaxImp<uint8_t>(x_data, y_data, input_x->data_size(), input_x->itemsize());\n        break;\n      case kMSI_Int16:\n        ArgmaxImp<int16_t>(x_data, y_data, input_x->data_size(), input_x->itemsize());\n        break;\n      case kMSI_Uint16:\n        ArgmaxImp<uint16_t>(x_data, y_data, input_x->data_size(), input_x->itemsize());\n        break;\n      case kMSI_Int32:\n        ArgmaxImp<int32_t>(x_data, y_data, input_x->data_size(), input_x->itemsize());\n        break;\n      case kMSI_Uint32:\n        ArgmaxImp<uint32_t>(x_data, y_data, input_x->data_size(), input_x->itemsize());\n        break;\n      case kMSI_Int64:\n        ArgmaxImp<int64_t>(x_data, y_data, input_x->data_size(), input_x->itemsize());\n        break;\n      case kMSI_Uint64:\n        ArgmaxImp<uint64_t>(x_data, y_data, input_x->data_size(), input_x->itemsize());\n        break;\n      default:\n        return INFER_STATUS(FAILED) << \"Argmax not support data type \" << input_x->data_type();\n    }\n    return SUCCESS;\n  }\n\n  size_t GetInputsCount(const std::string &) const override { return 1; }\n\n  size_t GetOutputsCount(const std::string &) const override { return 1; }\n};\n\nREGISTER_STAGE_FUNCTION(ArgmaxStageFunc, \"argmax_cpp\")\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/servable_register.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"worker/servable_register.h\"\n#include <set>\n#include <string>\n#include \"worker/stage_function.h\"\n\nnamespace mindspore {\nnamespace serving {\nServableRegister &ServableRegister::Instance() {\n  static ServableRegister storage = ServableRegister();\n  return storage;\n}\n\nStatus ServableRegister::RegisterMethod(const MethodSignature &method) {\n  MSI_LOG_INFO << \"Declare method \" << method.method_name << \", servable \" << method.servable_name;\n  servable_signatures_.servable_name = method.servable_name;\n  for (auto &item : servable_signatures_.methods) {\n    // cppcheck-suppress useStlAlgorithm\n    if (item.method_name == method.method_name) {\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Method \" << method.method_name << \" has been registered more than once.\";\n    }\n  }\n  servable_signatures_.methods.push_back(method);\n  return SUCCESS;\n}\n\nStatus ServableRegister::DeclareModel(ModelMeta model) {\n  auto &common_meta = model.common_meta;\n  auto &local_meta = model.local_meta;\n  MSI_LOG_INFO << \"Declare model \" << local_meta.model_files;\n  if (servable_signatures_.servable_type == kServableTypeDistributed) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Declare model failed, servable has already been declared as distributed servable\";\n  }\n  
servable_signatures_.servable_name = common_meta.servable_name;\n  servable_signatures_.servable_type = kServableTypeLocal;\n  if (local_meta.model_files.empty()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Declare model failed, model files size cannot be 0\";\n  }\n  std::set<std::string> cur_model_files;\n  for (auto &model_item : servable_signatures_.model_metas) {\n    for (auto &file_item : model_item.local_meta.model_files) {\n      (void)cur_model_files.emplace(file_item);\n    }\n  }\n  for (auto &file : local_meta.model_files) {\n    if (file.empty()) {\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Declare model \" << local_meta.model_files << \" failed, model file cannot be empty\";\n    }\n    if (cur_model_files.count(file) > 0) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Declare model \" << local_meta.model_files << \" failed, model file '\"\n                                            << file << \"' has already been used\";\n    }\n  }\n  if (local_meta.model_format == ModelType::kUnknownType) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Declare model \" << local_meta.model_files << \" failed, model_format is not inited\";\n  }\n  for (auto &item : servable_signatures_.model_metas) {\n    // cppcheck-suppress useStlAlgorithm\n    if (item.common_meta.model_key == common_meta.model_key) {\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Declare model \" << local_meta.model_files << \" failed, the same model has already been declared\";\n    }\n  }\n  servable_signatures_.model_metas.push_back(model);\n  return SUCCESS;\n}\n\nStatus ServableRegister::DeclareDistributedModel(ModelMeta model) {\n  auto &common_meta = model.common_meta;\n  MSI_LOG_INFO << \"Declare distributed model \" << common_meta.servable_name;\n  if (servable_signatures_.servable_type == kServableTypeDistributed) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Declare distributed model failed, servable is 
repeatedly been declared as distributed servable\";\n  }\n  if (servable_signatures_.servable_type == kServableTypeLocal) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Declare distributed model failed, servable has already been declared as local servable\";\n  }\n  servable_signatures_.servable_name = common_meta.servable_name;\n  servable_signatures_.servable_type = kServableTypeDistributed;\n  if (model.distributed_meta.rank_size == 0) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Declare distributed model \" << common_meta.servable_name << \" failed, rank_size cannot be 0\";\n  }\n  if (model.distributed_meta.stage_size == 0) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"Declare distributed model \" << common_meta.servable_name << \" failed, stage_size cannot be 0\";\n  }\n  servable_signatures_.model_metas.push_back(model);\n  return SUCCESS;\n}\n\nStatus ServableRegister::RegisterInputOutputInfo(const std::string &model_key, size_t inputs_count,\n                                                 size_t outputs_count, uint64_t subgraph) {\n  MSI_LOG_INFO << \"Declare model \" << model_key << \" subgraph \" << subgraph << \" inputs count \" << inputs_count\n               << \" outputs count \" << outputs_count;\n  auto &model_metas = servable_signatures_.model_metas;\n  auto it = std::find_if(model_metas.begin(), model_metas.end(),\n                         [model_key](const ModelMeta &item) { return item.common_meta.model_key == model_key; });\n  if (it == model_metas.end()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"RegisterInputOutputInfo failed, cannot find model \" << model_key;\n  }\n  auto &common_meta = it->common_meta;\n\n  if (common_meta.inputs_count.count(subgraph) > 0 && common_meta.inputs_count[subgraph] != inputs_count) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"RegisterInputOutputInfo failed, inputs count \" << inputs_count << \" not match old count \"\n           << 
common_meta.inputs_count[subgraph] << \", model: \" << model_key;\n  }\n  if (common_meta.outputs_count.count(subgraph) > 0 && common_meta.outputs_count[subgraph] != outputs_count) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"RegisterInputOutputInfo failed, outputs count \" << outputs_count << \" not match old count \"\n           << common_meta.outputs_count[subgraph] << \", model: \" << model_key;\n  }\n  common_meta.inputs_count[subgraph] = inputs_count;\n  common_meta.outputs_count[subgraph] = outputs_count;\n  return SUCCESS;\n}\n\nStatus ServableRegister::InitCallModelMethods(const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &models) {\n  for (auto &model_it : models) {\n    auto model_key = model_it.first;\n    auto &model_loader = model_it.second;\n    auto graph_num = model_loader->GetGraphNum();\n    for (size_t subgraph = 0; subgraph < graph_num; subgraph++) {\n      auto input_infos = model_loader->GetInputInfos(subgraph);\n      auto output_infos = model_loader->GetOutputInfos(subgraph);\n      auto status = RegisterOneCallModelMethod(model_key, input_infos.size(), output_infos.size(), subgraph);\n      if (status != SUCCESS) {\n        return status;\n      }\n    }\n  }\n  return SUCCESS;\n}\n\nstd::string ServableRegister::GetCallModelMethodName(const std::string &model_key, uint64_t subgraph) {\n  std::string method_name = \"@call_\" + model_key + \"_\" + std::to_string(subgraph);\n  return method_name;\n}\n\nStatus ServableRegister::RegisterOneCallModelMethod(const std::string &model_key, uint64_t input_count,\n                                                    uint64_t output_count, uint64_t subgraph) {\n  std::string method_name = GetCallModelMethodName(model_key, subgraph);\n  MethodSignature method;\n  method.method_name = method_name;\n  method.servable_name = servable_signatures_.servable_name;\n\n  std::vector<std::pair<size_t, uint64_t>> model_inputs;\n  for (uint64_t i = 0; i < input_count; i++) {\n    
(void)method.inputs.emplace_back(\"x\" + std::to_string(i));\n    (void)model_inputs.emplace_back(std::make_pair(0, i));  // all method inputs are function inputs\n  }\n  std::vector<std::pair<size_t, uint64_t>> returns;\n  for (uint64_t i = 0; i < output_count; i++) {\n    (void)method.outputs.emplace_back(\"y\" + std::to_string(i));\n    (void)returns.emplace_back(std::make_pair(1, i));\n  }\n  method.AddStageModel(model_key, model_inputs, subgraph);\n  method.SetReturn(returns);\n  auto status = RegisterMethod(method);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Register Method failed\";\n    return status;\n  }\n  status = RegisterInputOutputInfo(model_key, input_count, output_count, subgraph);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Register model input and output info failed\";\n    return status;\n  }\n  return SUCCESS;\n}\n\nStatus ServableRegister::CheckModels(const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &models) {\n  auto const &signature = servable_signatures_;\n  if (signature.methods.empty()) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"There is no method registered for servable\";\n  }\n  if (models.size() != signature.model_metas.size()) {\n    return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"The number \" << signature.model_metas.size() << \" of models declared is not equal to the number \"\n           << models.size() << \" of models loaded\";\n  }\n  for (auto &model_meta : signature.model_metas) {\n    auto &model_key = model_meta.common_meta.model_key;\n    auto model_load_it = models.find(model_key);\n    if (model_load_it == models.end()) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Model \" << model_key << \" has not been loaded\";\n    }\n    auto &model_loader = model_load_it->second;\n    auto batch_size = model_loader->GetBatchSize();\n    if (batch_size == 0) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Invalid batch size 0, model info: \" << model_key;\n    }\n    auto 
graph_num = model_loader->GetGraphNum();\n    if (graph_num == 0) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Invalid subgraph number 0, model info: \" << model_key;\n    }\n    for (uint64_t subgraph = 0; subgraph < graph_num; subgraph++) {\n      auto input_infos = model_loader->GetInputInfos(subgraph);\n      auto output_infos = model_loader->GetOutputInfos(subgraph);\n\n      MSI_LOG_INFO << \"Print model info, model info: '\" << model_meta.common_meta.model_key << \"', subgraph \"\n                   << subgraph;\n      MSI_LOG_INFO << \"Model input infos: count \" << input_infos.size();\n      for (auto &item : input_infos) {\n        MSI_LOG_INFO << item.shape << \", \" << item.data_type << \", \" << item.size;\n      }\n      MSI_LOG_INFO << \"Model output infos: count \" << output_infos.size();\n      for (auto &item : output_infos) {\n        MSI_LOG_INFO << item.shape << \", \" << item.data_type << \", \" << item.size;\n      }\n\n      const auto &common_meta = model_meta.common_meta;\n      if (common_meta.inputs_count.count(subgraph) > 0 && input_infos.size() != common_meta.inputs_count.at(subgraph)) {\n        return INFER_STATUS_LOG_ERROR(FAILED)\n               << \"The inputs count \" << common_meta.inputs_count.at(subgraph) << \" in register_method \"\n               << \"not equal to the count \" << input_infos.size() << \" defined in model, model info: \" << model_key\n               << \", subgraph: \" << subgraph;\n      }\n      if (common_meta.outputs_count.count(subgraph) > 0 &&\n          output_infos.size() != common_meta.outputs_count.at(subgraph)) {\n        return INFER_STATUS_LOG_ERROR(FAILED)\n               << \"The outputs count \" << common_meta.outputs_count.at(subgraph) << \" in register_method \"\n               << \"not equal to the count \" << output_infos.size() << \" defined in model, model info: \" << model_key\n               << \", subgraph: \" << subgraph;\n      }\n    }\n  }\n  return SUCCESS;\n}\n\nStatus 
ServableRegister::CheckOneMethod(const MethodSignature &method) {\n  const auto &servable_name = servable_signatures_.servable_name;\n  const auto &model_metas = servable_signatures_.model_metas;\n  for (auto &stage_it : method.stage_map) {\n    auto stage_index = stage_it.first;\n    auto &stage = stage_it.second;\n    for (size_t input_index = 0; input_index < stage.stage_inputs.size(); input_index++) {\n      auto input_stage_index = stage.stage_inputs[input_index].first;\n      auto output_index = stage.stage_inputs[input_index].second;\n      // method input\n      if (input_stage_index == 0) {\n        if (output_index >= method.inputs.size()) {\n          return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n                 << \"The stage \" << stage_index << \" \" << input_index << \"th input uses method \" << output_index\n                 << \"th input, that is greater than the method inputs size \" << method.inputs.size()\n                 << \", servable: \" << servable_name << \", method: \" << method.method_name;\n        }\n        continue;\n      }\n      // check input stage index\n      if (input_stage_index >= stage_index) {\n        return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n               << \"The \" << input_index << \"th input data of stage \" << stage_index << \" cannot not come from stage \"\n               << input_stage_index << \", servable: \" << servable_name << \", method: \" << method.method_name;\n      }\n      // check input stage output index\n      auto it = method.stage_map.find(input_stage_index);\n      if (it == method.stage_map.end()) {\n        return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n               << \"Cannot find stage \" << input_stage_index << \" from method define information, \"\n               << \", servable: \" << servable_name << \", method: \" << method.method_name;\n      }\n      const auto &input_stage = it->second;\n      if (input_stage.stage_type == kMethodStageTypePyFunction) {\n        size_t 
input_count, output_count;\n        if (!PyStageFunctionStorage::Instance()->GetPyFunctionInfo(input_stage.stage_key, &input_count,\n                                                                   &output_count)) {\n          return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n                 << \"PyFunction \" << input_stage.stage_key << \" is not defined, \"\n                 << \", servable: \" << servable_name << \", method: \" << method.method_name;\n        }\n        if (output_index >= output_count) {\n          return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n                 << \"The stage(begin with 1) \" << stage_index << \" \" << input_index << \"th input uses python function \"\n                 << input_stage.stage_key << \" \" << output_index\n                 << \"th output, that is greater than the function output size \" << output_count\n                 << \", servable: \" << servable_name << \", method: \" << method.method_name;\n        }\n      } else if (input_stage.stage_type == kMethodStageTypeCppFunction) {\n        auto function = CppStageFunctionStorage::Instance().GetFunction(input_stage.stage_key);\n        if (function == nullptr) {\n          return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n                 << \"CppFunction \" << input_stage.stage_key << \" is not defined, \"\n                 << \", servable: \" << servable_name << \", method: \" << method.method_name;\n        }\n        auto func_output_count = function->GetOutputsCount(input_stage.stage_key);\n        if (output_index >= func_output_count) {\n          return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n                 << \"The stage(begin with 1) \" << stage_index << \" \" << input_index << \"th input uses c++ function \"\n                 << input_stage.stage_key << \" \" << output_index\n                 << \"th output, that is greater than the function output size \" << func_output_count\n                 << \", servable: \" << servable_name << \", method: \" << 
method.method_name;\n        }\n      } else if (input_stage.stage_type == kMethodStageTypeModel) {\n        auto model_it =\n          std::find_if(model_metas.begin(), model_metas.end(), [&input_stage](const ModelMeta &model_meta) {\n            return input_stage.stage_key == model_meta.common_meta.model_key;\n          });\n        if (model_it == model_metas.end()) {\n          return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n                 << \"Model \" << input_stage.stage_key << \" is not defined, \"\n                 << \", servable: \" << servable_name << \", method: \" << method.method_name;\n        }\n        if (model_it->common_meta.outputs_count.count(input_stage.subgraph) == 0) {\n          return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n                 << \"Model \" << input_stage.stage_key << \" subgraph \" << input_stage.subgraph << \" is not declared\"\n                 << \", servable: \" << servable_name << \", method: \" << method.method_name;\n        }\n        auto model_output_count = model_it->common_meta.outputs_count.at(input_stage.subgraph);\n        if (output_index >= model_output_count) {\n          return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n                 << \"The stage(begin with 1) \" << stage_index << \" \" << input_index << \"th input uses model \"\n                 << input_stage.stage_key << \" subgraph \" << input_stage.subgraph << \" \" << output_index\n                 << \"th output, that is greater than the model output size \" << model_output_count\n                 << \", servable: \" << servable_name << \", method: \" << method.method_name;\n        }\n      } else {\n        return INFER_STATUS_LOG_ERROR(SYSTEM_ERROR)\n               << \"Invalid stage type \" << static_cast<int>(stage.stage_type) << \", servable: \" << servable_name\n               << \", method: \" << method.method_name;\n      }\n    }\n  }\n  return SUCCESS;\n}\n\nStatus ServableRegister::CheckMethods() {\n  std::set<std::string> method_set;\n  
Status status;\n  for (const auto &method : servable_signatures_.methods) {\n    if (method_set.count(method.method_name) > 0) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Servable \" << servable_signatures_.servable_name << \" method '\"\n                                            << method.method_name << \"' has been defined repeatedly\";\n    }\n    (void)method_set.emplace(method.method_name);\n    status = CheckOneMethod(method);\n    if (status != SUCCESS) {\n      return status;\n    }\n  }\n  return SUCCESS;\n}\n\nStatus ServableRegister::InitMethodBatchSize(const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &models) {\n  // stages only use method inputs as inputs batch_size == mini model batch size\n  // other stages batch_size == max model batch size\n  for (auto &method : servable_signatures_.methods) {\n    uint64_t mini_batch_size = UINT32_MAX;\n    uint64_t max_batch_size = 0;\n    for (auto &stage_it : method.stage_map) {\n      auto &stage = stage_it.second;\n      if (stage.stage_type == kMethodStageTypeModel) {\n        auto model_it = models.find(stage.stage_key);\n        if (model_it == models.end()) {\n          return INFER_STATUS_LOG_ERROR(FAILED) << \"Model \" << stage.stage_key << \" has not been loaded\";\n        }\n        stage.batch_size = model_it->second->GetBatchSize();\n        if (stage.batch_size < mini_batch_size) {\n          mini_batch_size = stage.batch_size;\n        }\n        if (stage.batch_size > max_batch_size) {\n          max_batch_size = stage.batch_size;\n        }\n      }\n    }\n    if (mini_batch_size == UINT32_MAX || max_batch_size == 0) {\n      mini_batch_size = 1;\n      max_batch_size = 1;\n    }\n    for (auto &stage_it : method.stage_map) {\n      auto &stage = stage_it.second;\n      if (stage.stage_type != kMethodStageTypeModel && stage.batch_size == 0) {\n        auto all_method_input = std::all_of(stage.stage_inputs.begin(), stage.stage_inputs.end(),\n                               
             [](const std::pair<size_t, uint64_t> &item) { return item.first == 0; });\n        if (all_method_input) {\n          stage.batch_size = mini_batch_size;\n        } else {\n          stage.batch_size = max_batch_size;\n        }\n      }\n    }\n  }\n  return SUCCESS;\n}\n\nStatus ServableRegister::InitOnModelsLoad(const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &models) {\n  Status status;\n  status = CheckModels(models);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Check models failed\";\n    return status;\n  }\n  status = InitCallModelMethods(models);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Init call model methods failed\";\n    return status;\n  }\n  status = CheckMethods();\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Check methods failed\";\n    return status;\n  }\n  status = InitMethodBatchSize(models);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Init models batch size failed\";\n    return status;\n  }\n  return SUCCESS;\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/servable_register.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_SERVABLE_REGISTER_H\n#define MINDSPORE_SERVING_SERVABLE_REGISTER_H\n\n#include <map>\n#include <string>\n#include <memory>\n#include <utility>\n#include <vector>\n\n#include \"common/servable.h\"\n#include \"worker/model_loader_base.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API ServableRegister {\n public:\n  static ServableRegister &Instance();\n  const ServableSignature &GetServableSignature() const { return servable_signatures_; }\n\n  // register_method\n  Status RegisterMethod(const MethodSignature &method);\n  // call_model\n  Status RegisterInputOutputInfo(const std::string &model_key, size_t inputs_count, size_t outputs_count,\n                                 uint64_t subgraph = 0);\n  // declare_model\n  Status DeclareModel(ModelMeta model);\n  Status DeclareDistributedModel(ModelMeta model);\n\n  static std::string GetCallModelMethodName(const std::string &model_key, uint64_t subgraph);\n\n  Status InitOnModelsLoad(const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &models);\n\n private:\n  ServableSignature servable_signatures_;\n  Status RegisterOneCallModelMethod(const std::string &model_key, uint64_t input_count, uint64_t output_count,\n                                    uint64_t subgraph);\n  Status CheckModels(const std::map<std::string, 
std::shared_ptr<ModelLoaderBase>> &models);\n  Status InitCallModelMethods(const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &models);\n  Status CheckMethods();\n  Status InitMethodBatchSize(const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &models);\n  Status CheckOneMethod(const MethodSignature &method);\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_SERVABLE_REGISTER_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/stage_function.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/stage_function.h\"\n#include <utility>\n\nnamespace mindspore::serving {\nbool CppStageFunctionStorage::Register(const std::string &function_name,\n                                       std::shared_ptr<CppStageFunctionBase> function) {\n  if (function_map_.find(function_name) != function_map_.end()) {\n    MSI_LOG_WARNING << \"function \" << function_name << \" has been registered\";\n    return false;\n  }\n  function_map_[function_name] = std::move(function);\n  return true;\n}\n\nvoid CppStageFunctionStorage::Unregister(const std::string &function_name) {\n  auto it = function_map_.find(function_name);\n  if (it == function_map_.end()) {\n    return;\n  }\n  (void)function_map_.erase(it);\n}\n\nCppStageFunctionStorage &CppStageFunctionStorage::Instance() {\n  static CppStageFunctionStorage storage = CppStageFunctionStorage();\n  return storage;\n}\n\nstd::shared_ptr<CppStageFunctionBase> CppStageFunctionStorage::GetFunction(const std::string &func_name) const {\n  auto it = function_map_.find(func_name);\n  if (it != function_map_.end()) {\n    return it->second;\n  }\n  return nullptr;\n}\n\nCppRegStageFunction::CppRegStageFunction(const std::string &function_name,\n                                         std::shared_ptr<CppStageFunctionBase> function) {\n  func_name_ = function_name;\n  MSI_LOG_INFO << 
\"Register C++ function \" << function_name;\n  register_success_ = CppStageFunctionStorage::Instance().Register(function_name, std::move(function));\n}\n\nCppRegStageFunction::~CppRegStageFunction() noexcept {\n  if (register_success_) {\n    MSI_LOG_INFO << \"Unregister C++ function \" << func_name_;\n    CppStageFunctionStorage::Instance().Unregister(func_name_);\n  }\n}\n\nPyStageFunctionStorage::PyStageFunctionStorage() = default;\nPyStageFunctionStorage::~PyStageFunctionStorage() = default;\n\nstd::shared_ptr<PyStageFunctionStorage> PyStageFunctionStorage::Instance() {\n  static std::shared_ptr<PyStageFunctionStorage> instance = nullptr;\n  if (instance == nullptr) {\n    instance = std::make_shared<PyStageFunctionStorage>();\n  }\n  return instance;\n}\n\nvoid PyStageFunctionStorage::Register(const std::string &func_name, size_t inputs_count, size_t outputs_count) {\n  function_infos_[func_name] = std::make_pair(inputs_count, outputs_count);\n  MSI_LOG_INFO << \"Register python stage function \" << func_name << \" inputs count \" << inputs_count\n               << \" outputs count \" << outputs_count;\n}\n\nbool PyStageFunctionStorage::HasPyFunction(const std::string &func_name) {\n  auto it = function_infos_.find(func_name);\n  return it != function_infos_.end();\n}\n\nbool PyStageFunctionStorage::GetPyFunctionInfo(const std::string &func_name, size_t *inputs_count,\n                                               size_t *outputs_count) {\n  MSI_EXCEPTION_IF_NULL(inputs_count);\n  MSI_EXCEPTION_IF_NULL(outputs_count);\n  auto it = function_infos_.find(func_name);\n  if (it == function_infos_.end()) {\n    return false;\n  }\n  *inputs_count = it->second.first;\n  *outputs_count = it->second.second;\n  return true;\n}\n\nstd::vector<size_t> PyStageFunctionStorage::GetPyCppFunctionInfo(const std::string &func_name) const {\n  size_t inputs_count = 0;\n  size_t outputs_count = 0;\n  if (PyStageFunctionStorage::Instance()->GetPyFunctionInfo(func_name, 
&inputs_count, &outputs_count)) {\n    return {inputs_count, outputs_count};\n  }\n  auto function = CppStageFunctionStorage::Instance().GetFunction(func_name);\n  if (!function) {\n    return {};\n  }\n  inputs_count = function->GetInputsCount(func_name);\n  outputs_count = function->GetOutputsCount(func_name);\n  if (inputs_count == 0 || outputs_count == 0) {\n    MSI_LOG_ERROR << \"Call \" + func_name + \" inputs or outputs count cannot be 0\";\n    return {};\n  }\n  return {inputs_count, outputs_count};\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/stage_function.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_STAGE_FUNCTION_PY_H\n#define MINDSPORE_SERVING_WORKER_STAGE_FUNCTION_PY_H\n\n#include <memory>\n#include <unordered_map>\n#include <vector>\n#include <string>\n#include <utility>\n#include \"common/serving_common.h\"\n#include \"common/instance.h\"\n\nnamespace mindspore::serving {\nclass CppStageFunctionBase : public std::enable_shared_from_this<CppStageFunctionBase> {\n public:\n  CppStageFunctionBase() = default;\n  virtual ~CppStageFunctionBase() = default;\n\n  virtual Status Call(const std::string &func_name, const InstanceData &input, InstanceData *output) = 0;\n  virtual size_t GetInputsCount(const std::string &func_name) const = 0;\n  virtual size_t GetOutputsCount(const std::string &func_name) const = 0;\n};\n\nclass CppStageFunctionStorage {\n public:\n  bool Register(const std::string &func_name, std::shared_ptr<CppStageFunctionBase> function);\n  void Unregister(const std::string &func_name);\n\n  std::shared_ptr<CppStageFunctionBase> GetFunction(const std::string &func_name) const;\n\n  static CppStageFunctionStorage &Instance();\n\n private:\n  std::unordered_map<std::string, std::shared_ptr<CppStageFunctionBase>> function_map_;\n};\n\nclass CppRegStageFunction {\n public:\n  CppRegStageFunction(const std::string &func_name, std::shared_ptr<CppStageFunctionBase> function);\n  
~CppRegStageFunction() noexcept;\n\n private:\n  std::string func_name_;\n  bool register_success_ = false;\n};\n\n#define REGISTER_STAGE_FUNCTION(cls_name, func_name) \\\n  static CppRegStageFunction g_register_stage_function_##cls_name(func_name, std::make_shared<cls_name>());\n\nclass MS_API PyStageFunctionStorage {\n public:\n  static std::shared_ptr<PyStageFunctionStorage> Instance();\n\n  void Register(const std::string &func_name, size_t inputs_count, size_t outputs_count);\n\n  bool HasPyFunction(const std::string &func_name);\n  bool GetPyFunctionInfo(const std::string &func_name, size_t *inputs_count, size_t *outputs_count);\n\n  std::vector<size_t> GetPyCppFunctionInfo(const std::string &func_name) const;\n\n  PyStageFunctionStorage();\n  ~PyStageFunctionStorage();\n\n private:\n  std::unordered_map<std::string, std::pair<size_t, size_t>> function_infos_;\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_WORKER_STAGE_FUNCTION_PY_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/task_queue.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/task_queue.h\"\n#include <utility>\n#include <unordered_map>\n#include \"worker/stage_function.h\"\n\nnamespace mindspore::serving {\nTaskQueue::TaskQueue() {}\n\nvoid TaskQueue::Start(const std::string &que_name, const std::vector<TaskInfo> &task_infos,\n                      const TaskCallBack &callback) {\n  std::unique_lock<std::mutex> lock{que_lock_};\n  if (is_running) {\n    return;\n  }\n  que_name_ = que_name;\n  task_callback_ = callback;\n  methods_queue_.group_que_map.clear();\n  methods_queue_.groups_que_instances_count = 0;\n  for (auto &info : task_infos) {\n    if (info.batch_size == 0) {\n      MSI_LOG_EXCEPTION << \"Invalid batch size 0, queue name: \" << que_name;\n    }\n    auto &method_queue = methods_queue_.group_que_map[info.group_name];\n    auto &stage_queue = method_queue.priority_que_map[info.priority];\n    stage_queue.task_info = info;\n  }\n  is_running = true;\n}\n\nvoid TaskQueue::Stop() {\n  std::unique_lock<std::mutex> lock{que_lock_};\n  if (!is_running) {\n    return;\n  }\n  methods_queue_.group_que_map.clear();\n  task_callback_ = nullptr;\n\n  is_running = false;\n  cond_var_.notify_all();\n}\n\nvoid TaskQueue::PushTask(const std::string &group_name, size_t priority, const std::vector<InstancePtr> &instances) {\n  if (instances.empty()) {\n    MSI_LOG_WARNING << \"Instances 
cannot be empty\";\n    return;\n  }\n  MSI_LOG_DEBUG << que_name_ << \" Push instances count \" << instances.size()\n                << \", inputs size: \" << instances[0]->data.size();\n  {\n    std::unique_lock<std::mutex> lock{que_lock_};\n    auto method_it = methods_queue_.group_que_map.find(group_name);\n    if (method_it == methods_queue_.group_que_map.end()) {\n      MSI_LOG_EXCEPTION << \"Cannot find method \" << group_name << \" in task queue, queue name: \" << que_name_;\n    }\n    auto &stage_queue = method_it->second;\n    auto stage_it = stage_queue.priority_que_map.find(priority);\n    if (stage_it == stage_queue.priority_que_map.end()) {\n      MSI_LOG_EXCEPTION << \"Cannot find stage index \" << priority << \" in task queue, method name: \" << group_name\n                        << \", queue name: \" << que_name_;\n    }\n    auto &que = stage_it->second;\n    for (auto &instance : instances) {\n      que.instance_list.push_back(instance);\n    }\n    stage_queue.priority_que_instances_count += instances.size();\n    methods_queue_.groups_que_instances_count += instances.size();\n  }\n  cond_var_.notify_all();\n}\n\nbool TaskQueue::FindProcessTaskQueue(std::string *method_name) {\n  auto next_que = methods_queue_.next_exe_que;\n  auto &que_map = methods_queue_.group_que_map;\n  size_t index = 0;\n  std::string name;\n  for (auto &item : que_map) {\n    if (item.second.priority_que_instances_count > 0 && (name.empty() || index >= next_que)) {\n      name = item.first;\n      if (index >= next_que) {\n        break;\n      }\n    }\n    index++;\n  }\n  if (name.empty()) {\n    return false;\n  }\n  if (index + 1 >= que_map.size()) {\n    methods_queue_.next_exe_que = 0;\n  } else {\n    methods_queue_.next_exe_que = index + 1;\n  }\n  *method_name = name;\n  return true;\n}\n\nvoid TaskQueue::PopTask(TaskItem *task_item) {\n  MSI_EXCEPTION_IF_NULL(task_item);\n  std::unique_lock<std::mutex> lock{que_lock_};\n  if (!is_running) {  // before start, 
or after stop\n    MSI_LOG_INFO << \"Detect task queue is not running, maybe the Serving server is stopped.\";\n    task_item->has_stopped = true;\n    return;\n  }\n  while (true) {\n    if (methods_queue_.groups_que_instances_count == 0) {\n      cond_var_.wait(lock, [this] { return !is_running || methods_queue_.groups_que_instances_count > 0; });\n      if (!is_running) {\n        MSI_LOG_INFO << \"Detect task queue '\" << que_name_ << \"' is not running, maybe the Serving server is stopped.\";\n        task_item->has_stopped = true;\n        return;\n      }\n    }\n    std::string method_name;\n    if (!FindProcessTaskQueue(&method_name)) {\n      MSI_LOG_EXCEPTION << \"Cannot find task when the number \" << methods_queue_.groups_que_instances_count\n                        << \" of instances in task queue is not 0\";\n    }\n    auto &method_que = methods_queue_.group_que_map[method_name];\n    auto &stage_que_map = method_que.priority_que_map;\n    auto stage_it = stage_que_map.rbegin();\n    for (; stage_it != stage_que_map.rend(); ++stage_it) {\n      if (!stage_it->second.instance_list.empty()) {\n        break;\n      }\n    }\n    if (stage_it == stage_que_map.rend()) {\n      MSI_LOG_EXCEPTION << \"Cannot find task when the number \" << method_que.priority_que_instances_count\n                        << \" of instances in method task queue is not 0\";\n    }\n    auto &task_handle = stage_it->second;\n    auto batch_size = task_handle.task_info.batch_size;\n    // Pop a maximum of batch_size instances\n    if (task_handle.instance_list.size() <= batch_size) {\n      *task_item = task_handle;\n      task_handle.instance_list.clear();\n    } else {\n      *task_item = task_handle;\n      auto &instances_ret = task_item->instance_list;\n      (void)instances_ret.erase(instances_ret.begin() + static_cast<ptrdiff_t>(batch_size), instances_ret.end());\n      auto &instances_reserved = task_handle.instance_list;\n      
(void)instances_reserved.erase(instances_reserved.begin(),\n                                     instances_reserved.begin() + static_cast<ptrdiff_t>(batch_size));\n    }\n    MSI_LOG_DEBUG << que_name_ << \" Pop instances count \" << task_item->instance_list.size()\n                  << \", batch size: \" << batch_size;\n\n    method_que.priority_que_instances_count -= task_item->instance_list.size();\n    methods_queue_.groups_que_instances_count -= task_item->instance_list.size();\n    break;\n  }\n}\n\nvoid TaskQueue::PushTaskResult(const InstancePtr &input, const ResultInstance &output) {\n  if (!is_running) {\n    MSI_LOG_INFO << \"Task queue has exited\";\n    return;\n  }\n  task_callback_({input}, {output});\n}\n\nvoid TaskQueue::PushTaskResult(const std::vector<InstancePtr> &inputs, const std::vector<ResultInstance> &outputs) {\n  if (!is_running) {\n    MSI_LOG_INFO << \"Task queue has exited\";\n    return;\n  }\n  task_callback_(inputs, outputs);\n}\n\nvoid TaskQueue::PushTaskResult(const std::vector<InstancePtr> &inputs, const Status &failed_result) {\n  std::vector<ResultInstance> result;\n  for (auto &item : inputs) {\n    (void)item;\n    ResultInstance output;\n    output.error_msg = failed_result;\n    result.push_back(output);\n  }\n  PushTaskResult(inputs, result);\n}\n\nvoid PyTaskQueue::Start(const std::string &que_name, const std::vector<MethodStage> &stage_infos,\n                        const TaskCallBack &callback) {\n  std::vector<TaskInfo> task_infos;\n  for (auto &item : stage_infos) {\n    TaskInfo info;\n    info.batch_size = item.batch_size;\n    info.priority = item.stage_index;\n    info.group_name = item.method_name;\n    info.task_name = item.stage_key;\n    info.tag = item.tag;\n    task_infos.push_back(info);\n  }\n  task_queue_.Start(que_name, task_infos, callback);\n  py_task_item_processing_ = TaskItem();\n}\n\nvoid PyTaskQueue::Stop() { task_queue_.Stop(); }\n\nvoid PyTaskQueue::PushTask(const std::string &method_name, 
size_t stage_index,\n                           const std::vector<InstancePtr> &instances) {\n  task_queue_.PushTask(method_name, stage_index, instances);\n}\n\nvoid PyTaskQueue::PyPopTask(TaskItem *task_item) {\n  MSI_EXCEPTION_IF_NULL(task_item);\n  task_queue_.PopTask(task_item);\n  if (!task_item->has_stopped) {\n    py_task_item_processing_ = *task_item;\n  }\n}\n\nvoid PyTaskQueue::PyPushTaskResult(const std::vector<ResultInstance> &outputs) {\n  if (!task_queue_.IsRunning()) {\n    MSI_LOG_INFO << \"Task queue has exited\";\n    return;\n  }\n  auto &instance_list = py_task_item_processing_.instance_list;\n  if (outputs.empty() || instance_list.size() < outputs.size()) {\n    MSI_LOG_EXCEPTION << \"processing task not match result, processing size \" << instance_list.size()\n                      << \", result size \" << outputs.size();\n  }\n  std::vector<InstancePtr> instances;\n  std::vector<ResultInstance> results;\n  for (size_t i = 0; i < outputs.size(); i++) {\n    instances.push_back(instance_list[i]);\n    results.push_back(outputs[i]);\n  }\n  task_queue_.PushTaskResult(instances, results);\n  (void)instance_list.erase(instance_list.begin(), instance_list.begin() + static_cast<ptrdiff_t>(outputs.size()));\n}\n\nCppTaskQueueThreadPool::CppTaskQueueThreadPool() = default;\n\nCppTaskQueueThreadPool::~CppTaskQueueThreadPool() = default;\n\nvoid CppTaskQueueThreadPool::ThreadFunc(CppTaskQueueThreadPool *thread_pool) {\n  while (true) {\n    TaskItem task_item;\n    thread_pool->task_queue_.PopTask(&task_item);\n    if (task_item.has_stopped) {\n      return;\n    }\n    auto status = thread_pool->HandleTask(task_item);\n    if (status != SUCCESS) {\n      MSI_LOG_ERROR << \"System error happens, thread exit\";\n      return;\n    }\n  }\n}\n\nvoid CppTaskQueueThreadPool::Start(const std::string &que_name, const std::vector<MethodStage> &stage_infos,\n                                   const TaskCallBack &callback, uint32_t size) {\n  if (is_running_) 
{\n    return;\n  }\n  is_running_ = true;  // start before ThreadFunc thread pool start\n  std::vector<TaskInfo> task_infos;\n  for (auto &item : stage_infos) {\n    TaskInfo info;\n    info.batch_size = item.batch_size;\n    info.priority = item.stage_index;\n    info.group_name = item.method_name;\n    info.task_name = item.stage_key;\n    info.tag = item.tag;\n    task_infos.push_back(info);\n  }\n  task_queue_.Start(que_name, task_infos, callback);  // start before ThreadFunc thread pool start\n  for (uint32_t i = 0; i < size; ++i) {\n    (void)pool_.emplace_back(ThreadFunc, this);\n  }\n}\n\nvoid CppTaskQueueThreadPool::Stop() {\n  task_queue_.Stop();\n  for (std::thread &thd : pool_) {\n    if (thd.joinable()) {\n      try {\n        thd.join();\n      } catch (const std::system_error &) {\n      } catch (...) {\n      }\n    }\n  }\n  pool_.clear();\n  is_running_ = false;\n}\n\nvoid CppTaskQueueThreadPool::PushTask(const std::string &method_name, size_t stage_index,\n                                      const std::vector<InstancePtr> &instances) {\n  task_queue_.PushTask(method_name, stage_index, instances);\n}\n\nStatus CppTaskQueueThreadPool::HandleTask(const TaskItem &task_item) {\n  Status status;\n  auto &task_name = task_item.task_info.task_name;\n  auto preprocess = CppStageFunctionStorage::Instance().GetFunction(task_name);\n  if (!preprocess) {\n    status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"System error, get preprocess \" << task_name << \" failed\";\n    return status;\n  }\n  for (const auto &instance : task_item.instance_list) {\n    ResultInstance result;\n    try {\n      status = preprocess->Call(task_name, instance->data, &result.data);\n    } catch (const std::bad_alloc &ex) {\n      status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: malloc memory failed\";\n    } catch (const std::runtime_error &ex) {\n      status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: runtime error occurred: \" << 
ex.what();\n    } catch (const std::exception &ex) {\n      status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: exception occurred: \" << ex.what();\n    } catch (...) {\n      status = INFER_STATUS_LOG_ERROR(SYSTEM_ERROR) << \"Serving Error: exception occurred\";\n    }\n    if (status != SUCCESS) {\n      result.error_msg = status;\n    }\n    task_queue_.PushTaskResult(instance, result);\n  }\n  return SUCCESS;\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/task_queue.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_TASK_QUEUE_H\n#define MINDSPORE_SERVING_WORKER_TASK_QUEUE_H\n\n#include <vector>\n#include <unordered_map>\n#include <memory>\n#include <string>\n#include <queue>\n#include <mutex>\n#include <condition_variable>\n#include <atomic>\n#include <set>\n#include <thread>\n#include <map>\n#include \"common/instance.h\"\n\nnamespace mindspore::serving {\nstruct TaskInfo {\n  std::string group_name;  // method name\n  std::string task_name;   // function name, model name\n  uint64_t priority = 0;\n  uint64_t batch_size = 0;\n  uint64_t subgraph = 0;  // for model\n  std::string tag;\n};\n\nstruct TaskItem {\n  bool has_stopped = false;  // whether system is stopped\n  TaskInfo task_info;\n  std::vector<InstancePtr> instance_list;\n};\n\nusing TaskCallBack =\n  std::function<void(const std::vector<InstancePtr> &inputs, const std::vector<ResultInstance> &output)>;\n\nstruct TaskQueuePriority {\n  std::map<uint64_t, TaskItem> priority_que_map;  // priority: stage index, task list\n  uint64_t priority_que_instances_count = 0;\n};\n\nstruct TaskQueueGroups {\n  std::map<std::string, TaskQueuePriority> group_que_map;  // group name: method name, task que\n  size_t next_exe_que = 0;                                 // next method index\n  uint64_t groups_que_instances_count = 0;\n};\n\nclass TaskQueue {\n public:\n  
TaskQueue();\n  void Start(const std::string &que_name, const std::vector<TaskInfo> &task_infos, const TaskCallBack &callback);\n  void Stop();\n  void PushTask(const std::string &group_name, size_t priority, const std::vector<InstancePtr> &instances);\n  void PopTask(TaskItem *task_item);\n\n  void PushTaskResult(const InstancePtr &input, const ResultInstance &output);\n  void PushTaskResult(const std::vector<InstancePtr> &inputs, const std::vector<ResultInstance> &outputs);\n  void PushTaskResult(const std::vector<InstancePtr> &inputs, const Status &failed_result);\n\n  bool IsRunning() const { return is_running; }\n\n private:\n  std::string que_name_;\n  TaskQueueGroups methods_queue_;\n\n  TaskCallBack task_callback_ = nullptr;\n  std::mutex que_lock_;  // Lock only when the queue changes to avoid deadlock caused by lock in complex scenarios.\n  std::condition_variable cond_var_;\n  bool is_running = false;\n\n  bool FindProcessTaskQueue(std::string *method_name);\n};\n\nclass MS_API PyTaskQueue {\n public:\n  PyTaskQueue() = default;\n  ~PyTaskQueue() = default;\n\n  void Start(const std::string &que_name, const std::vector<MethodStage> &stage_infos, const TaskCallBack &callback);\n  void Stop();\n  void PushTask(const std::string &method_name, size_t stage_index, const std::vector<InstancePtr> &instances);\n  // for python task\n  void PyPopTask(TaskItem *task_item);\n  void PyPushTaskResult(const std::vector<ResultInstance> &outputs);\n  TaskInfo GetHandledTaskInfo() const { return py_task_item_processing_.task_info; }\n\n  bool IsRunning() const { return task_queue_.IsRunning(); }\n\n private:\n  TaskQueue task_queue_;\n  TaskItem py_task_item_processing_;\n};\n\nclass CppTaskQueueThreadPool {\n public:\n  CppTaskQueueThreadPool();\n  virtual ~CppTaskQueueThreadPool();\n\n  void Start(const std::string &que_name, const std::vector<MethodStage> &stage_infos, const TaskCallBack &callback,\n             uint32_t size = 4);\n  void Stop();\n\n  void 
PushTask(const std::string &method_name, size_t stage_index, const std::vector<InstancePtr> &instances);\n\n protected:\n  TaskQueue task_queue_;\n  std::atomic<bool> is_running_ = false;\n  std::vector<std::thread> pool_;\n\n  Status HandleTask(const TaskItem &task_item);\n  static void ThreadFunc(CppTaskQueueThreadPool *thread_pool);\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_WORKER_TASK_QUEUE_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/work_executor.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/work_executor.h\"\n#include <utility>\n#include <cstring>\n#include <thread>\n#include <chrono>\n#include <map>\n#include \"worker/stage_function.h\"\n#include \"common/tensor.h\"\n#include \"worker/servable_register.h\"\n\nnamespace mindspore::serving {\nWorkExecutor::WorkExecutor() = default;\n\nWorkExecutor::~WorkExecutor() noexcept { Stop(); }\n\nStatus WorkExecutor::Init(const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &model_loaders) {\n  Status status;\n  if (init_flag_) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Worker service has been initialized\";\n  }\n  // servable can be nullptr\n  model_loaders_ = model_loaders;\n  status = ServableRegister::Instance().InitOnModelsLoad(model_loaders);\n  if (status != SUCCESS) {\n    MSI_LOG_ERROR << \"Init on models load failed\";\n    return status;\n  }\n  InitStageFunctionQueue();\n  InitPredictTaskQueue();\n\n  init_flag_ = true;\n  return SUCCESS;\n}\n\nvoid WorkExecutor::StageCallback(const std::vector<InstancePtr> &instances,\n                                 const std::vector<ResultInstance> &outputs) {\n  if (instances.empty() || instances.size() != outputs.size()) {\n    MSI_LOG_ERROR << \"Invalid inputs size \" << instances.size() << \", result size \" << outputs.size();\n    return;\n  }\n  // <method name, <stage index, 
instances>>\n  std::map<std::string, std::map<uint64_t, std::vector<InstancePtr>>> outputs_real;\n  for (size_t i = 0; i < instances.size(); i++) {\n    auto &instance = instances[i];\n    auto &output = outputs[i];\n    if (output.error_msg != SUCCESS) {\n      (void)ReplyError(instance, output.error_msg);\n      continue;\n    }\n    CreateResultInstance(instance, output);\n    outputs_real[instance->method_def->method_name][instance->stage_index].push_back(instance);\n  }\n  for (auto &method_instances_it : outputs_real) {\n    for (auto &stage_instances_it : method_instances_it.second) {\n      auto &stage_instances = stage_instances_it.second;\n      if (!stage_instances.empty()) {\n        auto &method_def = *stage_instances[0]->method_def;\n        auto stage_index = stage_instances_it.first;\n        OnReceiveStageInputs(method_def, stage_index + 1, stage_instances);\n      }\n    }\n  }\n}\n\nvoid WorkExecutor::InitStageFunctionQueue() {\n  // init cpp preprocess and postprocess\n  auto stage_callback = [this](const std::vector<InstancePtr> &instances, const std::vector<ResultInstance> &outputs) {\n    StageCallback(instances, outputs);\n  };\n  auto const &signature = ServableRegister::Instance().GetServableSignature();\n  // start task queue for handle preprocess and postprocess\n  std::vector<MethodStage> py_stage_infos;\n  std::vector<MethodStage> cpp_stage_infos;\n  for (auto &method : signature.methods) {\n    for (auto &stage_it : method.stage_map) {\n      auto &stage = stage_it.second;\n      if (stage.stage_type == kMethodStageTypePyFunction) {\n        MSI_LOG_INFO << \"PyFunction stage \" << stage.stage_key << \", method name: \" << stage.method_name\n                     << \", stage index: \" << stage.stage_index << \", batch size: \" << stage.batch_size;\n        py_stage_infos.push_back(stage);\n      } else if (stage.stage_type == kMethodStageTypeCppFunction) {\n        MSI_LOG_INFO << \"CppFunction stage \" << stage.stage_key << \", 
method name: \" << stage.method_name\n                     << \", stage index: \" << stage.stage_index << \", batch size: \" << stage.batch_size;\n        cpp_stage_infos.push_back(stage);\n      }\n    }\n  }\n  if (!py_stage_infos.empty()) {\n    py_task_queue_.Start(\"PyTask\", py_stage_infos, stage_callback);\n  }\n  if (!cpp_stage_infos.empty()) {\n    cpp_task_queue_pool_.Start(\"CppTask\", cpp_stage_infos, stage_callback, 3);  // 3 thread\n  }\n}\n\nvoid WorkExecutor::InitPredictTaskQueue() {\n  auto stage_callback = [this](const std::vector<InstancePtr> &instances, const std::vector<ResultInstance> &outputs) {\n    StageCallback(instances, outputs);\n  };\n  auto const &signature = ServableRegister::Instance().GetServableSignature();\n  for (auto &model_meta : signature.model_metas) {\n    auto model_key = model_meta.common_meta.model_key;\n    auto &thread = predict_thread_map_[model_key];  // insert\n    thread.Start(\"PredictTask\", model_loaders_[model_key], model_meta, stage_callback);\n  }\n}\n\nvoid WorkExecutor::Stop() {\n  init_flag_ = false;\n  for (auto &item : predict_thread_map_) {\n    item.second.Stop();\n  }\n  predict_thread_map_.clear();\n  ClearInstances(Status(WORKER_UNAVAILABLE, \"Servable stopped\"));\n  for (auto &model : model_loaders_) {\n    model.second->Clear();\n  }\n  model_loaders_.clear();\n  py_task_queue_.Stop();\n  cpp_task_queue_pool_.Stop();\n}\n\nStatus WorkExecutor::Work(const RequestSpec &request_spec, const std::vector<InstanceData> &instances_data,\n                          const WorkCallBack &on_process_done) {\n  if (!init_flag_) {\n    MSI_LOG_EXCEPTION << \"Worker service has not been initialized\";\n  }\n  auto user_id = WorkExecutor::GetNextUserId();\n  InferSession infer_session;\n  infer_session.call_back = on_process_done;\n\n  auto const &signature = ServableRegister::Instance().GetServableSignature();\n  auto method_def = signature.GetMethodDeclare(request_spec.method_name);\n  if (method_def == nullptr) 
{\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Not support method \" << request_spec.method_name;\n  }\n\n  std::vector<InstancePtr> instances;\n  for (size_t i = 0; i < instances_data.size(); i++) {\n    if (method_def->inputs.size() != instances_data[i].size()) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"The inputs count \" << instances_data[i].size() << \" of instance \" << i\n                                            << \" is not equal to the inputs count \" << method_def->inputs.size()\n                                            << \" of the method \" << request_spec.method_name;\n    }\n\n    auto instance = std::make_shared<Instance>();\n    instances.push_back(instance);\n\n    instance->method_def = method_def;\n    instance->stage_data_list[0] = instances_data[i];  // stage 0 data: input\n    instance->stage_max = method_def->GetStageMax();\n    instance->user_id = user_id;\n  }\n  infer_session.instances = instances;\n  {\n    std::unique_lock<std::mutex> lock(infer_session_map_mutex_);\n    infer_session_map_[user_id] = infer_session;\n  }\n  OnReceiveStageInputs(*method_def, kStageStartIndex, instances);  // stage 1 is the first stage\n  return SUCCESS;\n}\n\nvoid WorkExecutor::OnReceiveStageInputs(const MethodSignature &method_def, uint64_t stage_index,\n                                        const std::vector<InstancePtr> &instances) {\n  if (instances.empty()) {\n    MSI_LOG_EXCEPTION << \"Inputs cannot be empty\";\n  }\n  auto stage_it = method_def.stage_map.find(stage_index);\n  if (stage_it == method_def.stage_map.end()) {\n    MSI_LOG_EXCEPTION << \"Cannot find stage \" << stage_index;\n  }\n  auto &stage = stage_it->second;\n  CreateInputInstance(stage, instances);\n  if (stage_index >= method_def.GetStageMax()) {\n    (void)ReplyRequest(instances);\n    return;\n  }\n  if (stage.stage_type == kMethodStageTypePyFunction) {\n    py_task_queue_.PushTask(method_def.method_name, stage_index, instances);\n  } else if (stage.stage_type 
== kMethodStageTypeCppFunction) {\n    cpp_task_queue_pool_.PushTask(method_def.method_name, stage_index, instances);\n  } else if (stage.stage_type == kMethodStageTypeModel) {\n    auto it = predict_thread_map_.find(stage.stage_key);\n    if (it == predict_thread_map_.end()) {\n      MSI_LOG_EXCEPTION << \"Cannot find model \" << stage.stage_key << \" in predict_thread_map_\";\n    }\n    it->second.PushPredictTask(stage, instances);\n  } else {\n    MSI_LOG_EXCEPTION << \"Invalid stage type \" << static_cast<int>(stage.stage_type);\n  }\n}\n\nbool WorkExecutor::ReplyRequest(const std::vector<InstancePtr> &outputs) {\n  MSI_TIME_STAMP_START(ReplyRequest)\n  for (auto &item : outputs) {\n    (void)ReplyRequest(item);\n  }\n  MSI_TIME_STAMP_END(ReplyRequest)\n  return true;\n}\n\nbool WorkExecutor::ReplyCallback(const InstancePtr &instance) {\n  instance->stage_data_list.clear();\n  instance->stage_index = instance->stage_max;\n\n  std::unique_lock<std::mutex> lock(infer_session_map_mutex_);\n  auto it = infer_session_map_.find(instance->user_id);\n  if (it == infer_session_map_.end()) {\n    MSI_LOG_WARNING << \"Cannot find user in session map, user id \" << instance->user_id;\n    return false;\n  }\n  auto &infer_session = it->second;\n  infer_session.reply_count++;\n  if (infer_session.reply_count == infer_session.instances.size()) {\n    infer_session.call_back(infer_session.instances);\n    (void)infer_session_map_.erase(it);\n  }\n  return true;\n}\n\nbool WorkExecutor::ReplyRequest(const InstancePtr &instance) {\n  instance->error_msg = SUCCESS;\n  return ReplyCallback(instance);\n}\n\nbool WorkExecutor::ReplyError(const InstancePtr &instance, const Status &error_msg) {\n  instance->error_msg = error_msg;\n  instance->data.clear();\n  return ReplyCallback(instance);\n}\n\nvoid WorkExecutor::CreateInputInstance(const MethodStage &stage, const std::vector<InstancePtr> &instances) {\n  for (auto &instance : instances) {\n    CreateInputInstance(stage, 
instance);\n  }\n}\n\nvoid WorkExecutor::CreateInputInstance(const MethodStage &stage, const InstancePtr &instance) {\n  instance->data.clear();\n  const auto &inputs = stage.stage_inputs;\n  instance->stage_index = stage.stage_index;\n  for (auto &item : inputs) {\n    if (item.first >= instance->stage_data_list.size()) {\n      MSI_LOG_EXCEPTION << \"Invalid input stage index \" << item.first << \", data stage count \"\n                        << instance->stage_data_list.size();\n    }\n    auto &data = instance->stage_data_list[item.first];\n    if (data.size() <= item.second) {\n      MSI_LOG_EXCEPTION << \"Invalid output index \" << item.second << \", output count \" << data.size()\n                        << \", input stage index \" << item.first << \", stage index \" << stage.stage_index << \", method \"\n                        << stage.method_name;\n    }\n    instance->data.push_back(data[item.second]);\n  }\n}\n\nvoid WorkExecutor::CreateResultInstance(const InstancePtr &instance, const ResultInstance &result) {\n  instance->data.clear();\n  auto stage_index = instance->stage_index;\n  instance->stage_data_list[stage_index] = result.data;\n}\n\nuint64_t WorkExecutor::GetNextUserId() {\n  static std::atomic<uint64_t> user_id = 0;\n  return ++user_id;\n}\n\nuint64_t WorkExecutor::GetMaxBatchSize() const {\n  uint64_t batch_size = 1;\n  for (auto &model : predict_thread_map_) {\n    auto model_batch = model.second.GetBatchSize();\n    if (model_batch > batch_size) {\n      batch_size = model_batch;\n    }\n  }\n  return batch_size;\n}\n\nvoid WorkExecutor::ClearInstances(const Status &error_msg) {\n  std::unique_lock<std::mutex> lock(infer_session_map_mutex_);\n  MSI_LOG_INFO << \"Clear instances, remain request count \" << infer_session_map_.size();\n  for (auto &item : infer_session_map_) {\n    auto &infer_session = item.second;\n    for (auto &instance : infer_session.instances) {\n      if (instance->stage_index != instance->stage_max) {\n        
instance->error_msg = error_msg;\n      }\n    }\n    item.second.call_back(item.second.instances);\n  }\n  infer_session_map_.clear();\n}\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/work_executor.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_WORK_EXECUTOR_H\n#define MINDSPORE_SERVING_WORKER_WORK_EXECUTOR_H\n\n#include <vector>\n#include <memory>\n#include <string>\n#include <mutex>\n#include <map>\n#include \"common/serving_common.h\"\n#include \"common/instance.h\"\n#include \"common/servable.h\"\n#include \"worker/model_loader_base.h\"\n#include \"worker/predict_thread.h\"\n#include \"worker/task_queue.h\"\n\nnamespace mindspore::serving {\nusing WorkCallBack = std::function<void(const std::vector<InstancePtr> &instances)>;\n\nstruct InferSession {\n  std::vector<InstancePtr> instances;\n  size_t reply_count = 0;\n  WorkCallBack call_back = nullptr;\n};\n\nclass WorkExecutor : public std::enable_shared_from_this<WorkExecutor> {\n public:\n  WorkExecutor();\n  ~WorkExecutor() noexcept;\n\n  Status Init(const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &model_loaders);\n  Status Work(const RequestSpec &request_spec, const std::vector<InstanceData> &inputs,\n              const WorkCallBack &on_process_done);\n  void Stop();\n\n  static uint64_t GetNextUserId();\n\n  void ClearInstances(const Status &error_msg);\n  uint64_t GetMaxBatchSize() const;\n\n  PyTaskQueue &GetPyTaskQueue() { return py_task_queue_; }\n\n private:\n  std::map<std::string, std::shared_ptr<ModelLoaderBase>> model_loaders_;\n\n  bool init_flag_ = 
false;\n\n  std::map<std::string, PredictThread> predict_thread_map_;\n  PyTaskQueue py_task_queue_;\n  CppTaskQueueThreadPool cpp_task_queue_pool_;\n\n  std::map<uint64_t, InferSession> infer_session_map_;\n  std::mutex infer_session_map_mutex_;\n\n  bool ReplyCallback(const InstancePtr &instance);\n  bool ReplyError(const InstancePtr &context, const Status &error_msg);\n  bool ReplyRequest(const std::vector<InstancePtr> &outputs);\n  bool ReplyRequest(const InstancePtr &outputs);\n\n  void OnReceiveStageInputs(const MethodSignature &method_def, uint64_t stage_index,\n                            const std::vector<InstancePtr> &instances);\n\n  static void CreateInputInstance(const MethodStage &stage, const InstancePtr &instance);\n  static void CreateInputInstance(const MethodStage &stage, const std::vector<InstancePtr> &instances);\n  static void CreateResultInstance(const InstancePtr &instance, const ResultInstance &result);\n\n  void StageCallback(const std::vector<InstancePtr> &instances, const std::vector<ResultInstance> &outputs);\n  void InitStageFunctionQueue();\n  void InitPredictTaskQueue();\n};\n\n}  // namespace mindspore::serving\n\n#endif  // MINDSPORE_SERVING_WORKER_WORK_EXECUTOR_H\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/worker.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/worker.h\"\n#include <unistd.h>\n#include <condition_variable>\n#include <regex>\n#include \"pybind11/pybind11.h\"\n#include \"common/proto_tensor.h\"\n#include \"common/exit_handle.h\"\n#include \"worker/context.h\"\n#include \"worker/grpc/worker_process.h\"\n#include \"worker/task_queue.h\"\n#include \"worker/grpc/worker_server.h\"\n#include \"worker/servable_register.h\"\n\nnamespace py = pybind11;\n\nnamespace mindspore {\nnamespace serving {\nWorker &Worker::GetInstance() {\n  static Worker instance;\n  return instance;\n}\n\nStatus Worker::RegisterWorker(const std::string &master_address, const std::string &worker_address) {\n  notify_master_ = std::make_shared<GrpcNotifyMaster>(master_address, worker_address);\n  WorkerRegSpec worker_spec;\n  worker_spec.servable_spec = servable_spec_;\n  worker_spec.worker_address = worker_address;\n  worker_spec.worker_pid = getpid();\n  auto status = notify_master_->Register(worker_spec);\n  return status;\n}\n\nStatus Worker::RunAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                        const PredictOnFinish &on_finish) {\n  Status status;\n  RequestSpec request_spec;\n  GrpcTensorHelper::GetRequestSpec(request, &request_spec);\n\n  auto servable_name = request_spec.servable_name;\n  auto method_name = request_spec.method_name;\n\n  
const ServableSignature &servable_signature = ServableRegister::Instance().GetServableSignature();\n  if (servable_signature.servable_name != servable_name) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"Servable \" << servable_name << \" is not declared\";\n  }\n  auto method_signature = servable_signature.GetMethodDeclare(method_name);\n  if (method_signature == nullptr) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS)\n           << \"Method \" << method_name << \" is not registered for servable \" << servable_name;\n  }\n  const MethodSignature &method = *method_signature;\n  std::vector<InstanceData> instances_data;\n  status = GrpcTensorHelper::CreateInstanceFromRequest(method, request, &instances_data);\n  if (status != SUCCESS) {\n    MSI_LOG(ERROR) << \"transfer request to instances failed\";\n    return status;\n  }\n  *(reply->mutable_servable_spec()) = request.servable_spec();\n  WorkCallBack on_process_done = [&request, reply, on_finish, method](const std::vector<InstancePtr> &instances) {\n    GrpcTensorHelper::CreateReplyFromInstances(request, method, instances, reply);\n    on_finish();\n  };\n  return RunAsync(request_spec, instances_data, on_process_done);\n}\n\nStatus Worker::RunAsync(const RequestSpec &request_spec, const std::vector<InstanceData> &instances_data,\n                        const WorkCallBack &on_process_done) {\n  while (true) {\n    // avoid deadlock when Worker::Clear->gRPC shutdown, while gRPC shutdown waiting all request finished\n    if (worker_shared_lock_.try_lock_shared()) {\n      auto status = RunAsyncInner(request_spec, instances_data, on_process_done);\n      worker_shared_lock_.unlock_shared();\n      return status;\n    } else if (!servable_started_) {\n      return INFER_STATUS_LOG_ERROR(WORKER_UNAVAILABLE)\n             << \"RunAsync worker for inference failed, worker has not been started or stopped\";\n    }\n    std::chrono::milliseconds duration(1);  // 1ms\n    
std::this_thread::sleep_for(duration);\n  }\n}\n\nStatus Worker::RunAsyncInner(const RequestSpec &request_spec, const std::vector<InstanceData> &instances_data,\n                             const WorkCallBack &on_process_done) {\n  if (!servable_started_) {\n    return INFER_STATUS_LOG_ERROR(WORKER_UNAVAILABLE)\n           << \"RunAsync worker for inference failed, worker has not been started or stopped\";\n  }\n  if (instances_data.empty()) {\n    return INFER_STATUS_LOG_ERROR(INVALID_INPUTS) << \"Input instances count is 0\";\n  }\n  if (!CheckServableRequest(request_spec)) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Cannot find servable match \" << request_spec.Repr();\n  }\n  MSI_LOG_INFO << \"New request, method: \" << request_spec.method_name << \", instances count: \" << instances_data.size();\n  return worker_executor_.Work(request_spec, instances_data, on_process_done);\n}\n\nStatus Worker::Run(const RequestSpec &request_spec, const std::vector<InstanceData> &instances_data,\n                   std::vector<InstancePtr> *out) {\n  if (!servable_started_) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Run worker for inference failed, worker has not been started\";\n  }\n  MSI_EXCEPTION_IF_NULL(out);\n  auto promise = std::make_shared<std::promise<void>>();\n  auto future = promise->get_future();\n  WorkCallBack on_process_done = [promise, out](const std::vector<InstancePtr> &instances) {\n    *out = instances;\n    promise->set_value();\n  };\n  auto status = RunAsync(request_spec, instances_data, on_process_done);\n  if (status != SUCCESS) {\n    return status;\n  }\n  future.get();\n  return SUCCESS;\n}\n\nStatus Worker::StartGrpcServer(const std::string &server_address) {\n  if (worker_grpc_server_ != nullptr) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Worker gRPC server is already running\";\n  }\n  worker_grpc_server_ = std::make_shared<WorkerGrpcServer>();\n  SSLConfig ssl_config;\n  return worker_grpc_server_->Start(server_address, 
ssl_config, gRpcMaxMBMsgSize, \"Worker gRPC\");\n}\n\nStatus Worker::StartDistributedGrpcServer(std::shared_ptr<DistributedModelLoader> servable,\n                                          const std::string &server_address) {\n  if (distributed_grpc_server_ != nullptr) {\n    return INFER_STATUS_LOG_ERROR(FAILED) << \"Distributed gRPC server is already running\";\n  }\n  distributed_grpc_server_ = std::make_shared<DistributedWorkerGrpcServer>(servable, server_address);\n  SSLConfig ssl_config;\n  return distributed_grpc_server_->Start(server_address, ssl_config, gRpcMaxMBMsgSize, \"Distributed gRPC\");\n}\n\nStatus Worker::StartServable(const std::string &servable_directory, const std::string &servable_name,\n                             uint32_t version_number,\n                             const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &models,\n                             const std::string &master_address, const std::string &worker_address, bool own_device) {\n  auto status = StartServableInner(servable_name, version_number, models, own_device);\n  if (status != SUCCESS) {\n    return status;\n  }\n  status = StartGrpcServer(worker_address);\n  if (status != SUCCESS) {\n    return status;\n  }\n  status = RegisterWorker(master_address, worker_address);\n  if (status != SUCCESS) {\n    return status;\n  }\n  status = INFER_STATUS(SUCCESS) << \"Serving: Start servable success, servable directory: '\" << servable_directory\n                                 << \"', servable name: '\" << servable_name << \"', version number: \" << version_number;\n  MSI_LOG_INFO << status.StatusMessage();\n  std::cout << status.StatusMessage() << std::endl;\n  return SUCCESS;\n}\n\nStatus Worker::StartServableInner(const std::string &servable_name, uint32_t version_number,\n                                  const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &models,\n                                  bool own_device) {\n  if (servable_started_) {\n    
return INFER_STATUS_LOG_ERROR(FAILED)\n           << \"A servable has been started, only one servable can run in a process currently.\";\n  }\n  clear_flag_.clear();\n  auto status = worker_executor_.Init(models);\n  if (status != SUCCESS) {\n    return status;\n  }\n  servable_spec_.servable_name = servable_name;\n  servable_spec_.version_number = version_number;\n  servable_spec_.batch_size = worker_executor_.GetMaxBatchSize();\n  servable_spec_.methods.clear();\n  servable_spec_.own_device = own_device;\n\n  for (auto &model_it : models) {\n    ModelInfo model_info;\n    auto &model_key = model_it.first;\n    auto &model = model_it.second;\n    model_info.batch_size = model->GetBatchSize();\n    auto graph_num = model->GetGraphNum();\n    model_info.sub_graph_infos.resize(graph_num);\n    for (uint64_t i = 0; i < graph_num; i++) {\n      model_info.sub_graph_infos[i].input_infos = model->GetInputInfos(i);\n      model_info.sub_graph_infos[i].output_infos = model->GetOutputInfos(i);\n    }\n    servable_spec_.models[model_key] = model_info;\n  }\n  const ServableSignature &signature = ServableRegister::Instance().GetServableSignature();\n  for (auto &method : signature.methods) {\n    ServableMethodInfo worker_method_info;\n    bool has_model = false;\n    bool has_func = false;\n    for (auto &stage : method.stage_map) {\n      if (stage.second.stage_type == kMethodStageTypeModel) {\n        has_model = true;\n      } else if (stage.second.stage_type == kMethodStageTypePyFunction ||\n                 stage.second.stage_type == kMethodStageTypeCppFunction) {\n        has_func = true;\n      }\n    }\n    if (has_model && !has_func) {\n      worker_method_info.only_model_stage = true;\n    } else {\n      worker_method_info.only_model_stage = false;\n    }\n    // This worker does not occupy device and is only used to run python function stage to support python parallelism.\n    // If one method does not contain function stage, requests of this method do not need 
to routed to this\n    // worker.\n    if (!servable_spec_.own_device && worker_method_info.only_model_stage) {\n      continue;\n    }\n    worker_method_info.name = method.method_name;\n    for (auto &name : method.inputs) {\n      worker_method_info.input_names.push_back(name);\n    }\n    servable_spec_.methods.push_back(worker_method_info);\n  }\n  servable_started_ = true;\n  return SUCCESS;\n}\n\nvoid Worker::StopServable(bool notify_master) {\n  exit_notify_master_ = notify_master;\n  ExitSignalHandle::Instance().Stop();\n}\n\nvoid Worker::Clear() {\n  std::unique_lock<std::shared_mutex> lock(worker_shared_lock_);\n  MSI_LOG_INFO << \"Start clear worker session\";\n  servable_started_ = false;\n  worker_executor_.Stop();\n  if (exit_notify_master_ && notify_master_) {\n    notify_master_->Unregister();\n  }\n  if (worker_grpc_server_) {\n    worker_grpc_server_->Stop();\n    worker_grpc_server_ = nullptr;\n  }\n  if (distributed_grpc_server_) {\n    distributed_grpc_server_->Stop();\n    distributed_grpc_server_ = nullptr;\n  }\n  MSI_LOG_INFO << \"End clear worker session\";\n}\n\nbool Worker::IsRunning() { return servable_started_; }\n\nWorker::~Worker() {\n  Clear();\n  if (listening_parent_thread_.joinable()) {\n    listening_parent_thread_.join();\n  }\n}\n\nbool Worker::CheckServableRequest(const RequestSpec &request_spec) {\n  if (servable_spec_.servable_name != request_spec.servable_name) {\n    return false;\n  }\n  if (request_spec.version_number != 0 && servable_spec_.version_number != request_spec.version_number) {\n    return false;\n  }\n  return true;\n}\n\nWorker::Worker() {}\n\nvoid Worker::ClearOnSystemFailed(const Status &error_msg) {\n  std::shared_lock<std::shared_mutex> lock(worker_shared_lock_);\n  MSI_LOG_INFO << \"Clear instances on system failed: \" << error_msg.StatusMessage();\n  worker_executor_.ClearInstances(error_msg);\n}\n\nstatic std::vector<int> GetAllChildrenPids(int cur_pid) {\n  if (cur_pid <= 0) {\n    return {};\n  
}\n  std::string get_all_children_pids = \"ps -o pid --no-headers --ppid \" + std::to_string(cur_pid);\n  FILE *fp = popen(get_all_children_pids.c_str(), \"r\");\n  if (fp == nullptr) {\n    return {};\n  }\n  constexpr int max_result_size = 1024;\n  char buf[max_result_size] = {0};\n  std::string cmd_result;\n  while (fgets(buf, max_result_size, fp) != nullptr && cmd_result.size() <= max_result_size) {\n    cmd_result += std::string(buf) + \" \";\n  }\n  pclose(fp);\n  if (cmd_result.size() == max_result_size || cmd_result.empty()) {\n    return {};\n  }\n  std::regex pid_reg(\"[0-9]+\");\n  auto match_beg = std::sregex_iterator(cmd_result.begin(), cmd_result.end(), pid_reg);\n  auto match_end = std::sregex_iterator();\n  if (match_beg == match_end) {\n    return {};\n  }\n  std::vector<int> direct_children;\n  for (auto item = match_beg; item != match_end; ++item) {\n    auto pid_str = item->str();\n    auto pid = static_cast<int>(std::strtol(pid_str.c_str(), nullptr, 10));\n    if (pid <= 0) {\n      continue;\n    }\n    std::ifstream stat_fp(\"/proc/\" + std::to_string(pid) + \"/stat\");\n    if (!stat_fp.is_open()) {\n      continue;\n    }\n    constexpr int cache_size_max = 128;\n    char cache[cache_size_max + 1] = {0};\n    stat_fp.read(cache, cache_size_max);\n    std::string cache_str = cache;\n    auto pos = cache_str.find(\") \");\n    if (pos == std::string::npos) {\n      continue;\n    }\n    cache_str = cache_str.substr(pos + strlen(\") S \"));\n    int child_ppid = static_cast<int>(std::strtol(cache_str.c_str(), nullptr, 10));\n    if (child_ppid != cur_pid) {\n      continue;\n    }\n    direct_children.push_back(pid);\n  }\n  std::vector<int> all_pids = direct_children;\n  for (auto &pid : direct_children) {\n    auto pids = GetAllChildrenPids(pid);\n    all_pids.insert(all_pids.end(), pids.begin(), pids.end());\n  }\n  return all_pids;\n}\n\nvoid Worker::StartListeningParentExitThread() {\n  auto thread_func = [this]() {\n    MSI_LOG_INFO << 
\"Start listening parent\";\n    auto init_parent_pid = getppid();\n    constexpr int sleep_period_in_ms = 100;\n    constexpr int try_kill_children_times = 100;\n    // exit when receive SIGINT SIGTERM, or parent process exit\n    while (true) {\n      if (ExitSignalHandle::Instance().HasStopped()) {\n        MSI_LOG_WARNING << \"Worker has received exit message, worker begin to exit\";\n        break;\n      }\n      auto cur_parent_pid = getppid();\n      if (init_parent_pid != cur_parent_pid) {\n        MSI_LOG_WARNING << \"Worker detect parent pid=\" << init_parent_pid << \" has exited, worker begin to exit\";\n        ExitSignalHandle::Instance().Stop();\n        break;\n      }\n      std::this_thread::sleep_for(std::chrono::milliseconds(sleep_period_in_ms));\n    }\n    Clear();\n    auto cur_pid = getpid();\n    for (int i = 0; i < try_kill_children_times; i++) {  // 100*100ms=10s\n      auto child_pids = GetAllChildrenPids(cur_pid);\n      if (child_pids.empty() && !continue_listen_children_) {\n        break;\n      }\n      for (auto pid : child_pids) {\n        kill(pid, SIGTERM);\n      }\n      std::this_thread::sleep_for(std::chrono::milliseconds(sleep_period_in_ms));\n    }\n    MSI_LOG_INFO << \"Stop listening parent\";\n  };\n  listening_parent_thread_ = std::thread(thread_func);\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/ccsrc/worker/worker.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_WORKER_WORKER_H\n#define MINDSPORE_SERVING_WORKER_WORKER_H\n\n#include <memory>\n#include <unordered_map>\n#include <vector>\n#include <string>\n#include <utility>\n#include <shared_mutex>\n#include <map>\n#include \"worker/work_executor.h\"\n#include \"common/serving_common.h\"\n#include \"proto/ms_service.pb.h\"\n#include \"worker/notfiy_master/grpc_notify.h\"\n#include \"common/grpc_server.h\"\n#include \"worker/task_queue.h\"\n#include \"common/grpc_async_server.h\"\n#include \"worker/model_loader_base.h\"\n#include \"worker/grpc/worker_server.h\"\n#include \"worker/distributed_worker/distributed_process/distributed_server.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass MS_API Worker {\n public:\n  Worker();\n  ~Worker();\n\n  static Worker &GetInstance();\n  void Clear();\n  Status Run(const RequestSpec &request_spec, const std::vector<InstanceData> &instances_data,\n             std::vector<InstancePtr> *out);\n  Status RunAsync(const proto::PredictRequest &request, proto::PredictReply *reply, const PredictOnFinish &on_finish);\n\n  Status RunAsync(const RequestSpec &request_spec, const std::vector<InstanceData> &instances_data,\n                  const WorkCallBack &on_process_done);\n  Status StartServable(const std::string &servable_directory, const std::string &servable_name, uint32_t 
version_number,\n                       const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &models,\n                       const std::string &master_address, const std::string &worker_address, bool own_device);\n\n  Status StartGrpcServer(const std::string &server_address);\n  Status StartDistributedGrpcServer(std::shared_ptr<DistributedModelLoader> servable,\n                                    const std::string &server_address);\n\n  void StopServable(bool notify_master = true);\n  bool IsRunning();\n  Status RegisterWorker(const std::string &master_address, const std::string &worker_address);\n\n  WorkExecutor &GetWorkExecutor() { return worker_executor_; }\n  void ClearOnSystemFailed(const Status &error_msg);\n  std::shared_ptr<GrpcNotifyMaster> GetGrpcNotifyMaster() { return notify_master_; }\n\n  void SetContinueListenChildren(bool continue_listen_children) {\n    continue_listen_children_ = continue_listen_children;\n  }\n  void StartListeningParentExitThread();\n\n private:\n  WorkExecutor worker_executor_;\n\n  ServableRegSpec servable_spec_;\n\n  std::atomic_bool exit_notify_master_ = true;\n  std::atomic_bool servable_started_ = false;\n  std::atomic_flag clear_flag_ = ATOMIC_FLAG_INIT;\n  std::shared_ptr<GrpcNotifyMaster> notify_master_ = nullptr;\n  std::shared_ptr<WorkerGrpcServer> worker_grpc_server_ = nullptr;\n  std::shared_ptr<DistributedWorkerGrpcServer> distributed_grpc_server_ = nullptr;\n\n  std::shared_mutex worker_shared_lock_;\n  bool continue_listen_children_ = false;\n  std::thread listening_parent_thread_;\n\n  Status StartServableInner(const std::string &servable_name, uint32_t version_number,\n                            const std::map<std::string, std::shared_ptr<ModelLoaderBase>> &models, bool own_device);\n\n  Status RunAsyncInner(const RequestSpec &request_spec, const std::vector<InstanceData> &instances_data,\n                       const WorkCallBack &on_process_done);\n  bool CheckServableRequest(const RequestSpec 
&request_spec);\n};\n\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_WORKER_WORKER_H\n"
  },
  {
    "path": "mindspore_serving/client/__init__.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MindSpore Serving Client API, which can be used to access the Serving Server through gRPC\"\"\"\n\nfrom .python.client import Client\nfrom .python.client import SSLConfig\n\n__all__ = []\n__all__.extend([\n    \"Client\",\n    \"SSLConfig\"\n])\n"
  },
  {
    "path": "mindspore_serving/client/cpp/client.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"client/cpp/client.h\"\n#include <grpcpp/grpcpp.h>\n#include <google/protobuf/text_format.h>\n#include <algorithm>\n#include <unordered_map>\n#include <utility>\n#include <sstream>\n#include \"proto/ms_service.pb.h\"\n#include \"proto/ms_service.grpc.pb.h\"\n\nnamespace mindspore {\nnamespace serving {\nnamespace client {\nStatus &Status::operator<<(DataType val) {\n  std::unordered_map<DataType, std::string> data_type_map = {\n    {DT_UINT8, \"uint8\"},   {DT_UINT16, \"uint16\"},   {DT_UINT32, \"uint32\"},   {DT_UINT64, \"uint64\"},\n    {DT_INT8, \"int8\"},     {DT_INT16, \"int16\"},     {DT_INT32, \"int32\"},     {DT_INT64, \"int64\"},\n    {DT_BOOL, \"bool\"},     {DT_FLOAT16, \"float16\"}, {DT_FLOAT32, \"float32\"}, {DT_FLOAT64, \"float64\"},\n    {DT_STRING, \"string\"}, {DT_BYTES, \"bytes\"},     {DT_UNKNOWN, \"unknown\"},\n  };\n  auto it = data_type_map.find(val);\n  if (it == data_type_map.end()) {\n    status_msg_ += \"unknown\";\n  } else {\n    status_msg_ += it->second;\n  }\n  return *this;\n}\n\nStatus &operator<<(Status &status, proto::DataType val) {\n  std::unordered_map<proto::DataType, std::string> data_type_map = {\n    {proto::MS_UINT8, \"uint8\"},     {proto::MS_UINT16, \"uint16\"},   {proto::MS_UINT32, \"uint32\"},\n    {proto::MS_UINT64, \"uint64\"},   {proto::MS_INT8, \"int8\"},       
{proto::MS_INT16, \"int16\"},\n    {proto::MS_INT32, \"int32\"},     {proto::MS_INT64, \"int64\"},     {proto::MS_BOOL, \"bool\"},\n    {proto::MS_FLOAT16, \"float16\"}, {proto::MS_FLOAT32, \"float32\"}, {proto::MS_FLOAT64, \"float64\"},\n    {proto::MS_STRING, \"string\"},   {proto::MS_BYTES, \"bytes\"},     {proto::MS_UNKNOWN, \"unknown\"},\n  };\n  auto it = data_type_map.find(val);\n  if (it == data_type_map.end()) {\n    status << \"unknown\";\n  } else {\n    status << it->second;\n  }\n  return status;\n}\n\nStatus &operator<<(Status &status, grpc::StatusCode val) {\n  std::unordered_map<grpc::StatusCode, std::string> data_type_map = {\n    {grpc::OK, \"OK\"},\n    {grpc::CANCELLED, \"CANCELLED\"},\n    {grpc::UNKNOWN, \"UNKNOWN\"},\n    {grpc::INVALID_ARGUMENT, \"INVALID_ARGUMENT\"},\n    {grpc::DEADLINE_EXCEEDED, \"DEADLINE_EXCEEDED\"},\n    {grpc::NOT_FOUND, \"NOT_FOUND\"},\n    {grpc::ALREADY_EXISTS, \"ALREADY_EXISTS\"},\n    {grpc::PERMISSION_DENIED, \"PERMISSION_DENIED\"},\n    {grpc::UNAUTHENTICATED, \"UNAUTHENTICATED\"},\n    {grpc::RESOURCE_EXHAUSTED, \"RESOURCE_EXHAUSTED\"},\n    {grpc::FAILED_PRECONDITION, \"FAILED_PRECONDITION\"},\n    {grpc::ABORTED, \"ABORTED\"},\n    {grpc::OUT_OF_RANGE, \"OUT_OF_RANGE\"},\n    {grpc::UNIMPLEMENTED, \"UNIMPLEMENTED\"},\n    {grpc::INTERNAL, \"INTERNAL\"},\n    {grpc::UNAVAILABLE, \"UNAVAILABLE\"},\n    {grpc::DATA_LOSS, \"DATA_LOSS\"},\n  };\n  auto it = data_type_map.find(val);\n  if (it == data_type_map.end()) {\n    status << \"unknown\";\n  } else {\n    status << it->second;\n  }\n  return status;\n}\n\nStatus MutableTensor::SetBytesData(const std::vector<uint8_t> &val) {\n  if (mutable_proto_tensor_ == nullptr) {\n    return Status(SYSTEM_ERROR) << \"proto tensor cannot be nullptr\";\n  }\n  auto proto_shape = mutable_proto_tensor_->mutable_shape();\n  proto_shape->add_dims(1);\n  mutable_proto_tensor_->set_dtype(proto::MS_BYTES);\n  if (val.empty()) {\n    return Status(INVALID_INPUTS) << \"Input index 
bytes val len is empty\";\n  }\n  mutable_proto_tensor_->add_bytes_val(val.data(), val.size());\n  return SUCCESS;\n}\n\nStatus MutableTensor::SetStrData(const std::string &val) {\n  if (mutable_proto_tensor_ == nullptr) {\n    return Status(SYSTEM_ERROR) << \"proto tensor cannot be nullptr\";\n  }\n  auto proto_shape = mutable_proto_tensor_->mutable_shape();\n  proto_shape->add_dims(val.size());\n  mutable_proto_tensor_->set_dtype(proto::MS_STRING);\n  if (val.empty()) {\n    return Status(INVALID_INPUTS) << \"string index string val len is empty\";\n  }\n  mutable_proto_tensor_->add_bytes_val(val);\n  return SUCCESS;\n}\n\nStatus MutableTensor::SetData(const std::vector<uint8_t> &val, const std::vector<int64_t> &shape) {\n  return SetData(val.data(), val.size() * sizeof(uint8_t), shape, DT_UINT8);\n}\n\nStatus MutableTensor::SetData(const std::vector<uint16_t> &val, const std::vector<int64_t> &shape) {\n  return SetData(val.data(), val.size() * sizeof(uint16_t), shape, DT_UINT16);\n}\n\nStatus MutableTensor::SetData(const std::vector<uint32_t> &val, const std::vector<int64_t> &shape) {\n  return SetData(val.data(), val.size() * sizeof(uint32_t), shape, DT_UINT32);\n}\n\nStatus MutableTensor::SetData(const std::vector<uint64_t> &val, const std::vector<int64_t> &shape) {\n  return SetData(val.data(), val.size() * sizeof(uint64_t), shape, DT_UINT64);\n}\n\nStatus MutableTensor::SetData(const std::vector<int8_t> &val, const std::vector<int64_t> &shape) {\n  return SetData(val.data(), val.size() * sizeof(int8_t), shape, DT_INT8);\n}\n\nStatus MutableTensor::SetData(const std::vector<int16_t> &val, const std::vector<int64_t> &shape) {\n  return SetData(val.data(), val.size() * sizeof(int16_t), shape, DT_INT16);\n}\n\nStatus MutableTensor::SetData(const std::vector<int32_t> &val, const std::vector<int64_t> &shape) {\n  return SetData(val.data(), val.size() * sizeof(int32_t), shape, DT_INT32);\n}\n\nStatus MutableTensor::SetData(const std::vector<int64_t> &val, const 
std::vector<int64_t> &shape) {\n  return SetData(val.data(), val.size() * sizeof(int64_t), shape, DT_INT64);\n}\n\nStatus MutableTensor::SetData(const std::vector<bool> &val, const std::vector<int64_t> &shape) {\n  std::vector<uint8_t> val_uint8;\n  std::transform(val.begin(), val.end(), std::back_inserter(val_uint8),\n                 [](bool item) { return static_cast<uint8_t>(item); });\n  return SetData(val_uint8.data(), val_uint8.size() * sizeof(bool), shape, DT_BOOL);\n}\n\nStatus MutableTensor::SetData(const std::vector<float> &val, const std::vector<int64_t> &shape) {\n  return SetData(val.data(), val.size() * sizeof(float), shape, DT_FLOAT32);\n}\n\nStatus MutableTensor::SetData(const std::vector<double> &val, const std::vector<int64_t> &shape) {\n  return SetData(val.data(), val.size() * sizeof(double), shape, DT_FLOAT64);\n}\n\nStatus MutableTensor::SetData(const void *data, size_t data_len, const std::vector<int64_t> &shape,\n                              DataType data_type) {\n  if (mutable_proto_tensor_ == nullptr) {\n    return Status(SYSTEM_ERROR) << \"proto tensor cannot be nullptr\";\n  }\n  if (data == nullptr || data_len == 0) {\n    return Status(INVALID_INPUTS) << \"data cannot be nullptr, or data len cannot be 0\";\n  }\n  mutable_proto_tensor_->set_data(data, data_len);\n  auto proto_shape = mutable_proto_tensor_->mutable_shape();\n\n  std::unordered_map<DataType, std::pair<proto::DataType, int64_t>> data_type_map = {\n    {DT_UINT8, {proto::MS_UINT8, sizeof(uint8_t)}},\n    {DT_UINT16, {proto::MS_UINT16, sizeof(uint16_t)}},\n    {DT_UINT32, {proto::MS_UINT32, sizeof(uint32_t)}},\n    {DT_UINT64, {proto::MS_UINT64, sizeof(uint64_t)}},\n    {DT_INT8, {proto::MS_INT8, sizeof(int8_t)}},\n    {DT_INT16, {proto::MS_INT16, sizeof(int16_t)}},\n    {DT_INT32, {proto::MS_INT32, sizeof(int32_t)}},\n    {DT_INT64, {proto::MS_INT64, sizeof(int64_t)}},\n    {DT_BOOL, {proto::MS_BOOL, sizeof(bool)}},\n    {DT_FLOAT16, {proto::MS_FLOAT16, 2}},\n    
{DT_FLOAT32, {proto::MS_FLOAT32, 4}},\n    {DT_FLOAT64, {proto::MS_FLOAT64, 8}},\n  };\n  auto it = data_type_map.find(data_type);\n  if (it == data_type_map.end()) {\n    return Status(INVALID_INPUTS) << \"Input unsupported find data type \" << data_type;\n  }\n  mutable_proto_tensor_->set_dtype(it->second.first);\n\n  auto shape_str = [](const std::vector<int64_t> &val) noexcept {\n    std::stringstream sstream;\n    sstream << \"[\";\n    for (size_t i = 0; i < val.size(); i++) {\n      sstream << val[i];\n      if (i + 1 < val.size()) {\n        sstream << \", \";\n      }\n    }\n    sstream << \"]\";\n    return sstream.str();\n  };\n  int64_t element_cnt = 1;\n  for (auto &item : shape) {\n    proto_shape->add_dims(item);\n    if (item <= 0 || item >= INT64_MAX || INT64_MAX / element_cnt < item) {\n      return Status(INVALID_INPUTS) << \"Input input shape invalid \" << shape_str(shape);\n    }\n    element_cnt *= item;\n  }\n  auto item_size = it->second.second;\n  if (static_cast<int64_t>(data_len) / element_cnt < item_size ||\n      element_cnt * item_size != static_cast<int64_t>(data_len)) {\n    return Status(INVALID_INPUTS) << \"Input input shape \" << shape_str(shape) << \" does not match data len \"\n                                  << data_len;\n  }\n  return SUCCESS;\n}\n\nStatus Tensor::GetBytesData(std::vector<uint8_t> *val) const {\n  if (val == nullptr) {\n    return Status(SYSTEM_ERROR) << \"input val cannot be nullptr\";\n  }\n  if (proto_tensor_ == nullptr) {\n    return Status(SYSTEM_ERROR) << \"proto tensor cannot be nullptr\";\n  }\n  if (proto_tensor_->dtype() != proto::MS_BYTES) {\n    return Status(INVALID_INPUTS) << \"Output data type is not match, its' real data type is \" << proto_tensor_->dtype();\n  }\n  auto &bytes_data = proto_tensor_->bytes_val();\n  if (bytes_data.size() != 1) {\n    return Status(INVALID_INPUTS) << \"Bytes value type size can only be 1\";\n  }\n  val->resize(bytes_data[0].size());\n  memcpy(val->data(), bytes_data[0].data(), 
bytes_data[0].size());\n  return SUCCESS;\n}\n\nStatus Tensor::GetStrData(std::string *val) const {\n  if (val == nullptr) {\n    return Status(SYSTEM_ERROR) << \"input val cannot be nullptr\";\n  }\n  if (proto_tensor_ == nullptr) {\n    return Status(SYSTEM_ERROR) << \"proto tensor cannot be nullptr\";\n  }\n  if (proto_tensor_->dtype() != proto::MS_STRING) {\n    return Status(INVALID_INPUTS) << \"Output data type is not match, its' real data type is \" << proto_tensor_->dtype();\n  }\n  auto &bytes_data = proto_tensor_->bytes_val();\n  if (bytes_data.size() != 1) {\n    return Status(INVALID_INPUTS) << \"String value type size can only be 1\";\n  }\n  val->resize(bytes_data[0].size());\n  memcpy(val->data(), bytes_data[0].data(), bytes_data[0].size());\n  return SUCCESS;\n}\n\ntemplate <proto::DataType proto_dtype, class DT>\nStatus GetInputImp(const proto::Tensor *proto_tensor, std::vector<DT> *val) {\n  if (val == nullptr) {\n    return Status(SYSTEM_ERROR) << \"input val cannot be nullptr\";\n  }\n  if (proto_tensor == nullptr) {\n    return Status(SYSTEM_ERROR) << \"proto tensor cannot be nullptr\";\n  }\n  if (proto_tensor->dtype() != proto_dtype) {\n    return Status(INVALID_INPUTS) << \"Output data type is not match, its' real data type is \" << proto_tensor->dtype();\n  }\n  auto data = proto_tensor->data().data();\n  auto data_len = proto_tensor->data().length();\n  val->resize(data_len / sizeof(DT));\n  memcpy(val->data(), data, data_len);\n  return SUCCESS;\n}\n\nStatus Tensor::GetData(std::vector<uint8_t> *val) const { return GetInputImp<proto::MS_UINT8>(proto_tensor_, val); }\n\nStatus Tensor::GetData(std::vector<uint16_t> *val) const { return GetInputImp<proto::MS_UINT16>(proto_tensor_, val); }\n\nStatus Tensor::GetData(std::vector<uint32_t> *val) const { return GetInputImp<proto::MS_UINT32>(proto_tensor_, val); }\n\nStatus Tensor::GetData(std::vector<uint64_t> *val) const { return GetInputImp<proto::MS_UINT64>(proto_tensor_, val); }\n\nStatus 
Tensor::GetData(std::vector<int8_t> *val) const { return GetInputImp<proto::MS_INT8>(proto_tensor_, val); }\n\nStatus Tensor::GetData(std::vector<int16_t> *val) const { return GetInputImp<proto::MS_INT16>(proto_tensor_, val); }\n\nStatus Tensor::GetData(std::vector<int32_t> *val) const { return GetInputImp<proto::MS_INT32>(proto_tensor_, val); }\n\nStatus Tensor::GetData(std::vector<int64_t> *val) const { return GetInputImp<proto::MS_INT64>(proto_tensor_, val); }\n\nStatus Tensor::GetData(std::vector<bool> *val) const {\n  if (val == nullptr) {\n    return Status(SYSTEM_ERROR) << \"input val cannot be nullptr\";\n  }\n  std::vector<uint8_t> val_uint8;\n  Status status = GetInputImp<proto::MS_BOOL>(proto_tensor_, &val_uint8);\n  if (!status.IsSuccess()) {\n    return status;\n  }\n  std::transform(val_uint8.begin(), val_uint8.end(), std::back_inserter(*val), [](uint8_t item) { return item != 0; });\n  return SUCCESS;\n}\n\nStatus Tensor::GetData(std::vector<float> *val) const { return GetInputImp<proto::MS_FLOAT32>(proto_tensor_, val); }\n\nStatus Tensor::GetData(std::vector<double> *val) const { return GetInputImp<proto::MS_FLOAT64>(proto_tensor_, val); }\n\nStatus Tensor::GetFp16Data(std::vector<uint16_t> *val) const {\n  return GetInputImp<proto::MS_FLOAT16>(proto_tensor_, val);\n}\n\nDataType Tensor::GetDataType() const {\n  if (proto_tensor_ == nullptr) {\n    std::cout << \"proto tensor cannot be nullptr\" << std::endl;\n    return DT_UNKNOWN;\n  }\n  std::unordered_map<proto::DataType, DataType> data_type_map = {\n    {proto::MS_UNKNOWN, DT_UNKNOWN}, {proto::MS_UINT8, DT_UINT8},     {proto::MS_UINT16, DT_UINT16},\n    {proto::MS_UINT32, DT_UINT32},   {proto::MS_UINT64, DT_UINT64},   {proto::MS_INT8, DT_INT8},\n    {proto::MS_INT16, DT_INT16},     {proto::MS_INT32, DT_INT32},     {proto::MS_INT64, DT_INT64},\n    {proto::MS_BOOL, DT_BOOL},       {proto::MS_FLOAT16, DT_FLOAT16}, {proto::MS_FLOAT32, DT_FLOAT32},\n    {proto::MS_FLOAT64, DT_FLOAT64}, 
{proto::MS_STRING, DT_STRING},   {proto::MS_BYTES, DT_BYTES},\n  };\n  auto it_dt = data_type_map.find(proto_tensor_->dtype());\n  if (it_dt == data_type_map.end()) {\n    std::cout << \"Unsupported data type \" << proto_tensor_->dtype() << std::endl;\n    return DT_UNKNOWN;\n  }\n  return it_dt->second;\n}\n\nstd::vector<int64_t> Tensor::GetShape() const {\n  if (proto_tensor_ == nullptr) {\n    std::cout << \"proto tensor cannot be nullptr\" << std::endl;\n    return std::vector<int64_t>();\n  }\n  std::vector<int64_t> shape;\n  auto &dims = proto_tensor_->shape().dims();\n  std::copy(dims.begin(), dims.end(), std::back_inserter(shape));\n  return shape;\n}\n\nTensor Instance::Get(const std::string &item_name) const {\n  if (proto_instance_ == nullptr) {\n    std::cout << \"proto instance cannot be nullptr\" << std::endl;\n    return Tensor(nullptr, nullptr);\n  }\n  auto &items = proto_instance_->items();\n  auto it = items.find(item_name);\n  if (it == items.end()) {\n    std::cout << \"Cannot find item name \" << item_name << std::endl;\n    return Tensor(nullptr, nullptr);\n  }\n  return Tensor(message_owner_, &it->second);\n}\n\nbool Instance::HasErrorMsg(int64_t *error_code, std::string *error_msg) const {\n  if (error_code == nullptr) {\n    return false;\n  }\n  if (error_msg == nullptr) {\n    return false;\n  }\n  if (error_msg_ == nullptr) {\n    return false;\n  }\n  *error_code = error_msg_->error_code();\n  *error_msg = error_msg_->error_msg();\n  return true;\n}\n\nMutableTensor MutableInstance::Add(const std::string &item_name) {\n  if (mutable_proto_instance_ == nullptr) {\n    std::cout << \"proto instance cannot be nullptr\" << std::endl;\n    return MutableTensor(nullptr, nullptr);\n  }\n  auto items = mutable_proto_instance_->mutable_items();\n  auto &proto_tensor = (*items)[item_name];\n  return MutableTensor(message_owner_, &proto_tensor);\n}\n\nInstancesRequest::InstancesRequest() { request_ = std::make_shared<proto::PredictRequest>(); 
}\n\nMutableInstance InstancesRequest::AddInstance() {\n  auto proto_instance = request_->add_instances();\n  return MutableInstance(request_, proto_instance);\n}\n\nInstancesReply::InstancesReply() { reply_ = std::make_shared<proto::PredictReply>(); }\n\nstd::vector<Instance> InstancesReply::GetResult() const {\n  std::vector<Instance> instances;\n  auto &proto_instances = reply_->instances();\n  auto &proto_error_msgs = reply_->error_msg();\n  for (int i = 0; i < proto_instances.size(); i++) {\n    auto &proto_instance = proto_instances[i];\n    const proto::ErrorMsg *error_msg = nullptr;\n    if (proto_error_msgs.size() == 1) {\n      error_msg = &proto_error_msgs[0];\n    } else if (proto_error_msgs.size() == proto_instances.size() && proto_error_msgs[i].error_code() != 0) {\n      error_msg = &proto_error_msgs[i];\n    }\n    instances.push_back(Instance(reply_, &proto_instance, error_msg));\n  }\n  return instances;\n}\n\nclass ClientImpl {\n public:\n  ClientImpl(const std::string &server_ip, uint64_t server_port) {\n    std::string target_str = server_ip + \":\" + std::to_string(server_port);\n    auto channel = grpc::CreateChannel(target_str, grpc::InsecureChannelCredentials());\n    stub_ = proto::MSService::NewStub(channel);\n  }\n  Status Predict(const proto::PredictRequest &request, proto::PredictReply *reply) {\n    if (reply == nullptr) {\n      return Status(SYSTEM_ERROR, \"ClientImpl::Predict input reply cannot be nullptr\");\n    }\n    grpc::ClientContext context;\n\n    // The actual RPC.\n    grpc::Status status = stub_->Predict(&context, request, reply);\n    if (status.ok()) {\n      return SUCCESS;\n    } else {\n      std::cout << status.error_code() << \": \" << status.error_message() << std::endl;\n      return Status(FAILED, status.error_message());\n    }\n  }\n\n private:\n  std::unique_ptr<proto::MSService::Stub> stub_;\n};\n\nClient::Client(const std::string &server_ip, uint64_t server_port, const std::string &servable_name,\n        
       const std::string &method_name, uint64_t version_number)\n    : server_ip_(server_ip),\n      server_port_(server_port),\n      servable_name_(servable_name),\n      method_name_(method_name),\n      version_number_(version_number),\n      impl_(std::make_shared<ClientImpl>(server_ip, server_port)) {}\n\nStatus Client::SendRequest(const InstancesRequest &request, InstancesReply *reply) {\n  if (reply == nullptr) {\n    return Status(SYSTEM_ERROR) << \"input reply cannot be nullptr\";\n  }\n  proto::PredictRequest *proto_request = request.request_.get();\n  proto::PredictReply *proto_reply = reply->reply_.get();\n  auto servable_spec = proto_request->mutable_servable_spec();\n  servable_spec->set_name(servable_name_);\n  servable_spec->set_method_name(method_name_);\n  servable_spec->set_version_number(version_number_);\n\n  Status result = impl_->Predict(*proto_request, proto_reply);\n  return result;\n}\n}  // namespace client\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "mindspore_serving/client/cpp/client.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_CLIENT_H\n#define MINDSPORE_SERVING_CLIENT_H\n\n#include <string>\n#include <vector>\n#include <memory>\n#include <sstream>\n\nnamespace google {\nnamespace protobuf {\nclass Message;\n}\n}  // namespace google\n\nnamespace mindspore {\nnamespace serving {\n#define MS_API __attribute__((visibility(\"default\")))\n\nnamespace proto {\nclass Tensor;\nclass Instance;\nclass PredictRequest;\nclass PredictReply;\nclass ErrorMsg;\n}  // namespace proto\n\nnamespace client {\n\nusing ProtoMsgOwner = std::shared_ptr<google::protobuf::Message>;\n\nenum DataType {\n  DT_UNKNOWN,\n  DT_UINT8,\n  DT_UINT16,\n  DT_UINT32,\n  DT_UINT64,\n  DT_INT8,\n  DT_INT16,\n  DT_INT32,\n  DT_INT64,\n  DT_BOOL,\n  DT_FLOAT16,\n  DT_FLOAT32,\n  DT_FLOAT64,\n  DT_STRING,\n  DT_BYTES,\n};\n\nenum StatusCode { SUCCESS = 0, FAILED, INVALID_INPUTS, SYSTEM_ERROR, UNAVAILABLE };\n\nclass MS_API Status {\n public:\n  Status() : status_code_(FAILED) {}\n  Status(enum StatusCode status_code, const std::string &status_msg = \"\")  // NOLINT(runtime/explicit)\n      : status_code_(status_code), status_msg_(status_msg) {}\n  bool IsSuccess() const { return status_code_ == SUCCESS; }\n  enum StatusCode StatusCode() const { return status_code_; }\n  std::string StatusMessage() { return status_msg_; }\n  bool operator==(const Status &other) const { 
return status_code_ == other.status_code_; }\n  bool operator==(enum StatusCode other_code) const { return status_code_ == other_code; }\n  bool operator!=(const Status &other) const { return status_code_ != other.status_code_; }\n  bool operator!=(enum StatusCode other_code) const { return status_code_ != other_code; }\n  operator bool() const = delete;\n\n  template <class T>\n  Status &operator<<(T val);\n  Status &operator<<(DataType val);\n  template <class T>\n  Status &operator<<(const std::vector<T> &val);\n\n private:\n  enum StatusCode status_code_;\n  std::string status_msg_;\n};\n\nclass MS_API Tensor {\n public:\n  Tensor(const ProtoMsgOwner &owner, const proto::Tensor *proto_tensor)\n      : message_owner_(owner), proto_tensor_(proto_tensor) {}\n  virtual ~Tensor() = default;\n  // Bytes type: for images etc.\n  Status GetBytesData(std::vector<uint8_t> *val) const;\n  Status GetStrData(std::string *val) const;\n  Status GetData(std::vector<uint8_t> *val) const;\n  Status GetData(std::vector<uint16_t> *val) const;\n  Status GetData(std::vector<uint32_t> *val) const;\n  Status GetData(std::vector<uint64_t> *val) const;\n  Status GetData(std::vector<int8_t> *val) const;\n  Status GetData(std::vector<int16_t> *val) const;\n  Status GetData(std::vector<int32_t> *val) const;\n  Status GetData(std::vector<int64_t> *val) const;\n  Status GetData(std::vector<bool> *val) const;\n  Status GetData(std::vector<float> *val) const;\n  Status GetData(std::vector<double> *val) const;\n  Status GetFp16Data(std::vector<uint16_t> *val) const;\n  DataType GetDataType() const;\n  std::vector<int64_t> GetShape() const;\n\n  bool IsValid() const { return proto_tensor_ != nullptr; }\n\n protected:\n  ProtoMsgOwner message_owner_;\n\n private:\n  const proto::Tensor *proto_tensor_;\n};\n\nclass MS_API MutableTensor : public Tensor {\n public:\n  MutableTensor(const ProtoMsgOwner &owner, proto::Tensor *proto_tensor)\n      : Tensor(owner, proto_tensor), 
mutable_proto_tensor_(proto_tensor) {}\n  ~MutableTensor() = default;\n\n  // Bytes type: for images etc.\n  Status SetBytesData(const std::vector<uint8_t> &val);\n  Status SetStrData(const std::string &val);\n\n  Status SetData(const std::vector<uint8_t> &val, const std::vector<int64_t> &shape);\n  Status SetData(const std::vector<uint16_t> &val, const std::vector<int64_t> &shape);\n  Status SetData(const std::vector<uint32_t> &val, const std::vector<int64_t> &shape);\n  Status SetData(const std::vector<uint64_t> &val, const std::vector<int64_t> &shape);\n  Status SetData(const std::vector<int8_t> &val, const std::vector<int64_t> &shape);\n  Status SetData(const std::vector<int16_t> &val, const std::vector<int64_t> &shape);\n  Status SetData(const std::vector<int32_t> &val, const std::vector<int64_t> &shape);\n  Status SetData(const std::vector<int64_t> &val, const std::vector<int64_t> &shape);\n  Status SetData(const std::vector<bool> &val, const std::vector<int64_t> &shape);\n  Status SetData(const std::vector<float> &val, const std::vector<int64_t> &shape);\n  Status SetData(const std::vector<double> &val, const std::vector<int64_t> &shape);\n\n  Status SetData(const void *data, size_t data_bytes_len, const std::vector<int64_t> &shape, DataType data_type);\n\n private:\n  proto::Tensor *mutable_proto_tensor_;\n};\n\nclass MS_API Instance {\n public:\n  Instance(const ProtoMsgOwner &owner, const proto::Instance *proto_instance, const proto::ErrorMsg *error_msg)\n      : message_owner_(owner), proto_instance_(proto_instance), error_msg_(error_msg) {}\n  virtual ~Instance() = default;\n\n  Tensor Get(const std::string &item_name) const;\n\n  bool IsValid() const { return proto_instance_ != nullptr; }\n  bool HasErrorMsg(int64_t *error_code, std::string *error_msg) const;\n\n protected:\n  ProtoMsgOwner message_owner_;\n\n private:\n  const proto::Instance *proto_instance_;\n  const proto::ErrorMsg *error_msg_;\n};\n\nclass MS_API MutableInstance : public Instance 
{\n public:\n  MutableInstance(const ProtoMsgOwner &owner, proto::Instance *proto_instance)\n      : Instance(owner, proto_instance, nullptr), mutable_proto_instance_(proto_instance) {}\n  ~MutableInstance() = default;\n\n  MutableTensor Add(const std::string &item_name);\n\n private:\n  proto::Instance *mutable_proto_instance_;\n};\n\nclass MS_API InstancesRequest {\n public:\n  InstancesRequest();\n  ~InstancesRequest() = default;\n  MutableInstance AddInstance();\n\n private:\n  std::shared_ptr<proto::PredictRequest> request_ = nullptr;\n  friend class Client;\n};\n\nclass MS_API InstancesReply {\n public:\n  InstancesReply();\n  ~InstancesReply() = default;\n  std::vector<Instance> GetResult() const;\n\n private:\n  std::shared_ptr<proto::PredictReply> reply_ = nullptr;\n  friend class Client;\n};\n\nclass ClientImpl;\nclass MS_API Client {\n public:\n  Client(const std::string &server_ip, uint64_t server_port, const std::string &servable_name,\n         const std::string &method_name, uint64_t version_number = 0);\n  ~Client() = default;\n\n  Status SendRequest(const InstancesRequest &request, InstancesReply *reply);\n\n private:\n  std::string server_ip_;\n  uint64_t server_port_;\n  std::string servable_name_;\n  std::string method_name_;\n  uint64_t version_number_ = 0;\n  std::shared_ptr<ClientImpl> impl_;\n};\n\ntemplate <class T>\nStatus &Status::operator<<(T val) {\n  std::stringstream stringstream;\n  stringstream << val;\n  status_msg_ += stringstream.str();\n  return *this;\n}\n\ntemplate <class T>\nStatus &Status::operator<<(const std::vector<T> &val) {\n  operator<<(\"[\");\n  for (size_t i = 0; i < val.size(); i++) {\n    operator<<(val[i]);\n    if (i != val.size() - 1) {\n      operator<<(\", \");\n    }\n  }\n  operator<<(\"]\");\n  return *this;\n}\n\n}  // namespace client\n}  // namespace serving\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_CLIENT_H\n"
  },
  {
    "path": "mindspore_serving/client/python/__init__.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n"
  },
  {
    "path": "mindspore_serving/client/python/client.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MindSpore Serving Client\"\"\"\n\nimport grpc\nimport numpy as np\nimport mindspore_serving.proto.ms_service_pb2 as ms_service_pb2\nimport mindspore_serving.proto.ms_service_pb2_grpc as ms_service_pb2_grpc\n\n\ndef _create_tensor(data, tensor=None):\n    \"\"\"Create tensor from numpy data\"\"\"\n    if tensor is None:\n        tensor = ms_service_pb2.Tensor()\n\n    tensor.shape.dims.extend(data.shape)\n    dtype_map = {\n        np.bool: ms_service_pb2.MS_BOOL,\n        np.int8: ms_service_pb2.MS_INT8,\n        np.uint8: ms_service_pb2.MS_UINT8,\n        np.int16: ms_service_pb2.MS_INT16,\n        np.uint16: ms_service_pb2.MS_UINT16,\n        np.int32: ms_service_pb2.MS_INT32,\n        np.uint32: ms_service_pb2.MS_UINT32,\n\n        np.int64: ms_service_pb2.MS_INT64,\n        np.uint64: ms_service_pb2.MS_UINT64,\n        np.float16: ms_service_pb2.MS_FLOAT16,\n        np.float32: ms_service_pb2.MS_FLOAT32,\n        np.float64: ms_service_pb2.MS_FLOAT64,\n    }\n    for k, v in dtype_map.items():\n        if k == data.dtype:\n            tensor.dtype = v\n            break\n    if tensor.dtype == ms_service_pb2.MS_UNKNOWN:\n        raise RuntimeError(\"Unknown data type \" + str(data.dtype))\n    tensor.data = data.tobytes()\n    return tensor\n\n\ndef 
_create_scalar_tensor(vals, tensor=None):\n    \"\"\"Create tensor from scalar data\"\"\"\n    if not isinstance(vals, (tuple, list)):\n        vals = (vals,)\n    return _create_tensor(np.array(vals), tensor)\n\n\ndef _create_bytes_tensor(bytes_vals, tensor=None):\n    \"\"\"Create tensor from bytes data\"\"\"\n    if tensor is None:\n        tensor = ms_service_pb2.Tensor()\n\n    if not isinstance(bytes_vals, (tuple, list)):\n        bytes_vals = (bytes_vals,)\n    tensor.shape.dims.extend([len(bytes_vals)])\n    tensor.dtype = ms_service_pb2.MS_BYTES\n    for item in bytes_vals:\n        tensor.bytes_val.append(item)\n    return tensor\n\n\ndef _create_str_tensor(str_vals, tensor=None):\n    \"\"\"Create tensor from str data\"\"\"\n    if tensor is None:\n        tensor = ms_service_pb2.Tensor()\n\n    if not isinstance(str_vals, (tuple, list)):\n        str_vals = (str_vals,)\n    tensor.shape.dims.extend([len(str_vals)])\n    tensor.dtype = ms_service_pb2.MS_STRING\n    for item in str_vals:\n        tensor.bytes_val.append(bytes(item, encoding=\"utf8\"))\n    return tensor\n\n\ndef _create_numpy_from_tensor(tensor):\n    \"\"\"Create numpy from protobuf tensor\"\"\"\n    dtype_map = {\n        ms_service_pb2.MS_BOOL: np.bool,\n        ms_service_pb2.MS_INT8: np.int8,\n        ms_service_pb2.MS_UINT8: np.uint8,\n        ms_service_pb2.MS_INT16: np.int16,\n        ms_service_pb2.MS_UINT16: np.uint16,\n        ms_service_pb2.MS_INT32: np.int32,\n        ms_service_pb2.MS_UINT32: np.uint32,\n\n        ms_service_pb2.MS_INT64: np.int64,\n        ms_service_pb2.MS_UINT64: np.uint64,\n        ms_service_pb2.MS_FLOAT16: np.float16,\n        ms_service_pb2.MS_FLOAT32: np.float32,\n        ms_service_pb2.MS_FLOAT64: np.float64,\n    }\n    if tensor.dtype == ms_service_pb2.MS_STRING or tensor.dtype == ms_service_pb2.MS_BYTES:\n        result = []\n        for item in tensor.bytes_val:\n            if tensor.dtype == ms_service_pb2.MS_STRING:\n                
result.append(bytes.decode(item))\n            else:\n                result.append(item)\n        if len(result) == 1:\n            return result[0]\n        return result\n\n    result = np.frombuffer(tensor.data, dtype_map[tensor.dtype]).reshape(tensor.shape.dims)\n    return result\n\n\ndef _check_str(arg_name, str_val):\n    \"\"\"Check whether the input parameters are reasonable str input\"\"\"\n    if not isinstance(str_val, str):\n        raise RuntimeError(f\"Parameter '{arg_name}' should be str, but actually {type(str_val)}\")\n    if not str_val:\n        raise RuntimeError(f\"Parameter '{arg_name}' should not be empty str\")\n\n\ndef _check_int(arg_name, int_val, minimum=None, maximum=None):\n    \"\"\"Check whether the input parameters are reasonable int input\"\"\"\n    if not isinstance(int_val, int):\n        raise RuntimeError(f\"Parameter '{arg_name}' should be int, but actually {type(int_val)}\")\n    if minimum is not None and int_val < minimum:\n        if maximum is not None:\n            raise RuntimeError(f\"Parameter '{arg_name}' should be in range [{minimum},{maximum}]\")\n        raise RuntimeError(f\"Parameter '{arg_name}' should be >= {minimum}\")\n    if maximum is not None and int_val > maximum:\n        if minimum is not None:\n            raise RuntimeError(f\"Parameter '{arg_name}' should be in range [{minimum},{maximum}]\")\n        raise RuntimeError(f\"Parameter '{arg_name}' should be <= {maximum}\")\n\n\nclass SSLConfig:\n    \"\"\"\n    The client's ssl_config encapsulates grpc's ssl channel credentials for SSL-enabled connections.\n\n    Args:\n        certificate (str, optional): File holding the PEM-encoded certificate chain as a byte string to use or\n            ``None`` if no certificate chain should be used. Default: ``None``.\n        private_key (str, optional): File holding the PEM-encoded private key as a byte string, or ``None``\n            if no private key should be used. 
Default: ``None``.\n        custom_ca (str, optional): File holding the PEM-encoded root certificates as a byte string, or ``None``\n            to retrieve them from a default location chosen by gRPC runtime. Default: ``None``.\n\n    Raises:\n        RuntimeError: The type or value of the parameters is invalid.\n\n    \"\"\"\n\n    def __init__(self, certificate=None, private_key=None, custom_ca=None):\n        if certificate is not None:\n            _check_str(\"certificate\", certificate)\n        if private_key is not None:\n            _check_str(\"private_key\", private_key)\n        if custom_ca is not None:\n            _check_str(\"custom_ca\", custom_ca)\n\n        self.certificate = certificate\n        self.private_key = private_key\n        self.custom_ca = custom_ca\n\n\nclass Client:\n    \"\"\"\n    The Client encapsulates the serving gRPC API, which can be used to create requests,\n    access serving, and parse results.\n\n    Note:\n        The maximum amount of data that the client can send in one request is 512MB, and the maximum amount of data that\n        the server can accept can be configured as 1~512MB, 100MB by default.\n\n    Args:\n        address (str): Serving address.\n        servable_name (str): The name of servable supplied by Serving.\n        method_name (str): The name of method supplied by servable.\n        version_number (int, optional): The version number of servable, ``0`` means the maximum version number in all\n            running versions. 
Default: ``0``.\n        ssl_config (mindspore_serving.client.SSLConfig, optional): The server's ssl_config, if ``None``, disabled ssl.\n            Default: ``None``.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid, or other errors happened.\n\n    Examples:\n        >>> from mindspore_serving.client import Client\n        >>> import numpy as np\n        >>> client = Client(\"localhost:5500\", \"add\", \"add_cast\")\n        >>> instances = []\n        >>> x1 = np.ones((2, 2), np.int32)\n        >>> x2 = np.ones((2, 2), np.int32)\n        >>> instances.append({\"x1\": x1, \"x2\": x2})\n        >>> result = client.infer(instances)\n        >>> print(result)\n    \"\"\"\n\n    def __init__(self, address, servable_name, method_name, version_number=0, ssl_config=None):\n        _check_str(\"address\", address)\n        _check_str(\"servable_name\", servable_name)\n        _check_str(\"method_name\", method_name)\n        _check_int(\"version_number\", version_number, 0)\n\n        self.address = address\n        self.servable_name = servable_name\n        self.method_name = method_name\n        self.version_number = version_number\n\n        msg_bytes_size = 512 * 1024 * 1024  # 512MB\n        options = [\n            ('grpc.max_send_message_length', msg_bytes_size),\n            ('grpc.max_receive_message_length', msg_bytes_size),\n        ]\n        if ssl_config is not None:\n            if not isinstance(ssl_config, SSLConfig):\n                raise RuntimeError(\"The type of ssl_config should be type of SSLConfig\")\n            rc_bytes = pk_bytes = c_bytes = None\n            if ssl_config.certificate is not None:\n                with open(ssl_config.certificate, 'rb') as c_fs:\n                    c_bytes = c_fs.read()\n            if ssl_config.private_key is not None:\n                with open(ssl_config.private_key, 'rb') as pk_fs:\n                    pk_bytes = pk_fs.read()\n            if ssl_config.custom_ca is 
not None:\n                with open(ssl_config.custom_ca, 'rb') as rc_fs:\n                    rc_bytes = rc_fs.read()\n            if (c_bytes is None and pk_bytes is not None) or (c_bytes is not None and pk_bytes is None):\n                raise RuntimeError(\"The certificate and private_key should be passed at the same time\")\n            creds = grpc.ssl_channel_credentials(root_certificates=rc_bytes,\n                                                 private_key=pk_bytes,\n                                                 certificate_chain=c_bytes)\n            self.channel = grpc.secure_channel(address, creds, options=options)\n        else:\n            self.channel = grpc.insecure_channel(address, options=options)\n\n        self.stub = ms_service_pb2_grpc.MSServiceStub(self.channel)\n\n    def infer(self, instances):\n        \"\"\"\n        Used to create requests, access serving service, and parse and return results.\n\n        Args:\n            instances (Union[dict, tuple[dict]]): Instance or tuple of instances,\n                every instance item is the inputs dict. 
The key is the input name,\n                and the value is the input value, the type of value can be python int,\n                float, bool, str, bytes, numpy number, or numpy array object.\n\n        Raises:\n            RuntimeError: The type or value of the parameters is invalid, or other errors happened.\n\n        Examples:\n            >>> from mindspore_serving.client import Client\n            >>> import numpy as np\n            >>> client = Client(\"localhost:5500\", \"add\", \"add_cast\")\n            >>> instances = []\n            >>> x1 = np.ones((2, 2), np.int32)\n            >>> x2 = np.ones((2, 2), np.int32)\n            >>> instances.append({\"x1\": x1, \"x2\": x2})\n            >>> result = client.infer(instances)\n            >>> print(result)\n        \"\"\"\n        request = self._create_request(instances)\n        try:\n            result = self.stub.Predict(request)\n            return self._paser_result(result)\n\n        except grpc.RpcError as e:\n            print(e.details())\n            status_code = e.code()\n            print(status_code.name)\n            print(status_code.value)\n            return {\"error\": f\"Grpc Error, {status_code.value}, {e.details()}\"}\n\n    def infer_async(self, instances):\n        \"\"\"\n        Used to create requests, async access serving.\n\n        Args:\n            instances (Union[dict, tuple[dict]]): Instance or tuple of instances, every instance item\n                is the inputs dict. 
The key is the input name, and the value is the input value, the\n                type of value can be python int, float, bool, str, bytes, numpy number,\n                or numpy array object.\n\n        Raises:\n            RuntimeError: The type or value of the parameters is invalid, or other errors happened.\n\n        Examples:\n            >>> from mindspore_serving.client import Client\n            >>> import numpy as np\n            >>> client = Client(\"localhost:5500\", \"add\", \"add_cast\")\n            >>> instances = []\n            >>> x1 = np.ones((2, 2), np.int32)\n            >>> x2 = np.ones((2, 2), np.int32)\n            >>> instances.append({\"x1\": x1, \"x2\": x2})\n            >>> result_future = client.infer_async(instances)\n            >>> result = result_future.result()\n            >>> print(result)\n        \"\"\"\n        request = self._create_request(instances)\n        try:\n            result_future = self.stub.Predict.future(request)\n            return ClientGrpcAsyncResult(result_future)\n\n        except grpc.RpcError as e:\n            print(e.details())\n            status_code = e.code()\n            print(status_code.name)\n            print(status_code.value)\n            return ClientGrpcAsyncError({\"error\": f\"Grpc Error, {status_code.value}, {e.details()}\"})\n\n    def _create_request(self, instances):\n        \"\"\"Used to create request spec.\"\"\"\n        if not isinstance(instances, (tuple, list)):\n            instances = (instances,)\n\n        request = ms_service_pb2.PredictRequest()\n        request.servable_spec.name = self.servable_name\n        request.servable_spec.method_name = self.method_name\n        request.servable_spec.version_number = self.version_number\n\n        for item in instances:\n            if isinstance(item, dict):\n                request.instances.append(self._create_instance(**item))\n            else:\n                raise RuntimeError(\"instance should be a map\")\n        
return request\n\n    @staticmethod\n    def _create_instance(**kwargs):\n        \"\"\"Used to create gRPC instance.\"\"\"\n        instance = ms_service_pb2.Instance()\n        for k, w in kwargs.items():\n            tensor = instance.items[k]\n            if isinstance(w, (np.ndarray, np.number)):\n                _create_tensor(w, tensor)\n            elif isinstance(w, str):\n                _create_str_tensor(w, tensor)\n            elif isinstance(w, (bool, int, float)):\n                _create_scalar_tensor(w, tensor)\n            elif isinstance(w, bytes):\n                _create_bytes_tensor(w, tensor)\n            else:\n                raise RuntimeError(\"Not support value type \" + str(type(w)))\n        return instance\n\n    @staticmethod\n    def _paser_result(result):\n        \"\"\"Used to parse result.\"\"\"\n        error_msg_len = len(result.error_msg)\n        if error_msg_len == 1 and result.error_msg[0].error_code != 0:\n            return {\"error\": bytes.decode(result.error_msg[0].error_msg)}\n        ret_val = []\n        instance_len = len(result.instances)\n        if error_msg_len not in (0, instance_len):\n            raise RuntimeError(f\"error msg result size {error_msg_len} not be 0, 1 or \"\n                               f\"length of instances {instance_len}\")\n        for i in range(instance_len):\n            instance = result.instances[i]\n            if error_msg_len == 0 or result.error_msg[i].error_code == 0:\n                instance_map = {}\n                for k, w in instance.items.items():\n                    instance_map[k] = _create_numpy_from_tensor(w)\n                ret_val.append(instance_map)\n            else:\n                ret_val.append({\"error\": bytes.decode(result.error_msg[i].error_msg)})\n        return ret_val\n\n\nclass ClientGrpcAsyncResult:\n    \"\"\"\n    When Client.infer_async invoke successfully, a ClientGrpcAsyncResult object is returned.\n\n    Examples:\n        >>> from 
mindspore_serving.client import Client\n        >>> import numpy as np\n        >>> client = Client(\"localhost:5500\", \"add\", \"add_cast\")\n        >>> instances = []\n        >>> x1 = np.ones((2, 2), np.int32)\n        >>> x2 = np.ones((2, 2), np.int32)\n        >>> instances.append({\"x1\": x1, \"x2\": x2})\n        >>> result_future = client.infer_async(instances)\n        >>> result = result_future.result()\n        >>> print(result)\n    \"\"\"\n\n    def __init__(self, result_future):\n        self.result_future = result_future\n\n    def result(self):\n        \"\"\"Wait and get result of inference result, the gRPC message will be parse to tuple of instances result.\n        Every instance result is dict, and value could be numpy array/number, str or bytes according gRPC Tensor\n        data type.\n        \"\"\"\n        result = self.result_future.result()\n        # pylint: disable=protected-access\n        result = Client._paser_result(result)\n        return result\n\n\nclass ClientGrpcAsyncError:\n    \"\"\"When gRPC failed happened when calling Client.infer_async, a ClientGrpcAsyncError object is returned.\n    \"\"\"\n\n    def __init__(self, result_error):\n        self.result_error = result_error\n\n    def result(self):\n        \"\"\"Get gRPC error message.\n        \"\"\"\n        return self.result_error\n"
  },
  {
    "path": "mindspore_serving/log.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nlog module\n\"\"\"\nimport sys\nimport os\nimport stat\nimport time\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport traceback\nimport threading\nimport platform\n\nif platform.system() != \"Windows\":\n    import fcntl\n\n__all__ = ['get_level', 'get_log_config']\n\n# The lock for setting up the logger\n_setup_logger_lock = threading.Lock()\n\n# When getting the logger, Used to check whether\n# the logger already exists\n_global_logger = None\n\n# The flag for enable console output\n_std_on = '1'\n# The flag for disable console output\n_std_off = '0'\n# Rotating max bytes, default is 50M\n_logger_def_max_bytes = '52428800'\n# Rotating backup count, default is 30\n_logger_def_backup_count = '30'\n# The default log level\n_logger_def_level = '2'\n\n# Log level name and level mapping\n_name_to_level = {\n    'ERROR': 40,\n    'WARNING': 30,\n    'INFO': 20,\n    'DEBUG': 10,\n}\n\n# GLog level and level name\n_gloglevel_to_name = {\n    '3': 'ERROR',\n    '2': 'WARNING',\n    '1': 'INFO',\n    '0': 'DEBUG',\n}\n\n# The mapping of logger configurations to glog configurations\n_confmap_dict = {'level': 'GLOG_v', 'console': 'GLOG_logtostderr', 'filepath': 'GLOG_log_dir',\n                 'maxBytes': 'logger_maxBytes', 'backupCount': 
'logger_backupCount',\n                 'stderr_level': 'GLOG_stderrthreshold'}\n\n\nclass _MultiCompatibleRotatingFileHandler(RotatingFileHandler):\n    \"\"\"Inherit RotatingFileHandler for multiprocess compatibility.\"\"\"\n\n    def rolling_rename(self):\n        \"\"\"Rolling rename log files and set permission of Log file\"\"\"\n        for i in range(self.backupCount - 1, 0, -1):\n            sfn = self.rotation_filename(\"%s.%d\" % (self.baseFilename, i))\n            dfn = self.rotation_filename(\"%s.%d\" % (self.baseFilename, i + 1))\n            if os.path.exists(sfn):\n                if os.path.exists(dfn):\n                    os.remove(dfn)\n                # Modify the permission of Log file\n                os.chmod(sfn, stat.S_IREAD)\n                os.rename(sfn, dfn)\n\n    def doRollover(self):\n        \"\"\"Override doRollover for multiprocess compatibility\n        and setting permission of Log file.\n        \"\"\"\n        if self.stream:\n            self.stream.close()\n            self.stream = None\n\n        # Attain an exclusive lock with blocking mode by `fcntl` module.\n        with open(self.baseFilename, 'a') as file_pointer:\n            if platform.system() != \"Windows\":\n                fcntl.lockf(file_pointer.fileno(), fcntl.LOCK_EX)\n\n        if self.backupCount > 0:\n            self.rolling_rename()\n\n        dfn = self.rotation_filename(self.baseFilename + \".1\")\n        if os.path.exists(dfn):\n            os.remove(dfn)\n        # Modify the permission of Log file\n        os.chmod(self.baseFilename, stat.S_IREAD)\n        self.rotate(self.baseFilename, dfn)\n\n        with open(self.baseFilename, 'a'):\n            # Modify the permission of Log file\n            os.chmod(self.baseFilename, stat.S_IREAD | stat.S_IWRITE)\n\n        if not self.delay:\n            self.stream = self._open()\n\n\nclass _DataFormatter(logging.Formatter):\n    \"\"\"Log formatter\"\"\"\n\n    def __init__(self, sub_module, fmt=None, 
**kwargs):\n        \"\"\"\n        Initialization of logFormatter.\n\n        Args:\n            sub_module (str): The submodule name.\n            fmt (str): Specified format pattern. Default: None.\n        \"\"\"\n        super(_DataFormatter, self).__init__(fmt=fmt, **kwargs)\n        self.sub_module = sub_module.upper()\n\n    def formatTime(self, record, datefmt=None):\n        \"\"\"\n        Override formatTime for uniform format %Y-%m-%d-%H:%M:%S.SSS.SSS\n\n        Args:\n            record (str): Log record.\n            datefmt (str): Date format.\n\n        Returns:\n            str, formatted timestamp.\n        \"\"\"\n        created_time = self.converter(record.created)\n        if datefmt:\n            return time.strftime(datefmt, created_time)\n\n        timestamp = time.strftime('%Y-%m-%d-%H:%M:%S', created_time)\n        msecs = str(round(record.msecs * 1000))\n        # Format the time stamp\n        return f'{timestamp}.{msecs[:3]}.{msecs[3:]}'\n\n    def format(self, record):\n        \"\"\"\n        Apply log format with specified pattern.\n\n        Args:\n            record (str): Format pattern.\n\n        Returns:\n            str, formatted log content according to format pattern.\n        \"\"\"\n        # NOTICE: when the Installation directory of mindspore changed,\n        # ms_home_path must be changed\n        ms_install_home_path = 'mindspore'\n        idx = record.pathname.rfind(ms_install_home_path)\n        if idx >= 0:\n            # Get the relative path of the file\n            record.filepath = record.pathname[idx:]\n        else:\n            record.filepath = record.pathname\n        record.sub_module = self.sub_module\n        return super().format(record)\n\n\ndef _get_logger():\n    \"\"\"\n    Get logger instance.\n\n    Returns:\n        Logger, a logger.\n    \"\"\"\n    if _global_logger:\n        return _global_logger\n\n    kwargs = _get_env_config()\n    _verify_config(kwargs)\n    logger = 
_setup_logger(_adapt_cfg(kwargs))\n    return logger\n\n\ndef _adapt_cfg(kwargs):\n    \"\"\"\n    Glog configurations converted to logger configurations.\n\n    Args:\n        kwargs (dict): The dictionary of log configurations.\n\n            - console (str): Whether to output log to stdout.\n            - level (str): Log level.\n            - filepath (str): The path for saving logs, if console is false, a file path must be assigned.\n            - maxBytes (str): The Maximum value of a log file for rotating, only valid if console is false.\n            - backupCount (str): The count of rotating backup log files, only valid if console is false.\n\n    Returns:\n        Dict, the input parameter dictionary.\n    \"\"\"\n    kwargs['level'] = _gloglevel_to_name.get(kwargs.get('level', _logger_def_level))\n    kwargs['stderr_level'] = _gloglevel_to_name.get(kwargs.get('stderr_level', _logger_def_level))\n    kwargs['console'] = not kwargs.get('console') == _std_off\n    kwargs['maxBytes'] = int(kwargs.get('maxBytes', _logger_def_max_bytes))\n    kwargs['backupCount'] = int(kwargs.get('backupCount', _logger_def_backup_count))\n    return kwargs\n\n\ndef info(msg, *args, **kwargs):\n    \"\"\"\n    Log a message with severity 'INFO' on the MindSpore logger.\n\n    Examples:\n        >>> from mindspore_serving import log as logger\n        >>> logger.info(\"The arg(%s) is: %r\", name, arg)\n    \"\"\"\n    _get_logger().info(msg, *args, **kwargs)\n\n\ndef debug(msg, *args, **kwargs):\n    \"\"\"\n    Log a message with severity 'DEBUG' on the MindSpore logger.\n\n    Examples:\n        >>> from mindspore_serving import log as logger\n        >>> logger.debug(\"The arg(%s) is: %r\", name, arg)\n    \"\"\"\n    _get_logger().debug(msg, *args, **kwargs)\n\n\ndef error(msg, *args, **kwargs):\n    \"\"\"Log a message with severity 'ERROR' on the MindSpore logger.\"\"\"\n    _get_logger().error(msg, *args, **kwargs)\n\n\ndef warning(msg, *args, **kwargs):\n    \"\"\"Log a 
message with severity 'WARNING' on the MindSpore logger.\"\"\"\n    _get_logger().warning(msg, *args, **kwargs)\n\n\ndef get_level():\n    \"\"\"\n    Get the logger level.\n\n    Returns:\n        str, the Log level includes 3(ERROR), 2(WARNING), 1(INFO), 0(DEBUG).\n\n    Examples:\n        >>> import os\n        >>> os.environ['GLOG_v'] = '0'\n        >>> from mindspore_serving import log as logger\n        >>> logger.get_level()\n    \"\"\"\n    # level and glog level mapping dictionary\n    level_to_glog_level = dict(zip(_name_to_level.values(), _gloglevel_to_name.keys()))\n\n    return level_to_glog_level.get(_get_logger().getEffectiveLevel())\n\n\ndef _get_formatter():\n    \"\"\"\n    Get the string of log formatter.\n\n    Returns:\n        str, the string of log formatter.\n    \"\"\"\n    formatter = '[%(levelname)s] %(sub_module)s(%(process)d:' \\\n                '%(thread)d,%(processName)s):%(asctime)s ' \\\n                '[%(filepath)s:%(lineno)d] %(message)s'\n    return formatter\n\n\ndef _get_env_config():\n    \"\"\"\n    Get configurations from environment variables.\n\n    Returns:\n        Dict, the dictionary of configurations.\n    \"\"\"\n    config_dict = {}\n    for key, env_value in _confmap_dict.items():\n        value = os.environ.get(env_value)\n        if value:\n            config_dict[key] = value.strip()\n    return config_dict\n\n\ndef _verify_config(kwargs):\n    \"\"\"\n    Verify log configurations.\n\n    Args:\n        kwargs (dict): The dictionary of log configurations.\n\n            - console (str): Whether to output log to stdout.\n            - level (str): Log level.\n            - filepath (str): The path for saving logs, if console is false, a file path must be assigned.\n            - maxBytes (str): The Maximum value of a log file for rotating, only valid if console is false.\n            - backupCount (str): The count of rotating backup log files, only valid if console is false.\n    \"\"\"\n    # Check the input 
value of level\n    level = kwargs.get('level', None)\n    if level is not None:\n        _verify_level(level)\n\n    # Check the input value of stderr_level\n    level = kwargs.get('stderr_level', None)\n    if level is not None:\n        _verify_level(level)\n\n    # Check the input value of console\n    console = kwargs.get('console', None)\n    file_path = kwargs.get('filepath', None)\n\n    if console is not None:\n        if not console.isdigit() or console not in (_std_off, _std_on):\n            raise ValueError(f'Incorrect value, The value of {_confmap_dict[\"console\"]} must be 0 or 1,'\n                             f' Output log to console, configure to 1.')\n\n        if console == _std_off and not file_path:\n            raise ValueError(f'When {_confmap_dict[\"console\"]} is set to 0, The directory of '\n                             f'saving log must be set, {_confmap_dict[\"filepath\"]} cannot be empty.')\n\n        # Check the input value of filepath\n        if console == _std_off and file_path is not None:\n            file_real_path = os.path.realpath(file_path)\n            if not os.path.exists(file_real_path):\n                raise ValueError(f'The file path does not exist. '\n                                 f'{_confmap_dict[\"filepath\"]}:{file_path}')\n\n        # Check the input value of maxBytes\n        max_bytes = kwargs.get('maxBytes', None)\n        if console == _std_off and max_bytes is not None:\n            if not max_bytes.isdigit():\n                raise ValueError(f'Incorrect value, The value of {_confmap_dict[\"maxBytes\"]} must be positive integer. 
'\n                                 f'{_confmap_dict[\"maxBytes\"]}:{max_bytes}')\n\n        # Check the input value of backupCount\n        backup_count = kwargs.get('backupCount', None)\n        if console == _std_off and backup_count is not None:\n            if not backup_count.isdigit():\n                raise ValueError(f'Incorrect value, The value of {_confmap_dict[\"backupCount\"]} must be positive '\n                                 f'integer. {_confmap_dict[\"backupCount\"]}:{backup_count}')\n\n\ndef _verify_level(level):\n    \"\"\"\n    Verify log level.\n\n    Args:\n        level (str): The log level.\n    \"\"\"\n    level_name = _gloglevel_to_name.get(level, None)\n\n    # Check the value of input level\n    if level_name not in _name_to_level:\n        raise ValueError(f'Incorrect log level:{level}, Please check the configuration of GLOG_v or '\n                         f'GLOG_stderrthreshold, desired log level :{_gloglevel_to_name}')\n\n\ndef get_log_config():\n    \"\"\"\n    Get logger configurations.\n\n    Returns:\n        Dict, the dictionary of logger configurations.\n\n    Examples:\n        >>> import os\n        >>> os.environ['GLOG_v'] = '1'\n        >>> os.environ['GLOG_logtostderr'] = '0'\n        >>> os.environ['GLOG_log_dir'] = '/var/log/mindspore'\n        >>> os.environ['logger_maxBytes'] = '5242880'\n        >>> os.environ['logger_backupCount'] = '10'\n        >>> from mindspore_serving import log as logger\n        >>> logger.get_log_config()\n    \"\"\"\n    logger = _get_logger()\n    handler = logger.handlers[0]\n    config_dict = {}\n    config_dict['GLOG_v'] = get_level()\n    config_dict['GLOG_logtostderr'] = _std_on\n\n    if handler.name == 'FileHandler':\n        config_dict['GLOG_logtostderr'] = _std_off\n        # Separating file path and name\n        file_path_and_name = os.path.split(handler.baseFilename)\n        config_dict['GLOG_log_dir'] = file_path_and_name[0]\n        config_dict['logger_maxBytes'] = 
handler.maxBytes\n        config_dict['logger_backupCount'] = handler.backupCount\n        handler_stderr = logger.handlers[1]\n        # level and glog level mapping dictionary\n        level_to_glog_level = dict(zip(_name_to_level.values(), _gloglevel_to_name.keys()))\n        config_dict['GLOG_stderrthreshold'] = level_to_glog_level.get(handler_stderr.level)\n    return config_dict\n\n\ndef _clear_handler(logger):\n    \"\"\"Clear the handlers that has been set, avoid repeated loading\"\"\"\n    for handler in logger.handlers:\n        logger.removeHandler(handler)\n\n\ndef _find_caller(stack_info=False, _=1):\n    \"\"\"\n    Find the stack frame of the caller.\n\n    Override findCaller on the logger, Support for getting log record.\n    Find the stack frame of the caller so that we can note the source\n    file name, function name and line number.\n\n    Args:\n        stack_info (bool): If the value is true, print stack information to the log. Default: False.\n\n    Returns:\n        tuple, the tuple of the frame data.\n    \"\"\"\n    # pylint: disable=protected-access\n    f = sys._getframe(3)\n    sinfo = None\n    # log_file is used to check caller stack frame\n    log_file = os.path.normcase(f.f_code.co_filename)\n    f = f.f_back\n    rv = \"(unknown file)\", 0, \"(unknown function)\", None\n    while f:\n        co = f.f_code\n        filename = os.path.normcase(co.co_filename)\n        if log_file == filename:\n            f = f.f_back\n            continue\n        if stack_info:\n            sinfo = _get_stack_info(f)\n        rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n        break\n    return rv\n\n\ndef _get_stack_info(frame):\n    \"\"\"\n    Get the stack information.\n\n    Args:\n        frame(frame): the frame requiring information.\n\n    Returns:\n        str, the string of the stack information.\n    \"\"\"\n    sinfo = None\n    stack_prefix = 'Stack (most recent call last):\\n'\n    sinfo = stack_prefix + 
\"\".join(traceback.format_stack(frame))\n    return sinfo\n\n\ndef _setup_logger(kwargs):\n    \"\"\"\n    Set up the logger.\n\n    Args:\n        kwargs (dict): The dictionary of log configurations.\n\n            - console (bool): Whether to output log to stdout. Default: True.\n            - level (str): Log level. Default: WARNING.\n            - filepath (str): The path for saving logs, if console is false, a file path must be assigned.\n            - maxBytes (int): The Maximum value of a log file for rotating, only valid if console is false.\n              Default: 52428800.\n            - backupCount (int): The count of rotating backup log files, only valid if console is false. Default: 30.\n\n    Returns:\n        Logger, well-configured logger.\n    \"\"\"\n\n    # The name of Submodule\n    sub_module = 'SERVING'\n    # The name of Base log file\n    pid = str(os.getpid())\n    log_name = 'mindspore_serving.log.' + pid\n\n    global _global_logger\n\n    _setup_logger_lock.acquire()\n    try:\n        if _global_logger:\n            return _global_logger\n\n        logger = logging.getLogger(name=f'{sub_module}.{log_name}')\n        # Override findCaller on the logger, Support for getting log record\n        logger.findCaller = _find_caller\n        console = kwargs.get('console', True)\n        # Set log level\n        logger.setLevel(kwargs.get('level', logging.WARNING))\n        # Set \"propagate\" attribute to False, stop searching up the hierarchy,\n        # avoid to load the handler of the root logger\n        logger.propagate = False\n        # Get the formatter for handler\n        formatter = _get_formatter()\n\n        # Clean up handle to avoid repeated loading\n        _clear_handler(logger)\n\n        # Set streamhandler for the console appender\n        if console:\n            console_handler = logging.StreamHandler(sys.stderr)\n            console_handler.name = 'StreamHandler'\n            console_handler.formatter = 
_DataFormatter(sub_module, formatter)\n            logger.addHandler(console_handler)\n\n        # Set rotatingFileHandler for the file appender\n        else:\n            # filepath cannot be null, checked in function _verify_config ()\n            logfile_dir = os.path.realpath(kwargs.get('filepath'))\n            file_name = f'{logfile_dir}/{log_name}'\n            logfile_handler = _MultiCompatibleRotatingFileHandler(\n                filename=file_name,\n                # Rotating max bytes, default is 50M\n                maxBytes=kwargs.get('maxBytes', _logger_def_max_bytes),\n                # Rotating backup count, default is 30\n                backupCount=kwargs.get('backupCount', _logger_def_backup_count),\n                encoding='utf8'\n            )\n            logfile_handler.name = 'FileHandler'\n            logfile_handler.formatter = _DataFormatter(sub_module, formatter)\n            logger.addHandler(logfile_handler)\n\n            # Write the file and output warning and error logs to stderr\n            console_handler = logging.StreamHandler(sys.stderr)\n            console_handler.name = 'StreamHandler'\n            console_handler.formatter = _DataFormatter(sub_module, formatter)\n            console_handler.setLevel(kwargs.get('stderr_level', logging.WARNING))\n            logger.addHandler(console_handler)\n\n        _global_logger = logger\n\n    finally:\n        _setup_logger_lock.release()\n    return _global_logger\n"
  },
  {
    "path": "mindspore_serving/proto/ms_agent.proto",
    "content": "/**\n * Copyright 2019 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n// ms_manager.proto\nsyntax = \"proto3\";\n\npackage mindspore.serving.proto;\nimport \"mindspore_serving/proto/ms_service.proto\";\n\nmessage DistributedPredictRequest {\n  repeated Tensor inputs = 1;\n  bool return_result = 2;\n  int64 subgraph = 3;\n}\n\nmessage DistributedPredictReply {\n  repeated Tensor outputs = 1;\n  ErrorMsg error_msg = 2;\n}\n\nmessage DistributedExitRequest {\n  string address = 1;\n}\n\nmessage DistributedExitReply {\n  ErrorMsg error_msg = 1;\n}\n\nservice MSAgent {\n  rpc Predict(DistributedPredictRequest) returns (DistributedPredictReply) {}\n  rpc Exit(DistributedExitRequest) returns (DistributedExitReply) {}\n  rpc Ping(PingRequest) returns (PingReply) {}\n  rpc Pong(PongRequest) returns (PongReply) {}\n}\n"
  },
  {
    "path": "mindspore_serving/proto/ms_distributed.proto",
    "content": "/**\n * Copyright 2019 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n// ms_manager.proto\nsyntax = \"proto3\";\n\npackage mindspore.serving.proto;\nimport \"mindspore_serving/proto/ms_service.proto\";\n\nmessage AgentSpec {\n  int64 rank_id = 1;\n  int64 batch_size = 2;\n  repeated TensorInfo inputs = 3;\n  repeated TensorInfo outputs = 4;\n}\n\nmessage CommonModelMeta {\n  string servable_name = 1;\n  string model_key = 2;\n  bool with_batch_dim = 3;\n  repeated int64 without_batch_dim_inputs = 4;\n  map<uint64, int64> inputs_count = 5;\n  map<uint64, int64> outputs_count = 6;\n}\n\nmessage DistributedModelMeta {\n  int64 rank_size = 1;\n  int64 stage_size = 2;\n}\n\nmessage AgentRegisterRequest {\n  repeated AgentSpec agent_spec = 1;\n  string address = 2;\n}\n\nmessage AgentRegisterReply {\n  ErrorMsg error_msg = 1;\n}\n\nmessage AgentExitRequest {\n  oneof address_choice {\n    string address = 1; // by agent process\n    string agent_ip = 2; // by agent start up process\n  }\n}\n\nmessage AgentExitReply {\n  ErrorMsg error_msg = 1;\n}\n\nmessage AgentFailedRequest {\n}\n\nmessage AgentFailedReply {\n  ErrorMsg error_msg = 1;\n}\n\nmessage AgentConfigAcquireRequest {\n}\n\nmessage AgentConfigAcquireReply {\n  message OneRankConfig {\n    string ip = 1;\n    int64 device_id = 2;\n  }\n  string rank_table_content = 1;\n  repeated OneRankConfig rank_list = 2;\n  CommonModelMeta common_meta = 3;\n  
DistributedModelMeta distributed_meta = 4;\n}"
  },
  {
    "path": "mindspore_serving/proto/ms_master.proto",
    "content": "/**\n * Copyright 2019 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n// ms_manager.proto\nsyntax = \"proto3\";\n\npackage mindspore.serving.proto;\nimport \"mindspore_serving/proto/ms_service.proto\";\n\nservice MSMaster {\n  rpc Register(RegisterRequest) returns (RegisterReply) {}\n  rpc Exit(ExitRequest) returns (ExitReply) {}\n  rpc NotifyFailed(NotifyFailedRequest) returns (NotifyFailedReply) {}\n  rpc CallModel(PredictRequest) returns (PredictReply) {}\n  rpc GetModelInfo(GetModelInfoRequest) returns (GetModelInfoReply) {}\n}\n\nmessage ServableRegSpec {\n  string name = 1;\n  uint64 version_number = 2;\n  uint64 batch_size = 4;\n  message MethodInfo{\n    string name = 1;\n    repeated string input_names = 2;\n    bool only_model_stage = 3;\n  }\n  repeated MethodInfo methods = 5;\n  ModelInfos model_infos = 6; // model key,\n  bool own_device = 7;\n}\n\nmessage WorkerRegSpec {\n  uint64 worker_pid = 1;\n  string address = 2;\n  ServableRegSpec servable_spec = 4;\n}\n\nmessage RegisterRequest {\n  WorkerRegSpec worker_spec = 1;\n}\n\nmessage RegisterReply {\n  ErrorMsg error_msg = 1;\n}\n\nmessage ExitRequest {\n  string address = 1;\n}\n\nmessage ExitReply {\n  ErrorMsg error_msg = 1;\n}\n\nmessage NotifyFailedRequest {\n  uint64 worker_pid = 1;\n  string error_msg = 2;\n}\n\nmessage NotifyFailedReply {\n\n}\n\n\nmessage GetModelInfoRequest {\n  string servable_name = 1;\n  uint32 
version_number = 2;\n}\n\nmessage ModelSubGraphInfo {\n  repeated TensorInfo inputs = 3;\n  repeated TensorInfo outputs = 4;\n}\nmessage ModelInfo {\n  uint64 batch_size = 2;\n  repeated ModelSubGraphInfo subgraph_infos = 1;\n}\n\nmessage ModelInfos {\n  map<string, ModelInfo> model_infos = 1; // model key,\n}\n\nmessage GetModelInfoReply {\n  string servable_name = 1;\n  uint32 version_number = 2;\n  ModelInfos model_infos = 3;\n  ErrorMsg error_msg = 4;\n}\n"
  },
  {
    "path": "mindspore_serving/proto/ms_service.proto",
    "content": "/**\n * Copyright 2019 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n// ms_service.proto\nsyntax = \"proto3\";\n\npackage mindspore.serving.proto;\n\nservice MSService {\n  rpc Predict(PredictRequest) returns (PredictReply) {}\n\n}\n\nmessage PredictRequest {\n  ServableSpec servable_spec = 1;\n  repeated Instance instances = 2;\n}\n\nmessage ErrorMsg{\n  int64 error_code = 1; // 0 is valid, otherwise invalid\n  bytes error_msg = 2;\n}\n\nmessage PredictReply {\n  ServableSpec servable_spec = 1;\n  repeated Instance instances = 3;\n  // size 0: OK, 1: for all batch, >1: for every batch\n  repeated ErrorMsg error_msg = 4;\n}\n\nmessage Instance{\n  map<string, Tensor> items = 1;\n  map<string, ShmTensorData> output_buffers = 2;\n}\n\nenum DataType {\n  MS_UNKNOWN = 0;\n  MS_BOOL = 1;\n  MS_INT8 = 2;\n  MS_UINT8 = 3;\n  MS_INT16 = 4;\n  MS_UINT16 = 5;\n  MS_INT32 = 6;\n  MS_UINT32 = 7;\n  MS_INT64 = 8;\n  MS_UINT64 = 9;\n  MS_FLOAT16 = 10;\n  MS_FLOAT32 = 11;\n  MS_FLOAT64 = 12;\n  MS_STRING = 13; // for string model input\n  MS_BYTES = 14;  // for images\n}\n\nmessage TensorShape {\n  repeated int64 dims = 1;\n};\n\nmessage ShmTensorData {\n  string memory_key = 1;\n  uint64 bytes_size = 2; // the total shared memory size\n  uint64 data_offset = 3;\n  uint64 data_size = 4;\n}\n\nmessage Tensor {\n  // tensor shape info\n  TensorShape shape = 1;\n\n  // tensor content data type\n  DataType dtype = 
2;\n\n  // tensor data\n  oneof tensor_data {\n    bytes data = 3;\n    ShmTensorData shm_data = 5;\n  }\n\n  // for string type and images, the dtype is MS_BYTES.\n  repeated bytes bytes_val = 4;\n}\n\nmessage ServableSpec {\n  // servable name\n  string name = 1;\n\n  // optional. If unspecified, the latest version servable will be used.\n  uint64 version_number = 3;\n\n  // Specifies the method name in the servable.\n  string method_name = 2;\n}\n\nmessage PingRequest {\n  string address = 1;\n}\n\nmessage PingReply {\n  string address = 1;\n}\nmessage PongRequest {\n  string address = 1;\n}\n\nmessage PongReply {\n  string address = 1;\n}\n\nmessage TensorInfo {\n  TensorShape shape = 1; // tensor shape info\n  DataType dtype = 2; // tensor content data type\n  uint64 size = 3;\n  bool is_no_batch_dim = 4;\n}\n"
  },
  {
    "path": "mindspore_serving/proto/ms_worker.proto",
    "content": "/**\n * Copyright 2019 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n// ms_manager.proto\nsyntax = \"proto3\";\n\npackage mindspore.serving.proto;\nimport \"mindspore_serving/proto/ms_service.proto\";\nimport \"mindspore_serving/proto/ms_master.proto\";\nimport \"mindspore_serving/proto/ms_distributed.proto\";\n\nservice MSWorker {\n  // for master\n  rpc Predict(PredictRequest) returns (PredictReply) {}\n  rpc Exit(ExitRequest) returns (ExitReply) {}\n}\n\nservice MSDistributedWorker {\n  // for worker agent\n  rpc AgentExit(AgentExitRequest) returns (AgentExitReply) {}\n  rpc AgentRegister(AgentRegisterRequest) returns (AgentRegisterReply) {}\n  rpc AgentFailed(AgentFailedRequest) returns (AgentFailedReply) {}\n  rpc AgentConfigAcquire(AgentConfigAcquireRequest) returns (AgentConfigAcquireReply) {}\n  rpc Ping(PingRequest) returns (PingReply) {}\n  rpc Pong(PongRequest) returns (PongReply) {}\n}\n"
  },
  {
    "path": "mindspore_serving/server/__init__.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nMindSpore Serving is a lightweight and high-performance service module that helps MindSpore developers efficiently\ndeploy online inference services in the production environment.\n\nMindSpore Serving server API, which can be used to start servables, gRPC and RESTful server. A servable corresponds to\nthe service provided by a model. The client sends inference tasks and receives inference results through gRPC and\nRESTful server.\n\"\"\"\n\nfrom .master import start_grpc_server, start_restful_server, stop, SSLConfig\nfrom ._server import start_servables, ServableStartConfig\nfrom . import register\nfrom . import distributed\n\n__all__ = []\n__all__.extend([\n    \"start_grpc_server\",\n    \"start_restful_server\",\n    \"stop\",\n    \"start_servables\",\n    'ServableStartConfig',\n    \"SSLConfig\"\n])\n\n__all__.extend(register.__all__)\n__all__.extend(distributed.__all__)\n"
  },
  {
    "path": "mindspore_serving/server/_servable_common.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Interface for start up servable\"\"\"\n\nimport os\nimport time\nimport threading\nimport signal\nimport psutil\n\nimport mindspore_serving.log as logger\nfrom mindspore_serving._mindspore_serving import WorkerContext_\n\n\nclass ServableContextDataBase:\n    \"\"\"Used to startup servable process\"\"\"\n\n    def __init__(self):\n        pass\n\n    @property\n    def servable_name(self):\n        raise NotImplementedError\n\n    @property\n    def version_number(self):\n        raise NotImplementedError\n\n    def to_string(self):\n        \"\"\"For logging\"\"\"\n        raise NotImplementedError\n\n    def new_worker_process(self):\n        \"\"\"Start worker process to provide servable\"\"\"\n        raise NotImplementedError\n\n    def can_restart(self):\n        \"\"\"Whether the worker can restart\"\"\"\n        return True\n\n    def own_device(self):\n        \"\"\"Whether the worker occupy device\"\"\"\n        return True\n\n\nclass WorkerContext:\n    \"\"\"Used to monitor and manage workers\"\"\"\n\n    def __init__(self, context_data, master_address, sub_process):\n        if not isinstance(context_data, ServableContextDataBase):\n            raise RuntimeError(f\"Parameter '{context_data}' should be instance of ServableReprInfo, \"\n                        
       f\"but actually {type(context_data)}\")\n        self.context_data_ = context_data\n        self.master_address_ = master_address\n        self.sub_process_ = sub_process\n        self.last_not_alive_time_ = None\n        self.is_in_process_switching_ = False\n        self.context = WorkerContext_.init_worker(context_data.servable_name, context_data.version_number,\n                                                  context_data.to_string(), sub_process.pid)\n\n    @property\n    def servable_name(self):\n        return self.context_data_.servable_name\n\n    @property\n    def worker_pid(self):\n        return self.sub_process_.pid\n\n    @property\n    def master_address(self):\n        return self.master_address_\n\n    def to_string(self):\n        \"\"\"For logging\"\"\"\n        return f\"{self.context_data_.to_string()}, pid: {self.worker_pid}\"\n\n    @property\n    def is_in_process_switching(self):\n        return self.is_in_process_switching_\n\n    def own_device(self):\n        return self.context_data_.own_device()\n\n    def ready(self):\n        \"\"\"Is worker ready to provide service\"\"\"\n        return self.context.ready()\n\n    def print_status(self):\n        \"\"\"DEBUG, used to print worker status\"\"\"\n        self.context.print_status()\n\n    def is_in_starting(self):\n        \"\"\"Whether the worker is in the process of startup\"\"\"\n        return self.context.is_in_starting()\n\n    def has_error_notified(self):\n        \"\"\"Whether error is reported by worker process during startup\"\"\"\n        return self.context.has_error_notified()  # Error message of worker notifying master\n\n    def get_notified_error(self):\n        return self.context.get_notified_error()\n\n    def has_exit_notified(self):\n        \"\"\"Whether exit is reported by worker process\"\"\"\n        return self.context.has_exit_notified()  # Exit message of worker notifying master\n\n    def can_be_restart(self):\n        \"\"\"Whether can restart 
the worker process\"\"\"\n        if not self.context_data_.can_restart():\n            return False\n        normal_handled_count = self.context.normal_handled_count\n        return normal_handled_count > 0\n\n    def exit_for_enough_time(self):\n        \"\"\" whether has exited for 1s, wait 1s for worker exit or error message\"\"\"\n        return self.last_not_alive_time_ and (time.time() - self.last_not_alive_time_ > 1)\n\n    def is_alive(self):\n        \"\"\"Whether the worker process is alive\"\"\"\n        alive = (self.sub_process_.poll() is None)\n        if not alive:\n            if not self.last_not_alive_time_:\n                self.context.notify_not_alive()\n                self.last_not_alive_time_ = time.time()\n        else:\n            self.last_not_alive_time_ = None\n        return alive\n\n    def is_unavailable(self):\n        \"\"\"Whether the working process can link and provide services\"\"\"\n        if self.is_in_process_switching:  # restart: shutdown and start worker\n            return False\n        if self.is_in_starting():  # start worker\n            return False\n        return self.context.is_unavailable\n\n    def update_worker_process(self, new_sub_process):\n        \"\"\"Update worker process pid\"\"\"\n        self.context.update_worker_pid(new_sub_process.pid)\n        self.sub_process_ = new_sub_process\n        self.last_not_alive_time_ = None\n\n    def _terminate(self):\n        self.sub_process_.terminate()\n\n    def _shutdown_worker(self):\n        \"\"\"Shutdown worker process\"\"\"\n        if not self.is_alive():\n            return\n        self._terminate()\n        for _ in range(100):  # 10s\n            if not self.is_alive():\n                return\n            time.sleep(0.1)\n        self.send_exit_signal(signal.SIGKILL)\n        self.context.notify_not_alive()\n\n    def _restart_worker(self):\n        \"\"\"Restart worker process\"\"\"\n        logger.info(f\"restart worker, {self.to_string()}\")\n 
       self._shutdown_worker()\n        try:\n            new_sub_process = self.context_data_.new_worker_process()\n        except RuntimeError as e:\n            logger.error(f\"Start worker failed: {e}\")\n            self.context.notify_start_failed(f\"Start worker failed: {e}\")\n            return\n        self.update_worker_process(new_sub_process)\n\n    def shutdown_worker(self):\n        \"\"\"Shutdown worker process in thread\"\"\"\n        self.handle_worker_process(self._shutdown_worker)\n\n    def restart_worker(self):\n        \"\"\"Restart worker process in thread\"\"\"\n        self.handle_worker_process(self._restart_worker)\n\n    def handle_worker_process(self, thread_fun):\n        \"\"\"Used to do something in thread\"\"\"\n        self.is_in_process_switching_ = True\n\n        def handle_thread():\n            thread_fun()\n            self.is_in_process_switching_ = False\n\n        thread = threading.Thread(target=handle_thread)\n        thread.start()\n\n    def send_exit_signal(self, sig):\n        \"\"\"Send signal to worker process, used to exit the worker process\"\"\"\n        if not self.is_alive():\n            return\n        logger.warning(f\"Send signal {sig} to worker, {self.to_string()}\")\n        try:\n            child_process = psutil.Process(self.worker_pid)\n            if not child_process.is_running():\n                return\n            children_of_child = child_process.children(recursive=True)\n            for item in children_of_child:\n                os.kill(item.pid, sig)\n            self.sub_process_.send_signal(sig)\n        except psutil.NoSuchProcess:\n            return\n        except Exception as e:  # pylint: disable=broad-except\n            logger.warning(f\"Get exception when send signal {sig} to worker, {self.to_string()}, \"\n                           f\"exception: {e}\")\n"
  },
  {
    "path": "mindspore_serving/server/_servable_local.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Interface for start up single core servable\"\"\"\nimport os\nimport random\n\nimport sys\nimport subprocess\n\nfrom mindspore_serving import log as logger\nfrom mindspore_serving.server.common import check_type, get_abs_path\nfrom mindspore_serving.server.worker import get_newest_version_number\nfrom mindspore_serving.server._servable_common import ServableContextDataBase\n\n\ndef _get_device_type(target_device_type, enable_lite):\n    \"\"\"Get device type supported, this will load libmindspore.so or libmindspore-lite.so\"\"\"\n    # Get Device type: Ascend, Gpu, Cpu\n    args = f\"{sys.executable} -c 'from mindspore_serving._mindspore_serving import Worker_;\" \\\n           f\"device_type=Worker_.get_device_type(\\\"{target_device_type}\\\", {enable_lite});\" \\\n           f\"print(\\\"#get_device_type_result=\\\", device_type, \\\"#\\\", sep=\\\"\\\")'\"\n    process = subprocess.Popen(args=args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    process.wait()\n    result = process.stdout.read().decode(\"utf-8\")\n    prefix = \"#get_device_type_result=\"\n    index = result.find(prefix)\n    if index < 0:\n        raise RuntimeError(f\"Failed to get device type\")\n    index += len(prefix)\n    end_index = result.find(\"#\", index)\n    device_type = 
result[index:end_index]\n    return device_type\n\n\ndef _all_reuse_device():\n    \"\"\"Get device type supported, this will load libmindspore.so or libmindspore-lite.so\"\"\"\n    # Whether allow reuse device, for Ascend910 return False, other return True\n    args = f\"{sys.executable} -c 'from mindspore_serving._mindspore_serving import Worker_;\" \\\n           f\"reuse_flag=Worker_.support_reuse_device();\" \\\n           f\"print(\\\"#get_reuse_flag_result=\\\", reuse_flag, \\\"#\\\", sep=\\\"\\\")'\"\n    process = subprocess.Popen(args=args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    process.wait()\n    result = process.stdout.read().decode(\"utf-8\")\n    prefix = \"#get_reuse_flag_result=\"\n    index = result.find(prefix)\n    if index < 0:\n        raise RuntimeError(f\"Failed to get device type\")\n    index += len(prefix)\n    end_index = result.find(\"#\", index)\n    # pylint: disable=simplifiable-if-expression\n    reuse_flag = True if result[index:end_index] == 'True' else False\n    return reuse_flag\n\n\nclass ServableStartConfig:\n    r\"\"\"\n    Servable startup configuration.\n\n    For more detail, please refer to\n    `MindSpore-based Inference Service Deployment <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_example.html>`_ and\n    `Servable Provided Through Model Configuration <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_model.html>`_.\n\n    Args:\n        servable_directory (str): The directory where the servable is located in. There expects to has a directory\n            named `servable_name`.\n        servable_name (str): The servable name.\n        device_ids (Union[int, list[int], tuple[int]], optional): The device list the model loads into and runs in.\n            Used when device type is Nvidia GPU, Ascend 310P/910. Default None.\n        version_number (int, optional): Servable version number to be loaded. 
The version number should be a positive\n            integer, starting from 1, and 0 means to load the latest version. Default: 0.\n        device_type (str, optional): Target device type for model deployment. Currently supports \"Ascend\", \"GPU\", \"CPU\"\n            and None. Default: None.\n\n            - \"Ascend\": the platform expected to be Ascend 310P/910, etc.\n            - \"GPU\": the platform expected to be Nvidia GPU.\n            - \"CPU\": the platform expected to be CPU.\n            - None: the platform is determined by the MindSpore environment.\n\n        num_parallel_workers (int, optional): The number of processes that process python tasks, at least the number\n            of device cards used specified by the parameter device_ids. It will be adjusted to the number of device\n            cards when it is less than the number of device cards. The value should be in range [0,64]. Default: 0.\n        dec_key (bytes, optional): Byte type key used for decryption. The valid length is 16, 24, or 32. Default: None.\n        dec_mode (str, optional): Specifies the decryption mode, take effect when dec_key is set.\n            Option: 'AES-GCM' or 'AES-CBC'. 
Default: 'AES-GCM'.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid.\n    \"\"\"\n\n    def __init__(self, servable_directory, servable_name, device_ids=None, version_number=0, device_type=None,\n                 num_parallel_workers=0, dec_key=None, dec_mode='AES-GCM'):\n        super(ServableStartConfig, self).__init__()\n        check_type.check_str(\"servable_directory\", servable_directory)\n        logger.info(f\"input servable directory: {servable_directory}\")\n        servable_directory = get_abs_path(servable_directory)\n        logger.info(f\"absolute servable directory: {servable_directory}\")\n\n        check_type.check_str(\"servable_name\", servable_name)\n        check_type.check_int(\"version_number\", version_number, 0)\n        check_type.check_int(\"num_parallel_workers\", num_parallel_workers, 0, 64)\n        if dec_key is not None:\n            if not isinstance(dec_key, bytes):\n                raise RuntimeError(f\"Parameter 'dec_key' should be bytes, but actually {type(dec_key)}\")\n            if not dec_key:\n                raise RuntimeError(f\"Parameter 'dec_key' should not be empty bytes\")\n            if len(dec_key) not in (16, 24, 32):\n                raise RuntimeError(f\"Parameter 'dec_key' length {len(dec_key)} expected to be 16, 24 or 32\")\n        check_type.check_str(\"dec_mode\", dec_mode)\n        if dec_mode not in ('AES-GCM', 'AES-CBC'):\n            raise RuntimeError(f\"Parameter 'dec_mode' expected to be 'AES-GCM' or 'AES-CBC'\")\n\n        self.servable_directory_ = servable_directory\n        self.servable_name_ = servable_name\n        self.version_number_ = version_number\n\n        if device_ids is None:\n            device_ids = []\n        device_ids = check_type.check_and_as_int_tuple_list(\"device_ids\", device_ids, 0)\n\n        if device_type is not None:\n            check_type.check_str(\"device_type\", device_type)\n        else:\n            device_type = 
\"None\"\n\n        if device_type.lower() != \"none\":\n            if device_type.lower() not in (\"ascend\", \"gpu\", \"cpu\"):\n                raise RuntimeError(f\"Unsupported device type '{device_type}', only support 'Ascend', 'GPU', 'CPU' \"\n                                   f\"and None, case ignored\")\n        # else device_type is None\n        # if device_ids is empty, and there are models declared, Cpu target should be support\n        # if device_ids is not empty, and there are no models declared, use no device resources\n        # if device_ids is not empty, and there are models declared, final device_type depend on inference package\n        self.device_ids_ = device_ids\n        if not device_ids and not num_parallel_workers:\n            self.num_parallel_workers_ = 1\n        else:\n            self.num_parallel_workers_ = num_parallel_workers\n        self.device_type_ = device_type.lower()\n        self.dec_key_ = dec_key\n        self.dec_mode_ = dec_mode\n\n    @property\n    def servable_directory(self):\n        return self.servable_directory_\n\n    @property\n    def servable_name(self):\n        return self.servable_name_\n\n    @property\n    def version_number(self):\n        return self.version_number_\n\n    @property\n    def device_type(self):\n        return self.device_type_\n\n    @property\n    def device_ids(self):\n        return self.device_ids_\n\n    @property\n    def dec_key(self):\n        return self.dec_key_\n\n    @property\n    def dec_mode(self):\n        return self.dec_mode_\n\n    @property\n    def num_parallel_workers(self):\n        return self.num_parallel_workers_\n\n    def _check_device_type(self, enable_lite):\n        \"\"\"Check whether the device type is valid\"\"\"\n        device_type = self.device_type_\n        if device_type.lower() != \"none\":\n            if device_type.lower() not in (\"ascend\", \"gpu\", \"cpu\"):\n                raise RuntimeError(f\"Unsupported device type 
'{device_type}', only support 'Ascend', 'GPU', 'CPU' \"\n                                   f\"and None, case ignored\")\n            default_device = _get_device_type(None, enable_lite)\n            support_cpu = _get_device_type(\"cpu\", enable_lite)\n            if support_cpu and support_cpu != default_device:\n                support_device = f\"None, '{default_device}' or '{support_cpu}'\"\n            else:\n                support_device = f\"None or '{default_device}'\"\n            if not _get_device_type(device_type, enable_lite):\n                raise RuntimeError(f\"The device type '{device_type}' of servable name {self.servable_name} \"\n                                   f\"is inconsistent with current running environment, supported device type: \"\n                                   f\"{support_device}\")\n\n\nclass DeployConfig:\n    \"\"\"Deployment configuration of one version for the servable\"\"\"\n\n    def __init__(self, version_number, device_ids, num_parallel_workers=0, dec_key=None, dec_mode='AES-GCM'):\n        check_type.check_int(\"version_number\", version_number)\n        if device_ids is None:\n            device_ids = []\n        device_ids = check_type.check_and_as_int_tuple_list(\"device_ids\", device_ids, 0)\n        check_type.check_int(\"num_parallel_workers\", num_parallel_workers, 0)\n\n        if dec_key is not None:\n            if not isinstance(dec_key, bytes):\n                raise RuntimeError(f\"Parameter 'dec_key' should be bytes, but actually {type(dec_key)}\")\n            if not dec_key:\n                raise RuntimeError(f\"Parameter 'dec_key' should not be empty bytes\")\n            if len(dec_key) not in (16, 24, 32):\n                raise RuntimeError(f\"Parameter 'dec_key' length {len(dec_key)} expected to be 16, 24 or 32\")\n        check_type.check_str(\"dec_mode\", dec_mode)\n        if dec_mode not in ('AES-GCM', 'AES-CBC'):\n            raise RuntimeError(f\"Parameter 'dec_mode' expected to be 
'AES-GCM' or 'AES-CBC'\")\n\n        self.version_number = version_number\n        self.device_ids = set(device_ids)\n        if not device_ids and not num_parallel_workers:\n            self.num_parallel_workers = 1\n        else:\n            self.num_parallel_workers = num_parallel_workers\n        self.dec_key = dec_key\n        self.dec_mode = dec_mode\n\n\nclass ServableStartConfigGroup:\n    \"\"\"Servable start config for one servable with multi version deployment configs\"\"\"\n\n    def __init__(self, servable_directory, servable_name, device_type=None):\n        check_type.check_str(\"servable_directory\", servable_directory)\n        logger.info(f\"input servable directory: {servable_directory}\")\n        servable_directory = get_abs_path(servable_directory)\n        logger.info(f\"absolute servable directory: {servable_directory}\")\n\n        check_type.check_str(\"servable_name\", servable_name)\n\n        if device_type is not None:\n            check_type.check_str(\"device_type\", device_type)\n        else:\n            device_type = \"None\"\n\n        self.servable_directory = servable_directory\n        self.servable_name = servable_name\n        self.device_type = device_type\n        self.check_servable_location()\n        self.deploy_configs = {}\n        self.newest_version_number = get_newest_version_number(servable_directory, servable_name)\n        logger.info(f\"The newest version number of servable {self.servable_name} is {self.newest_version_number}, \"\n                    f\"servable directory: {self.servable_directory}\")\n\n    def check_servable_location(self):\n        \"\"\"Check the validity of parameters servable_directory and servable_name\"\"\"\n        config_dir = os.path.join(self.servable_directory, self.servable_name)\n        if not os.path.isdir(config_dir):\n            raise RuntimeError(\n                f\"Check servable config failed, directory '{config_dir}' not exist, servable \"\n                
f\"directory '{self.servable_directory}', servable name '{self.servable_name}'\")\n\n        config_file = os.path.join(config_dir, \"servable_config.py\")\n        if not os.path.isfile(config_file):\n            raise RuntimeError(\n                f\"Check servable config failed, file '{config_file}' not exist,  servable directory \"\n                f\"'{self.servable_directory}', servable name '{self.servable_name}'\")\n\n    def append_deploy(self, deploy_config):\n        \"\"\"Append one deployment configuration of one version for the servable\"\"\"\n        if not isinstance(deploy_config, DeployConfig):\n            raise RuntimeError(f\"Parameter 'deploy_config' should be type of DeployConfig\")\n        if deploy_config.version_number == 0:\n            deploy_config.version_number = self.newest_version_number\n\n        if deploy_config.version_number not in self.deploy_configs:\n            self.deploy_configs[deploy_config.version_number] = deploy_config\n        else:\n            last_config = self.deploy_configs[deploy_config.version_number]\n            last_config.device_ids = last_config.device_ids.union(deploy_config.device_ids)\n            if last_config.dec_key != deploy_config.dec_key or last_config.dec_mode != deploy_config.dec_mode:\n                raise RuntimeError(f\"The dec key or dec mode of servable name {self.servable_name} is different in \"\n                                   f\"multiple configurations.\")\n            if deploy_config.num_parallel_workers > last_config.num_parallel_workers:\n                last_config.num_parallel_workers = deploy_config.num_parallel_workers\n\n    def export_as_start_configs(self):\n        \"\"\"Export the configuration as list of ServableStartConfig\"\"\"\n        configs = []\n        for config in self.deploy_configs.values():\n            start_config = ServableStartConfig(servable_directory=self.servable_directory,\n                                               
servable_name=self.servable_name,\n                                               device_ids=tuple(config.device_ids),\n                                               version_number=config.version_number,\n                                               device_type=self.device_type,\n                                               num_parallel_workers=config.num_parallel_workers,\n                                               dec_key=config.dec_key, dec_mode=config.dec_mode)\n            configs.append(start_config)\n        return configs\n\n\ndef _check_and_merge_config(configs):\n    \"\"\"Merge ServableStartConfig with the same version number\"\"\"\n    start_config_groups = {}\n    for config in configs:\n        if not isinstance(config, ServableStartConfig):\n            continue\n        if config.servable_name in start_config_groups:\n            if config.servable_directory != start_config_groups[config.servable_name].servable_directory:\n                raise RuntimeError(\n                    f\"The servable directory of servable name {config.servable_name} is different in\"\n                    f\" multiple configurations, servable directory: \"\n                    f\"{config.servable_directory} and {start_config_groups[config.servable_name].servable_directory}\")\n        else:\n            config_group = ServableStartConfigGroup(config.servable_directory, config.servable_name, config.device_type)\n            start_config_groups[config.servable_name] = config_group\n\n        deploy_config = DeployConfig(config.version_number, config.device_ids, config.num_parallel_workers,\n                                     config.dec_key, config.dec_mode)\n        start_config_groups[config.servable_name].append_deploy(deploy_config)\n\n    return start_config_groups\n\n\ndef merge_config(configs):\n    \"\"\"Merge ServableStartConfig with the same version number\"\"\"\n    start_config_groups = _check_and_merge_config(configs)\n    configs_ret = []\n\n    for 
config_group in start_config_groups.values():\n        start_configs = config_group.export_as_start_configs()\n        configs_ret.extend(start_configs)\n\n    allow_reuse_device = None\n    device_ids_used = set()\n    for config in configs_ret:\n        for device_id in config.device_ids:\n            if device_id in device_ids_used:\n                if allow_reuse_device is None:\n                    allow_reuse_device = _all_reuse_device()\n                if not allow_reuse_device:\n                    raise RuntimeError(f\"Ascend 910 device id {device_id} is used repeatedly in servable \"\n                                       f\"{config.servable_name}\")\n            device_ids_used.add(device_id)\n    for config in configs:\n        if not isinstance(config, ServableStartConfig):\n            configs_ret.append(config)\n    return configs_ret\n\n\nclass ServableContextData(ServableContextDataBase):\n    \"\"\"Used to startup servable process\"\"\"\n\n    def __init__(self, servable_config, device_id, master_address, enable_lite):\n        super(ServableContextData, self).__init__()\n        self.servable_config = servable_config\n        self.device_id = device_id\n        self.master_address = master_address\n        self.log_new_file = True\n        self.enable_lite = enable_lite\n\n    @property\n    def servable_name(self):\n        return self.servable_config.servable_name\n\n    @property\n    def version_number(self):\n        return self.servable_config.version_number\n\n    def to_string(self):\n        \"\"\"For logging\"\"\"\n        return f\"servable name: {self.servable_name}, device id: {self.device_id}\"\n\n    def new_worker_process(self):\n        \"\"\"Start worker process to provide servable\"\"\"\n        python_exe = sys.executable\n        config = self.servable_config\n        device_type = config.device_type\n        if device_type is None:\n            device_type = \"None\"\n        script_dir = 
os.path.dirname(os.path.abspath(__file__))\n        py_script = os.path.join(script_dir, \"start_worker.py\")\n\n        if self.servable_config.dec_key:\n            pipe_file = f\"serving_temp_dec_{config.servable_name}_device{self.device_id}_\" \\\n                        f\"{random.randrange(1000000, 9999999)}\"\n            os.mkfifo(pipe_file)\n        else:\n            pipe_file = 'None'\n        enable_lite_str = \"True\" if self.enable_lite else \"False\"\n\n        arg = f\"{python_exe} {py_script} \" \\\n              f\"--servable_directory={config.servable_directory} \" \\\n              f\"--servable_name={config.servable_name} \" \\\n              f\"--version_number={config.version_number} \" \\\n              f\"--device_type={device_type} \" \\\n              f\"--device_id={self.device_id} \" \\\n              f\"--master_address={self.master_address} \" \\\n              f\"--enable_lite={enable_lite_str} \" \\\n              f\"--dec_key_pipe_file={pipe_file} \" \\\n              f\"--dec_mode={config.dec_mode} \" \\\n              f\"--listening_master=True\"\n\n        args = arg.split(\" \")\n\n        serving_logs_dir = \"serving_logs\"\n        try:\n            os.mkdir(serving_logs_dir)\n        except FileExistsError:\n            pass\n\n        write_mode = \"w\" if self.log_new_file else \"a\"\n        self.log_new_file = False\n        log_file_name = f\"{serving_logs_dir}/log_{config.servable_name}_device{self.device_id}\" \\\n                        f\"_version{self.version_number}.log\"\n        with open(log_file_name, write_mode) as fp:\n            sub = subprocess.Popen(args=args, shell=False, stdout=fp, stderr=fp)\n        if self.servable_config.dec_key:\n            with open(pipe_file, \"wb\") as fp:\n                fp.write(self.servable_config.dec_key)\n        return sub\n\n\nclass ServableExtraContextData(ServableContextDataBase):\n    \"\"\"Used to startup servable process\"\"\"\n\n    def __init__(self, 
servable_config, master_address, index, device_ids_empty, enable_lite):\n        super(ServableExtraContextData, self).__init__()\n        self.servable_config = servable_config\n        self.master_address = master_address\n        self.log_new_file = True\n        self.index = index\n        self.device_ids_empty = device_ids_empty\n        self.enable_lite = enable_lite\n\n    @property\n    def servable_name(self):\n        return self.servable_config.servable_name\n\n    @property\n    def version_number(self):\n        return self.servable_config.version_number\n\n    def own_device(self):\n        \"\"\"Whether the worker occupy device\"\"\"\n        return False\n\n    def to_string(self):\n        \"\"\"For logging\"\"\"\n        return f\"servable name: {self.servable_name}, version: {self.version_number}, extra: {self.index}\"\n\n    def new_worker_process(self):\n        \"\"\"Start worker process to provide servable\"\"\"\n        python_exe = sys.executable\n        config = self.servable_config\n        script_dir = os.path.dirname(os.path.abspath(__file__))\n        py_script = os.path.join(script_dir, \"start_extra_worker.py\")\n\n        if config.dec_key:\n            pipe_file = f\"serving_temp_dec_{config.servable_name}_index{self.index}_\" \\\n                        f\"{random.randrange(1000000, 9999999)}\"\n            os.mkfifo(pipe_file)\n        else:\n            pipe_file = 'None'\n\n        device_type = config.device_type\n        if device_type is None:\n            device_type = \"None\"\n\n        enable_lite_str = \"True\" if self.enable_lite else \"False\"\n        arg = f\"{python_exe} {py_script} \" \\\n              f\"--servable_directory={config.servable_directory} \" \\\n              f\"--servable_name={config.servable_name} \" \\\n              f\"--version_number={config.version_number} \" \\\n              f\"--device_type={device_type} \" \\\n              f\"--device_ids_empty={self.device_ids_empty} \" \\\n           
   f\"--index={self.index} \" \\\n              f\"--enable_lite={enable_lite_str} \" \\\n              f\"--master_address={self.master_address} \" \\\n              f\"--dec_key_pipe_file={pipe_file} \" \\\n              f\"--dec_mode={config.dec_mode} \" \\\n              f\"--listening_master=True\"\n        args = arg.split(\" \")\n\n        serving_logs_dir = \"serving_logs\"\n        try:\n            os.mkdir(serving_logs_dir)\n        except FileExistsError:\n            pass\n\n        write_mode = \"w\" if self.log_new_file else \"a\"\n        self.log_new_file = False\n        log_file_name = f\"{serving_logs_dir}/log_{config.servable_name}_extra{self.index}\" \\\n                        f\"_version{self.version_number}.log\"\n        with open(log_file_name, write_mode) as fp:\n            sub = subprocess.Popen(args=args, shell=False, stdout=fp, stderr=fp)\n        if self.servable_config.dec_key:\n            with open(pipe_file, \"wb\") as fp:\n                fp.write(self.servable_config.dec_key)\n        return sub\n"
  },
  {
    "path": "mindspore_serving/server/_server.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Interface for start up servable\"\"\"\nimport os\nimport time\nimport threading\nimport signal\n\nimport mindspore_serving.log as logger\nfrom mindspore_serving.server.worker.init_mindspore import set_mindspore_cxx_env\nfrom mindspore_serving.server.master import start_master_server, stop_on_except, stop, at_stop_list, only_model_stage\nfrom mindspore_serving.server._servable_common import WorkerContext\nfrom mindspore_serving.server._servable_local import ServableStartConfig, ServableContextData, merge_config\nfrom mindspore_serving.server._servable_local import ServableExtraContextData\nfrom mindspore_serving.server.distributed._servable_distributed import DistributedStartConfig, DistributedContextData\nfrom mindspore_serving.server.common import check_type\nfrom mindspore_serving._mindspore_serving import ExitSignalHandle_\nfrom mindspore_serving._mindspore_serving import ServableContext_\n\n\n@stop_on_except\ndef start_servables(servable_configs, enable_lite=False):\n    r\"\"\"\n    Used to start one or more servables on the serving server. One model can be combined with preprocessing and\n    postprocessing to provide a servable, and multiple models can also be combined to provide a servable.\n\n    This interface can be used to start multiple different servables. 
One servable can be deployed on multiple devices,\n    and each device runs a servable copy.\n\n    On Ascend 910 hardware platform, each copy of each servable owns one device. Different servables or different\n    versions of the same servable need to be deployed on different devices.\n    On Ascend 310P and GPU hardware platform, one device can be shared by multi servables, and different servables\n    or different versions of the same servable can be deployed on the same chip to realize device reuse.\n\n    For details about how to configure models to provide servables, please refer to\n    `MindSpore-based Inference Service Deployment <https://www.mindspore.cn/serving/docs/en/master/serving_example.html>`_ and\n    `Servable Provided Through Model Configuration <https://www.mindspore.cn/serving/docs/en/master/serving_model.html>`_.\n\n    Args:\n        servable_configs (Union[ServableStartConfig, list[ServableStartConfig], tuple[ServableStartConfig]]): The\n            startup configs of one or more servables.\n        enable_lite (bool): Whether to use MindSpore Lite inference backend. Default False.\n\n    Raises:\n        RuntimeError: Failed to start one or more servables. 
For log of one servable, please refer to subdirectory\n            serving_logs of the directory where the startup script is located.\n\n    Examples:\n        >>> import os\n        >>> from mindspore_serving import server\n        >>>\n        >>> servable_dir = os.path.abspath(\".\")\n        >>> resnet_config = server.ServableStartConfig(servable_dir, \"resnet\", device_ids=(0,1))\n        >>> add_config = server.ServableStartConfig(servable_dir, \"add\", device_ids=(2,3))\n        >>> server.start_servables(servable_configs=(resnet_config, add_config))  # press Ctrl+C to stop\n        >>> server.start_grpc_server(\"0.0.0.0:5500\")\n    \"\"\"\n    if isinstance(servable_configs, (ServableStartConfig, DistributedStartConfig)):\n        servable_configs = (servable_configs,)\n    if not isinstance(servable_configs, (tuple, list)):\n        raise RuntimeError(f\"Parameter '{servable_configs}' should be ServableStartConfig, list or tuple of \"\n                           f\"ServableStartConfig, but actually {type(servable_configs)}\")\n    check_type.check_bool(\"enable_lite\", enable_lite)\n    for config in servable_configs:\n        if not isinstance(config, (ServableStartConfig, DistributedStartConfig)):\n            raise RuntimeError(\n                f\"The item of parameter '{servable_configs}' should be ServableStartConfig, but actually \"\n                f\"{type(config)}\")\n        if isinstance(config, ServableStartConfig):\n            # pylint: disable=protected-access\n            config._check_device_type(enable_lite)\n    ServableContext_.get_instance().set_enable_lite(enable_lite)\n\n    set_mindspore_cxx_env()\n    # merge ServableStartConfig with same servable name and running version number\n    try:\n        servable_configs = merge_config(servable_configs)\n    except RuntimeError as e:\n        logger.error(f\"Start servables failed: {str(e)}\")\n        raise\n    logger.info(\"Servable configs:\")\n    for config in servable_configs:\n  
      if isinstance(config, ServableStartConfig):\n            logger.info(\n                f\"servable directory: {config.servable_directory}, servable name: {config.servable_name}, \"\n                f\"running version number: {config.version_number}, device ids:{config.device_ids}, \"\n                f\"device type: {config.device_type}\")\n        if isinstance(config, DistributedStartConfig):\n            logger.info(f\"distributed servable, servable directory: {config.servable_directory}, \"\n                        f\"servable name: {config.servable_name}, rank table json file: {config.rank_table_json_file}, \"\n                        f\"running version number: {config.version_number}, \"\n                        f\"distributed address:{config.distributed_address}, \"\n                        f\"wait agents time: {config.wait_agents_time_in_seconds}s\")\n\n    master_pid = os.getpid()\n    unix_socket_dir = \"unix_socket_files\"\n    try:\n        os.mkdir(unix_socket_dir)\n    except FileExistsError:\n        pass\n    master_address = f\"unix:{unix_socket_dir}/serving_master_{master_pid}\"\n    start_master_server(address=master_address)\n\n    signal.signal(signal.SIGCHLD, signal.SIG_IGN)\n    worker_list = _start_workers_with_devices(master_address, servable_configs, enable_lite)\n    has_device_workers = bool(worker_list)\n    _listening_workers_when_startup(worker_list)\n    extra_worker_list = _start_extra_workers(master_address, servable_configs, enable_lite)\n    worker_list.extend(extra_worker_list)\n    _listening_workers_after_startup(worker_list, has_device_workers)\n\n\ndef _start_workers_with_devices(master_address, servable_configs, enable_lite):\n    \"\"\"Start workers that occupy devices\"\"\"\n    worker_list = []\n    for config in servable_configs:\n        if isinstance(config, ServableStartConfig):\n            for device_id in config.device_ids:\n                try:\n                    context_data = ServableContextData(config, 
device_id, master_address, enable_lite)\n                    sub_process = context_data.new_worker_process()\n                    worker_context = WorkerContext(context_data, master_address, sub_process)\n                except RuntimeError as e:\n                    _send_exit_signal_to_children(worker_list)\n                    raise RuntimeError(f\"Start worker failed: {e}\")\n                worker_list.append(worker_context)\n        elif isinstance(config, DistributedStartConfig):\n            try:\n                context_data = DistributedContextData(config, master_address)\n                sub_process = context_data.new_worker_process()\n                worker_context = WorkerContext(context_data, master_address, sub_process)\n            except RuntimeError as e:\n                _send_exit_signal_to_children(worker_list)\n                raise RuntimeError(f\"Start worker failed: {e}\")\n            worker_list.append(worker_context)\n    return worker_list\n\n\ndef _start_extra_workers(master_address, servable_configs, enable_lite):\n    \"\"\"Start workers that do not occupy devices\"\"\"\n    worker_list = []\n    worker_pid_set = set()\n    for config in servable_configs:\n        if not isinstance(config, ServableStartConfig):\n            continue\n        if len(config.device_ids) >= config.num_parallel_workers:\n            continue\n        if only_model_stage(config.servable_name):\n            logger.warning(f\"There is no need to startup additional worker processes, all stages are models, servable:\"\n                           f\" {config.servable_name}\")\n            continue\n        extra_worker_count = config.num_parallel_workers - len(config.device_ids)\n        for index in range(extra_worker_count):\n            try:\n                context_data = ServableExtraContextData(config, master_address, index, not config.device_ids,\n                                                        enable_lite)\n                sub_process = 
context_data.new_worker_process()\n                if sub_process.pid in worker_pid_set:\n                    raise RuntimeError(\n                        f\"Maybe the parameter 'num_parallel_workers' is too large, and the number of open files exceeds\"\n                        f\" the system upper limit. Please check the workers logs in the serving_logs directory for\"\n                        f\" more details\")\n                worker_pid_set.add(sub_process.pid)\n                worker_context = WorkerContext(context_data, master_address, sub_process)\n            except RuntimeError as e:\n                _send_exit_signal_to_children(worker_list)\n                raise RuntimeError(f\"Start worker failed: {e}\")\n            worker_list.append(worker_context)\n    _listening_workers_when_startup(worker_list)\n    return worker_list\n\n\ndef _send_exit_signal_to_children(worker_list):\n    \"\"\"Send exit signal to all child processes, and terminate all child processes when they are still alive\n    in some seconds later.\n    \"\"\"\n    if not worker_list:\n        return\n    for worker in worker_list:\n        worker.send_exit_signal(signal.SIGINT)\n    wait_seconds = 10\n    for i in range(wait_seconds * 100):  # 10s\n        all_exit = True\n        for worker in worker_list:\n            if worker.is_alive():\n                if i % 100 == 0:\n                    logger.warning(f\"Wait for all worker processes to exit, otherwise they will be forcibly killed in \"\n                                   f\"{wait_seconds - (i // 100)} seconds.\")\n                all_exit = False\n                break\n        if all_exit:\n            logger.info(f\"All Child process exited\")\n            return\n        time.sleep(0.01)\n\n    for worker in worker_list:\n        worker.send_exit_signal(signal.SIGKILL)\n\n\ndef _listening_workers_when_startup(worker_list):\n    \"\"\"Listening child process\"\"\"\n    if not worker_list:\n        return\n    time_last = 
time.time()\n    while True:\n        time.sleep(0.1)\n        if ExitSignalHandle_.has_stopped():\n            logger.warning(\"Fail to start workers because of signal SIGINT or SIGTERM\")\n            _send_exit_signal_to_children(worker_list)\n            raise RuntimeError(\"Fail to start workers because of signal SIGINT or SIGTERM\")\n\n        all_ready = True\n        for worker in worker_list:\n            if not worker.is_alive() or worker.has_error_notified():\n                for _ in range(100):\n                    if worker.has_error_notified():\n                        logger.warning(f\"Fail to start workers: {worker.get_notified_error()}\")\n                        _send_exit_signal_to_children(worker_list)\n                        raise RuntimeError(f\"Fail to start workers: {worker.get_notified_error()}\")\n                    time.sleep(0.01)  # wait 1s for error msg\n                logger.error(f\"Fail to start workers because of death of one worker\")\n                _send_exit_signal_to_children(worker_list)\n                raise RuntimeError(\"Fail to start workers because of death of one worker\")\n            if not worker.ready():\n                if time.time() - time_last > 1:\n                    time_last = time.time()\n                    worker.print_status()\n                all_ready = False\n        if all_ready:\n            break\n    logger.info(\"All workers is ready\")\n\n\ndef _listening_workers_after_startup(worker_list, has_device_workers):\n    \"\"\"Listening agent status after success start up of agents\"\"\"\n\n    def listening_thread_fun():\n        while True:\n            time.sleep(0.01)\n            if ExitSignalHandle_.has_stopped():\n                logger.warning(\"Serving server begin to exit: receive exit signal\")\n                break\n            alive_count = 0\n            for worker in worker_list:\n                occupy_device_worker = 1 if worker.own_device() or not has_device_workers else 0\n   
             if worker.is_in_process_switching:\n                    alive_count += occupy_device_worker\n                    continue\n                if worker.is_alive():\n                    alive_count += occupy_device_worker\n                    if worker.is_unavailable():\n                        worker.restart_worker()\n                    continue\n                # not alive\n                # has exit or error notified,\n                if worker.has_exit_notified() or worker.has_error_notified():\n                    continue\n                if worker.exit_for_enough_time():\n                    # has exit for 1s and there were no normal handled requests\n                    if not worker.can_be_restart():\n                        continue\n                    logger.warning(\n                        f\"detect worker process has exited, try to restart, servable: {worker.to_string()}\")\n                    worker.restart_worker()\n                alive_count += occupy_device_worker\n\n            if not alive_count:\n                logger.warning(\"Serving server begin to exit: all worker processes that occupy devices have exited\")\n                break\n\n        _send_exit_signal_to_children(worker_list)\n        stop()\n\n    thread = threading.Thread(target=listening_thread_fun)\n    thread.start()\n\n    def join_thread():\n        if thread != threading.current_thread():\n            thread.join()\n            return True\n        return False\n\n    at_stop_list.append(join_thread)\n"
  },
  {
    "path": "mindspore_serving/server/common/__init__.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MindSpore Serving.\"\"\"\n\nfrom . import check_type\nfrom .utils import get_abs_path\nfrom .decorator import deprecated\n"
  },
  {
    "path": "mindspore_serving/server/common/check_type.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"T check for worker\"\"\"\n\n\ndef check_and_as_tuple_with_str_list(arg_name, strs):\n    \"\"\"Check whether the input parameters are reasonable multiple str inputs,\n    which can be single str, tuple or list of str, tuple with list of str.\n    Finally, return tuple with list of str.\n    \"\"\"\n    if isinstance(strs, str):\n        strs = (list(strs),)\n        return tuple(strs)\n\n    if not isinstance(strs, (tuple, list)):\n        raise RuntimeError(f\"Parameter '{arg_name}' should be str or tuple/list of str, but actually {type(strs)}\")\n\n    str_list = []\n    for item in strs:\n        it_list = []\n        if isinstance(item, list):\n            for inner in item:\n                if not isinstance(inner, str):\n                    raise RuntimeError(f\"The inner of parameter '{arg_name}' should be str, \"\n                                       f\"but actually {type(inner)}\")\n                if not inner:\n                    raise RuntimeError(f\"The inner of parameter '{arg_name}' should not be empty str\")\n                if item in it_list:\n                    raise RuntimeError(f\"The inner value '{inner}' in parameter '{arg_name}' \"\n                                       f\"should not be repeated\")\n                it_list.append(inner)\n      
  else:\n            if not isinstance(item, str):\n                raise RuntimeError(f\"The item of parameter '{arg_name}' should be str, but actually {type(item)}\")\n            if not item:\n                raise RuntimeError(f\"The item of parameter '{arg_name}' should not be empty str\")\n            if item in str_list:\n                raise RuntimeError(f\"The item value '{item}' in parameter '{arg_name}' should not be repeated\")\n            it_list.append(item)\n        str_list.append(it_list)\n\n    return tuple(str_list)\n\n\ndef check_and_as_str_tuple_list(arg_name, strs):\n    \"\"\"Check whether the input parameters are reasonable multiple str inputs,\n    which can be single str, tuple or list of str.\n    Finally, return tuple of str.\n    \"\"\"\n    if isinstance(strs, str):\n        strs = (strs,)\n\n    if not isinstance(strs, (tuple, list)):\n        raise RuntimeError(f\"Parameter '{arg_name}' should be str or tuple/list of str, but actually {type(strs)}\")\n\n    str_list = []\n    for item in strs:\n        if not isinstance(item, str):\n            raise RuntimeError(f\"The item of parameter '{arg_name}' should be str, but actually {type(item)}\")\n        if not item:\n            raise RuntimeError(f\"The item of parameter '{arg_name}' should not be empty str\")\n        if item in str_list:\n            raise RuntimeError(f\"The item value '{item}' in parameter '{arg_name}' should not be repeated\")\n        str_list.append(item)\n\n    return tuple(str_list)\n\n\ndef check_str(arg_name, str_val):\n    \"\"\"Check whether the input parameters are reasonable str input\"\"\"\n    if not isinstance(str_val, str):\n        raise RuntimeError(f\"Parameter '{arg_name}' should be str, but actually {type(str_val)}\")\n    if not str_val:\n        raise RuntimeError(f\"Parameter '{arg_name}' should not be empty str\")\n\n\ndef check_bytes(arg_name, bytes_val):\n    \"\"\"Check whether the input parameters are reasonable bytes input\"\"\"\n   
 if not isinstance(bytes_val, bytes):\n        raise RuntimeError(f\"Parameter '{arg_name}' should be bytes, but actually {type(bytes_val)}\")\n    if not bytes_val:\n        raise RuntimeError(f\"Parameter '{arg_name}' should not be empty bytes\")\n\n\ndef check_bool(arg_name, bool_val):\n    \"\"\"Check whether the input parameters are reasonable bool input\"\"\"\n    if not isinstance(bool_val, bool):\n        raise RuntimeError(f\"Parameter '{arg_name}' should be bool, but actually {type(bool_val)}\")\n\n\ndef check_int(arg_name, int_val, minimum=None, maximum=None, is_tuple_item=False):\n    \"\"\"Check whether the input parameters are reasonable int input\"\"\"\n    if not is_tuple_item:\n        prefix = f\"Parameter '{arg_name}'\"\n    else:\n        prefix = f\"The item value '{int_val}' in parameter '{arg_name}'\"\n\n    if isinstance(int_val, bool):\n        raise RuntimeError(f\"{prefix} should be int, but actually {type(int_val)}\")\n    if not isinstance(int_val, int):\n        raise RuntimeError(f\"{prefix} should be int, but actually {type(int_val)}\")\n    if minimum is not None and int_val < minimum:\n        if maximum is not None:\n            raise RuntimeError(f\"{prefix} should be in range [{minimum},{maximum}]\")\n        raise RuntimeError(f\"{prefix} should be >= {minimum}\")\n    if maximum is not None and int_val > maximum:\n        if minimum is not None:\n            raise RuntimeError(f\"{prefix} should be in range [{minimum},{maximum}]\")\n        raise RuntimeError(f\"{prefix} should be <= {maximum}\")\n\n\ndef check_ip_port(arg_name, port):\n    \"\"\"Check whether the input parameters are reasonable ip port\"\"\"\n    check_int(arg_name, port, 1, 65535)\n\n\ndef check_and_as_int_tuple_list(arg_name, ints, minimum=None, maximum=None):\n    \"\"\"Check whether the input parameters are reasonable multiple int inputs,\n    which can be single int, tuple or list of int.\n    Finally, return tuple of int.\n    \"\"\"\n    if 
isinstance(ints, int):\n        ints = (ints,)\n\n    if not isinstance(ints, (tuple, list)):\n        raise RuntimeError(f\"Parameter '{arg_name}' should be int or tuple/list of int, but actually {type(ints)}\")\n\n    int_list = []\n    for item in ints:\n        if item in int_list:\n            raise RuntimeError(f\"The item value '{item}' in parameter '{arg_name}' should not be repeated\")\n        check_int(arg_name, item, minimum, maximum, True)\n        int_list.append(item)\n\n    return tuple(int_list)\n\n\ndef check_int_tuple_list(arg_name, ints, minimum=None, maximum=None):\n    \"\"\"Check whether the input parameters are reasonable multiple int inputs,\n    which can be single tuple or list of int.\n    Finally, return tuple of int.\n    \"\"\"\n    if not isinstance(ints, (tuple, list)):\n        raise RuntimeError(f\"Parameter '{arg_name}' should be tuple/list of int, but actually {type(ints)}\")\n\n    int_list = []\n    for item in ints:\n        if item in int_list:\n            raise RuntimeError(f\"The item value '{item}' in parameter '{arg_name}' should not be repeated\")\n        check_int(arg_name, item, minimum, maximum, True)\n        int_list.append(item)\n"
  },
  {
    "path": "mindspore_serving/server/common/decorator.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Providing decorators.\"\"\"\n\nfrom functools import wraps\n\nfrom mindspore_serving import log\n\n\ndef deprecated(version, substitute):\n    \"\"\"deprecated warning\n\n    Args:\n        version (str): version that the operator or function is deprecated.\n        substitute (str): the substitute name for deprecated operator or function.\n    \"\"\"\n\n    def decorate(func):\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            name = func.__name__\n            log.warning(f\"'{name}' is deprecated from version {version} and \"\n                        f\"will be removed in a future version, use '{substitute}' instead.\")\n            ret = func(*args, **kwargs)\n            return ret\n\n        return wrapper\n\n    return decorate\n"
  },
  {
    "path": "mindspore_serving/server/common/utils.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"common function utils\"\"\"\nimport os\nimport sys\n\n\ndef get_abs_path(path):\n    \"\"\"get the absolute path\"\"\"\n    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    abs_path = os.path.realpath(os.path.join(script_dir, path))\n    return abs_path\n"
  },
  {
    "path": "mindspore_serving/server/distributed/__init__.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The interface to startup serving server with distributed servable.\nSee how to configure and startup distributed model, please refer to\n`MindSpore Serving-based Distributed Inference Service Deployment <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_distributed_example.html>`_.\"\"\"\n\nfrom mindspore_serving.server.worker.distributed import startup_agents\nfrom mindspore_serving.server.worker.distributed.register import declare_servable\nfrom ._distributed import start_servable\n\n__all__ = []\n__all__.extend([\n    \"start_servable\",\n    'startup_agents',\n    'declare_servable'\n])\n"
  },
  {
    "path": "mindspore_serving/server/distributed/_distributed.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Startup serving server with distributed servable\"\"\"\n\nfrom ._servable_distributed import DistributedStartConfig\n\n\ndef start_servable(servable_directory, servable_name, rank_table_json_file, version_number=1,\n                   distributed_address=\"0.0.0.0:6200\", wait_agents_time_in_seconds=0):\n    r\"\"\"\n    Start up the servable named 'servable_name' defined in 'servable_directory'.\n\n    Args:\n        servable_directory (str): The directory where the servable is located in. There expects to has a directory\n            named `servable_name`. For more detail:\n            `How to config Servable <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_model.html>`_ .\n\n        servable_name (str): The servable name.\n        version_number (int, optional): Servable version number to be loaded. The version number should be a positive\n            integer, starting from 1. Default: 1.\n        rank_table_json_file (str): The rank table json file name.\n        distributed_address (str, optional): The distributed worker address the worker agents linked to.\n            Default: \"0.0.0.0:6200\".\n        wait_agents_time_in_seconds(int, optional): The maximum time in seconds the worker waiting ready of all agents,\n            0 means unlimited time. 
Default: 0.\n\n    Raises:\n        RuntimeError: Failed to start the distributed servable.\n\n    Examples:\n        >>> import os\n        >>> from mindspore_serving.server import distributed\n        >>>\n        >>> servable_dir = os.path.abspath(\".\")\n        >>> distributed.start_servable(servable_dir, \"matmul\", rank_table_json_file=\"hccl_8p.json\", \\\n        ...                            distributed_address=\"127.0.0.1:6200\")\n    \"\"\"\n    from mindspore_serving.server import start_servables\n    config = DistributedStartConfig(servable_directory=servable_directory, servable_name=servable_name,\n                                    rank_table_json_file=rank_table_json_file, version_number=version_number,\n                                    distributed_address=distributed_address,\n                                    wait_agents_time_in_seconds=wait_agents_time_in_seconds)\n    start_servables(config)\n"
  },
  {
    "path": "mindspore_serving/server/distributed/_servable_distributed.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Distributed servable config\"\"\"\n\nimport os\nimport sys\nimport subprocess\nfrom mindspore_serving.server.common import check_type, get_abs_path\nimport mindspore_serving.log as logger\nfrom mindspore_serving.server._servable_common import ServableContextDataBase\n\n\nclass DistributedStartConfig:\n    r\"\"\"\n    Distributed servable start-up config.\n\n    Args:\n        servable_directory (str): The directory where the servable is located in. There expects to has a directory\n            named `servable_name`. For more detail:\n            `How to config Servable <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_model.html>`_ .\n        servable_name (str): The servable name.\n        rank_table_json_file (str): The rank table json file name.\n        version_number (int): Servable version number to be loaded. The version number should be a positive integer,\n            starting from 1, and 0 means to load the latest version. 
Default: 0.\n        distributed_address (str): The worker address the agents linked to.\n        wait_agents_time_in_seconds(int): The maximum time in seconds the worker waiting ready of all agents,\n            0 means unlimited time, default 0\n\n    Raises:\n        RuntimeError: Input parameters are invalid.\n    \"\"\"\n\n    def __init__(self, servable_directory, servable_name, rank_table_json_file, version_number,\n                 distributed_address, wait_agents_time_in_seconds):\n        super(DistributedStartConfig, self).__init__()\n        check_type.check_str('servable_directory', servable_directory)\n        logger.info(f\"input servable directory: {servable_directory}\")\n        servable_directory = get_abs_path(servable_directory)\n        logger.info(f\"absolute servable directory: {servable_directory}\")\n\n        check_type.check_str('servable_name', servable_name)\n        check_type.check_int('version_number', version_number, 0)\n        if version_number == 0:\n            version_number = 1\n\n        check_type.check_str('rank_table_json_file', rank_table_json_file)\n        logger.info(f\"input rank table file: {rank_table_json_file}\")\n        rank_table_json_file = get_abs_path(rank_table_json_file)\n        logger.info(f\"absolute path of rank table file: {rank_table_json_file}\")\n\n        check_type.check_str('distributed_address', distributed_address)\n        check_type.check_int('wait_agents_time_in_seconds', wait_agents_time_in_seconds, 0)\n\n        self.servable_directory_ = servable_directory\n        self.servable_name_ = servable_name\n        self.version_number_ = version_number\n        self.rank_table_json_file_ = rank_table_json_file\n        self.distributed_address_ = distributed_address\n        self.wait_agents_time_in_seconds_ = wait_agents_time_in_seconds\n\n    @property\n    def servable_directory(self):\n        return self.servable_directory_\n\n    @property\n    def servable_name(self):\n        return 
self.servable_name_\n\n    @property\n    def version_number(self):\n        return self.version_number_\n\n    @property\n    def rank_table_json_file(self):\n        return self.rank_table_json_file_\n\n    @property\n    def distributed_address(self):\n        return self.distributed_address_\n\n    @property\n    def wait_agents_time_in_seconds(self):\n        return self.wait_agents_time_in_seconds_\n\n\nclass DistributedContextData(ServableContextDataBase):\n    \"\"\"Used to start distributed servable worker process\"\"\"\n\n    def __init__(self, distributed_config, master_address):\n        super(DistributedContextData, self).__init__()\n        if not isinstance(distributed_config, DistributedStartConfig):\n            raise RuntimeError(f\"Parameter '{distributed_config}' should be instance of DistributedStartConfig, \"\n                               f\"but actually {type(distributed_config)}\")\n        self.distributed_config_ = distributed_config\n        self.master_address_ = master_address\n        self.log_new_file = True\n\n    @property\n    def servable_name(self):\n        return self.distributed_config_.servable_name\n\n    @property\n    def version_number(self):\n        return self.distributed_config_.version_number\n\n    def to_string(self):\n        \"\"\"Used in logging\"\"\"\n        return f\"distributed servable name: {self.servable_name}\"\n\n    def new_worker_process(self):\n        \"\"\"Start distributed worker process\"\"\"\n        python_exe = sys.executable\n        script_dir = os.path.dirname(os.path.abspath(__file__))\n        py_script = os.path.join(script_dir, \"start_distributed_worker.py\")\n        config = self.distributed_config_\n        arg = f\"{python_exe} {py_script} {config.servable_directory} {config.servable_name} \" \\\n              f\"{config.version_number} {config.rank_table_json_file} {config.distributed_address} \" \\\n              f\"{config.wait_agents_time_in_seconds} {self.master_address_} 
True\"\n        args = arg.split(\" \")\n\n        serving_logs_dir = \"serving_logs\"\n        try:\n            os.mkdir(serving_logs_dir)\n        except FileExistsError:\n            pass\n\n        write_mode = \"w\" if self.log_new_file else \"a\"\n        self.log_new_file = False\n        log_file_name = f\"{serving_logs_dir}/log_{self.servable_name}_distributed.log\"\n        with open(log_file_name, write_mode) as fp:\n            sub = subprocess.Popen(args=args, shell=False, stdout=fp, stderr=fp)\n        return sub\n\n    def can_restart(self):\n        \"\"\"Whether the worker can restart\"\"\"\n        return False\n"
  },
  {
    "path": "mindspore_serving/server/distributed/start_distributed_worker.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Start distributed worker process\"\"\"\n\nimport os\nimport sys\n\nfrom mindspore_serving.server.worker import distributed\nfrom mindspore_serving.server.common import check_type\nfrom mindspore_serving._mindspore_serving import ExitSignalHandle_\nfrom mindspore_serving._mindspore_serving import Worker_\n\n\ndef start_worker(servable_directory, servable_name, version_number, rank_table_json_file,\n                 distributed_address, wait_agents_time_in_seconds,\n                 master_address, listening_master=False):\n    \"\"\"Start distributed worker process\"\"\"\n    check_type.check_str('servable_directory', servable_directory)\n    check_type.check_str('servable_name', servable_name)\n    check_type.check_int('version_number', version_number, 0)\n    check_type.check_str('rank_table_json_file', rank_table_json_file)\n    check_type.check_str('distributed_address', distributed_address)\n    check_type.check_int('wait_agents_time_in_seconds', wait_agents_time_in_seconds, 0)\n\n    check_type.check_str('master_address', master_address)\n    check_type.check_bool('listening_master', listening_master)\n\n    ExitSignalHandle_.start()  # Set flag to running and receive Ctrl+C message\n\n    worker_pid = os.getpid()\n    unix_socket_dir = \"unix_socket_files\"\n    
try:\n        os.mkdir(unix_socket_dir)\n    except FileExistsError:\n        pass\n    worker_address = f\"unix:{unix_socket_dir}/serving_worker_{servable_name}_distributed_{worker_pid}\"\n    if len(worker_address) > 90:  # limit maximum unix domain socket address length\n        worker_address = worker_address[:40] + \"___\" + worker_address[-40:]\n    try:\n        distributed.start_servable(servable_directory=servable_directory, servable_name=servable_name,\n                                   version_number=version_number, rank_table_json_file=rank_table_json_file,\n                                   distributed_address=distributed_address,\n                                   wait_agents_time_in_seconds=wait_agents_time_in_seconds,\n                                   master_address=master_address, worker_address=worker_address)\n    except RuntimeError as ex:\n        Worker_.notify_failed(master_address, f\"{{distributed servable:{servable_name}, {ex}}}\")\n        raise\n\n\ndef parse_args_and_start():\n    \"\"\"Parse args and start distributed worker\"\"\"\n    if len(sys.argv) != 9:\n        raise RuntimeError(\"Expect length of input argv to be 8: str{servable_directory} str{servable_name} \"\n                           \"int{version_number} str{rank_table_json_file} str{distributed_address} \"\n                           \"int{wait_agents_time_in_seconds} str{master_address} bool{listening_master}\")\n    servable_directory = sys.argv[1]\n    servable_name = sys.argv[2]\n    version_number = int(sys.argv[3])\n    rank_table_json_file = sys.argv[4]\n    distributed_address = sys.argv[5]\n    wait_agents_time_in_seconds = int(sys.argv[6])\n    master_address = sys.argv[7]\n    # pylint: disable=simplifiable-if-expression\n    listening_master = True if sys.argv[8].lower() == \"true\" else False\n    start_worker(servable_directory, servable_name, version_number, rank_table_json_file, distributed_address,\n                 wait_agents_time_in_seconds, 
master_address, listening_master)\n\n\nif __name__ == '__main__':\n    parse_args_and_start()\n"
  },
  {
    "path": "mindspore_serving/server/master/__init__.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The master process of serving server: used to receive requests and dispatcher them to worker process\"\"\"\n\nfrom ._master import start_grpc_server, start_restful_server, stop, stop_on_except, SSLConfig\nfrom ._master import start_master_server, at_stop_list, only_model_stage\nfrom . import context\n"
  },
  {
    "path": "mindspore_serving/server/master/_master.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"method of server supplied for master\"\"\"\n\nfrom functools import wraps\n\nfrom mindspore_serving._mindspore_serving import ExitSignalHandle_\nfrom mindspore_serving._mindspore_serving import Master_\nfrom mindspore_serving._mindspore_serving import SSLConfig_\n\nfrom mindspore_serving.server.common import check_type\n\n_wait_and_clear_thread = None\n\nat_stop_list = []\n\n\ndef add_atstop_proc(func):\n    \"\"\"At serving server stop, execute function\"\"\"\n    global at_stop_list\n    at_stop_list.append(func)\n\n\ndef stop():\n    r\"\"\"\n    Stop the running of serving server.\n\n    Examples:\n        >>> from mindspore_serving import server\n        >>>\n        >>> server.start_grpc_server(\"0.0.0.0:5500\")\n        >>> server.start_restful_server(\"0.0.0.0:1500\")\n        >>> ...\n        >>> server.stop()\n    \"\"\"\n    Master_.stop_and_clear()\n    global at_stop_list\n    for func in at_stop_list:\n        result = func()\n        if result is None or result is True:\n            at_stop_list.remove(func)\n\n\ndef stop_on_except(func):\n    \"\"\"Wrap of clear environment and exit on Serving exception\"\"\"\n\n    @wraps(func)\n    def handle_except(*args, **kwargs):\n        try:\n            ExitSignalHandle_.start()  # Set flag to running and receive 
Ctrl+C message\n            func(*args, **kwargs)\n        except:\n            stop()\n            raise\n\n    return handle_except\n\n\nclass SSLConfig:\n    r\"\"\"\n    The server's ssl_config encapsulates necessary parameters for SSL-enabled connections.\n\n    Args:\n        certificate (str): File holding the PEM-encoded certificate chain as a byte string to use or None if no\n            certificate chain should be used.\n        private_key (str): File holding the PEM-encoded private key as a byte string, or None if no private key should\n            be used.\n        custom_ca (str, optional): File holding the PEM-encoded root certificates as a byte string. When verify_client\n            is True, custom_ca must be provided. When verify_client is False, this parameter will be ignored.\n            Default: None.\n        verify_client (bool, optional): If verify_client is true, use mutual authentication. If false, use one-way\n            authentication. Default: False.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid.\n    \"\"\"\n\n    def __init__(self, certificate, private_key, custom_ca=None, verify_client=False):\n        check_type.check_str(\"certificate\", certificate)\n        check_type.check_str(\"private_key\", private_key)\n        check_type.check_bool(\"verify_client\", verify_client)\n\n        self.custom_ca = custom_ca\n        self.certificate = certificate\n        self.private_key = private_key\n        self.verify_client = verify_client\n        if self.verify_client:\n            check_type.check_str(\"custom_ca\", custom_ca)\n\n\n@stop_on_except\ndef start_grpc_server(address, max_msg_mb_size=100, ssl_config=None):\n    r\"\"\"\n    Start gRPC server for the communication between serving client and server.\n\n    Args:\n        address (str): gRPC server address, the address can be `{ip}:{port}` or `unix:{unix_domain_file_path}`.\n\n            - `{ip}:{port}` - Internet domain socket 
address.\n            - `unix:{unix_domain_file_path}` - Unix domain socket address, which is used to communicate with multiple\n              processes on the same machine. `{unix_domain_file_path}` can be relative or absolute file path,\n              but the directory where the file is located must already exist.\n\n        max_msg_mb_size (int, optional): The maximum acceptable gRPC message size in megabytes(MB), value range\n            [1, 512]. Default: 100.\n        ssl_config (mindspore_serving.server.SSLConfig, optional): The server's ssl_config, if None, disabled ssl.\n            Default: None.\n\n    Raises:\n        RuntimeError: Failed to start the gRPC server: parameter verification failed, the gRPC address is wrong or\n            the port is duplicate.\n\n    Examples:\n        >>> from mindspore_serving import server\n        >>>\n        >>> server.start_grpc_server(\"0.0.0.0:5500\")\n    \"\"\"\n    check_type.check_str('address', address)\n    check_type.check_int('max_msg_mb_size', max_msg_mb_size, 1, 512)\n\n    config = SSLConfig_()\n    if ssl_config is not None:\n        if not isinstance(ssl_config, SSLConfig):\n            raise RuntimeError(\"The type of ssl_config should be type of SSLConfig\")\n        with open(ssl_config.certificate, 'rb') as c_fs:\n            c_bytes = c_fs.read()\n        with open(ssl_config.private_key, 'rb') as pk_fs:\n            pk_bytes = pk_fs.read()\n        if ssl_config.verify_client:\n            with open(ssl_config.custom_ca, 'rb') as rc_fs:\n                rc_bytes = rc_fs.read()\n            config.custom_ca = rc_bytes\n        config.certificate = c_bytes\n        config.private_key = pk_bytes\n        config.verify_client = ssl_config.verify_client\n        config.use_ssl = True\n    Master_.start_grpc_server(address, config, max_msg_mb_size)\n\n\n@stop_on_except\ndef start_restful_server(address, max_msg_mb_size=100, ssl_config=None):\n    r\"\"\"\n    Start RESTful server for the 
communication between serving client and server.\n\n    Args:\n        address (str): RESTful server address, the address should be Internet domain socket address.\n        max_msg_mb_size (int, optional): The maximum acceptable RESTful message size in megabytes(MB), value range\n            [1, 512]. Default: 100.\n        ssl_config (mindspore_serving.server.SSLConfig, optional): The server's ssl_config, if None, disabled ssl.\n            Default: None.\n\n    Raises:\n        RuntimeError: Failed to start the RESTful server: parameter verification failed, the RESTful address is wrong\n            or the port is duplicate.\n\n    Examples:\n        >>> from mindspore_serving import server\n        >>>\n        >>> server.start_restful_server(\"0.0.0.0:5900\")\n    \"\"\"\n    check_type.check_str('address', address)\n    check_type.check_int('max_msg_mb_size', max_msg_mb_size, 1, 512)\n\n    config = SSLConfig_()\n    if ssl_config is not None:\n        if not isinstance(ssl_config, SSLConfig):\n            raise RuntimeError(\"The type of ssl_config should be class of SSLConfig\")\n        if ssl_config.verify_client:\n            config.custom_ca = ssl_config.custom_ca\n        config.certificate = ssl_config.certificate\n        config.private_key = ssl_config.private_key\n        config.verify_client = ssl_config.verify_client\n        config.use_ssl = True\n    Master_.start_restful_server(address, config, max_msg_mb_size)\n\n\ndef start_master_server(address):\n    \"\"\"Start the gRPC server for the communication between workers and the master of serving server\"\"\"\n    check_type.check_str('address', address)\n\n    Master_.start_grpc_master_server(address)\n\n\ndef only_model_stage(servable_name):\n    \"\"\"Whether only the model stages exist\"\"\"\n    return Master_.only_model_stage(servable_name)\n"
  },
  {
    "path": "mindspore_serving/server/master/context.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Set context of serving\"\"\"\nfrom mindspore_serving._mindspore_serving import MasterContext_\nfrom mindspore_serving.server.common import check_type\n\n__all__ = [\"set_max_enqueued_requests\"]\n\n_context = MasterContext_.get_instance()\n\n\ndef set_max_enqueued_requests(max_enqueued_requests):\n    r\"\"\"\n    Set the maximum number of requests waiting to be processed.\n\n    Args:\n        max_enqueued_requests (int): The maximum acceptable infer message size in number, default ``10000``,\n            Max infer number should be a positive integer.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid, or other error happened.\n    \"\"\"\n    check_type.check_int(\"max_enqueued_requests\", max_enqueued_requests, 1)\n    _context.set_max_enqueued_requests(max_enqueued_requests)\n"
  },
  {
    "path": "mindspore_serving/server/register/__init__.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Servable register interface, used in servable_config.py of one servable.\nSee how to configure servable_config.py file, please refer to\n`Servable Provided Through Model Configuration <https://www.mindspore.cn/serving/docs/zh-CN/master/serving_model.html>`_.\"\"\"\n\nfrom .model import declare_model, Model, Context, AclOptions, GpuOptions\nfrom .model import AscendDeviceInfo, CPUDeviceInfo, GPUDeviceInfo\nfrom .method import register_method, add_stage\n\nfrom .model import declare_servable\nfrom .method import call_preprocess, call_servable, call_postprocess\nfrom .method import call_preprocess_pipeline, call_postprocess_pipeline\n\n__all__ = []\n__all__.extend([\n    \"declare_model\",\n    \"Model\",\n    \"AscendDeviceInfo\",\n    \"CPUDeviceInfo\",\n    \"GPUDeviceInfo\",\n    \"Context\",\n    'register_method',\n    'add_stage'\n])\n"
  },
  {
    "path": "mindspore_serving/server/register/method.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Method registration interface\"\"\"\n\nimport inspect\nimport ast\nfrom functools import wraps\n\nfrom mindspore_serving._mindspore_serving import ServableRegister_\nfrom mindspore_serving._mindspore_serving import MethodSignature_\nfrom mindspore_serving import log as logger\nfrom mindspore_serving.server.common import check_type, deprecated\nfrom .utils import get_func_name, get_servable_dir\nfrom .stage_function import register_stage_function, check_stage_function\nfrom .model import g_declared_models, Model\n\nmethod_def_context_ = MethodSignature_()\ncur_stage_index_ = 0\nhas_called_preprocess_ = False\nhas_called_servable_ = False\nhas_called_postprocess_ = False\n\nmethod_def_ast_meta_ = []\n\n\nclass _TensorDef:\n    \"\"\"Data flow item, for definitions of data flow in a method\"\"\"\n\n    def __init__(self, tag, tensor_index):\n        self.tag = tag\n        self.tensor_index = tensor_index\n\n    def as_pair(self):\n        return self.tag, self.tensor_index\n\n\ndef _create_tensor_def_outputs(tag, outputs_cnt):\n    \"\"\"Create data flow item for output\"\"\"\n    result = [_TensorDef(tag, i) for i in range(outputs_cnt)]\n    if len(result) == 1:\n        return result[0]\n    return tuple(result)\n\n\ndef _wrap_fun_to_batch(fun, input_count):\n    
\"\"\"wrap preprocess and postprocess to pipeline\"\"\"\n    argspec_len = len(inspect.signature(fun).parameters)\n    if argspec_len != input_count:\n        raise RuntimeError(f\"function {fun.__name__} input args count {argspec_len} not match the count {input_count} \"\n                           f\"registered in method\")\n\n    @wraps(fun)\n    def call_func(instances):\n        for instance in instances:\n            inputs = []\n            for i in range(input_count):\n                inputs.append(instance[i])\n            yield fun(*inputs)\n\n    return call_func\n\n\ndef _get_stage_outputs_count(call_name):\n    global method_def_ast_meta_\n    method_name = method_def_context_.method_name\n    if call_name not in method_def_ast_meta_:\n        raise RuntimeError(\n            f\"Failed to parse method '{method_name}', complex statements such as conditions and loops are not supported\"\n            f\" in register_method when the interface '{call_name}' is used, use 'add_stage' to replace '{call_name}'\")\n    _, outputs_count = method_def_ast_meta_[call_name]\n    return outputs_count\n\n\n@deprecated(\"1.5.0\", \"mindspore_serving.server.register.add_stage\")\ndef call_preprocess(preprocess_fun, *args):\n    r\"\"\"For method registration, define the preprocessing function and its' parameters.\n\n    .. warning::\n        'call_preprocess' is deprecated from version 1.5.0 and will be removed in a future version, use\n        :class:`mindspore_serving.server.register.add_stage` instead.\n\n    Note:\n        The length of 'args' should be equal to the inputs number of preprocess_fun.\n\n    Args:\n        preprocess_fun (function): Python function for preprocess.\n        args: Preprocess inputs. 
The length of 'args' should equal to the input parameters number\n            of implemented python function.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid, or other error happened.\n\n    Examples:\n        >>> from mindspore_serving.server import register\n        >>> import numpy as np\n        >>> def add_trans_datatype(x1, x2):\n        ...     return x1.astype(np.float32), x2.astype(np.float32)\n        >>>\n        >>> register.declare_servable(servable_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n        >>>\n        >>> @register.register_method(output_names=[\"y\"]) # register add_cast method in add\n        >>> def add_cast(x1, x2):\n        ...     x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)  # cast input to float32\n        ...     y = register.call_servable(x1, x2)\n        ...     return y\n    \"\"\"\n    global method_def_context_\n    global has_called_preprocess_, has_called_servable_, has_called_postprocess_\n    if has_called_preprocess_:\n        raise RuntimeError(f\"Check failed in method '{method_def_context_.method_name}', \"\n                           f\"call_preprocess or call_preprocess_pipeline should not be invoked more than once\")\n    if has_called_servable_:\n        raise RuntimeError(f\"Check failed in method '{method_def_context_.method_name}', \"\n                           f\"call_servable should be invoked after call_preprocess\")\n    if has_called_postprocess_:\n        raise RuntimeError(f\"Check failed in method '{method_def_context_.method_name}', \"\n                           f\"call_postprocess or call_postprocess_pipeline should be invoked after call_preprocess\")\n    has_called_preprocess_ = True\n    outputs_count = _get_stage_outputs_count('call_preprocess')\n    return add_stage(preprocess_fun, *args, outputs_count=outputs_count, tag=\"Preprocess\")\n\n\n@deprecated(\"1.5.0\", 
\"mindspore_serving.server.register.add_stage\")\ndef call_preprocess_pipeline(preprocess_fun, *args):\n    r\"\"\"For method registration, define the preprocessing pipeline function and its' parameters.\n\n    .. warning::\n        'call_preprocess_pipeline' is deprecated from version 1.5.0 and will be removed in a future version, use\n        :class:`mindspore_serving.server.register.add_stage` instead.\n\n    A single request can include multiple instances, so multiple queued requests will also have multiple instances.\n    If you need to process multiple instances through multi thread or other parallel processing capability\n    in `preprocess` or `postprocess`, such as using MindData concurrency ability to process multiple input\n    images in `preprocess`, MindSpore Serving provides 'call_preprocess_pipeline' and 'call_postprocess_pipeline'\n    to register such preprocessing and postprocessing. For more detail,\n    please refer to `Resnet50 model configuration example\n    <https://gitee.com/mindspore/serving/blob/master/example/resnet/resnet50/servable_config.py>`_.\n\n    Args:\n        preprocess_fun (function): Python pipeline function for preprocess.\n        args: Preprocess inputs. The length of 'args' should equal to the input parameters number\n            of implemented python function.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid, or other error happened.\n\n    Examples:\n        >>> from mindspore_serving.server import register\n        >>> import numpy as np\n        >>> def add_trans_datatype(instances):\n        ...     for instance in instances:\n        ...         x1 = instance[0]\n        ...         x2 = instance[0]\n        ...         
yield x1.astype(np.float32), x2.astype(np.float32)\n        >>>\n        >>> register.declare_servable(servable_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n        >>>\n        >>> @register.register_method(output_names=[\"y\"]) # register add_cast method in add\n        >>> def add_cast(x1, x2):\n        ...     x1, x2 = register.call_preprocess_pipeline(add_trans_datatype, x1, x2)  # cast input to float32\n        ...     y = register.call_servable(x1, x2)\n        ...     return y\n    \"\"\"\n    global method_def_context_\n    global has_called_preprocess_, has_called_servable_, has_called_postprocess_\n    if has_called_preprocess_:\n        raise RuntimeError(f\"Check failed in method '{method_def_context_.method_name}', \"\n                           f\"call_preprocess or call_preprocess_pipeline should not be invoked more than once\")\n    if has_called_servable_:\n        raise RuntimeError(f\"Check failed in method '{method_def_context_.method_name}', \"\n                           f\"call_servable should be invoked after call_preprocess_pipeline\")\n    if has_called_postprocess_:\n        raise RuntimeError(f\"Check failed in method '{method_def_context_.method_name}', call_postprocess \"\n                           f\"or call_postprocess_pipeline should be invoked after call_preprocess_pipeline\")\n    has_called_preprocess_ = True\n    outputs_count = _get_stage_outputs_count('call_preprocess_pipeline')\n    return add_stage(preprocess_fun, *args, outputs_count=outputs_count, batch_size=0, tag=\"Preprocess\")\n\n\n@deprecated(\"1.5.0\", \"mindspore_serving.server.register.add_stage\")\ndef call_postprocess(postprocess_fun, *args):\n    r\"\"\"For method registration, define the postprocessing function and its' parameters.\n\n    .. 
warning::\n        'call_postprocess' is deprecated from version 1.5.0 and will be removed in a future version, use\n        :class:`mindspore_serving.server.register.add_stage` instead.\n\n    Note:\n        The length of 'args' should be equal to the inputs number of postprocess_fun.\n\n    Args:\n        postprocess_fun (function): Python function for postprocess.\n        args: Preprocess inputs. The length of 'args' should equal to the input parameters number\n            of implemented python function.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid, or other error happened.\n    \"\"\"\n    global method_def_context_\n    global has_called_postprocess_\n    if has_called_postprocess_:\n        raise RuntimeError(f\"Check failed in method '{method_def_context_.method_name}', \"\n                           f\"call_postprocess or call_postprocess_pipeline should not be invoked more than once\")\n    has_called_postprocess_ = True\n    outputs_count = _get_stage_outputs_count('call_postprocess')\n    return add_stage(postprocess_fun, *args, outputs_count=outputs_count, tag=\"Postprocess\")\n\n\n@deprecated(\"1.5.0\", \"mindspore_serving.server.register.add_stage\")\ndef call_postprocess_pipeline(postprocess_fun, *args):\n    r\"\"\"For method registration, define the postprocessing pipeline function and its' parameters.\n\n    .. 
warning::\n        'call_postprocess_pipeline' is deprecated from version 1.5.0 and will be removed in a future version, use\n        :class:`mindspore_serving.server.register.add_stage` instead.\n\n    A single request can include multiple instances, so multiple queued requests will also have multiple instances.\n    If you need to process multiple instances through multi thread or other parallel processing capability\n    in `preprocess` or `postprocess`, such as using MindData concurrency ability to process multiple input\n    images in `preprocess`, MindSpore Serving provides 'call_preprocess_pipeline' and 'call_postprocess_pipeline'\n    to register such preprocessing and postprocessing. For more detail,\n    please refer to `Resnet50 model configuration example\n    <https://gitee.com/mindspore/serving/blob/master/example/resnet/resnet50/servable_config.py>`_.\n\n    Args:\n        postprocess_fun (function): Python pipeline function for postprocess.\n        args: Preprocess inputs. The length of 'args' should equal to the input parameters number\n            of implemented python function.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid, or other error happened.\n    \"\"\"\n    global method_def_context_\n    global has_called_postprocess_\n    if has_called_postprocess_:\n        raise RuntimeError(f\"Check failed in method '{method_def_context_.method_name}', \"\n                           f\"call_postprocess or call_postprocess_pipeline should not be invoked more than once\")\n    has_called_postprocess_ = True\n    outputs_count = _get_stage_outputs_count('call_postprocess_pipeline')\n    return add_stage(postprocess_fun, *args, outputs_count=outputs_count, batch_size=0, tag=\"Postprocess\")\n\n\n@deprecated(\"1.5.0\", \"mindspore_serving.server.register.add_stage\")\ndef call_servable(*args):\n    r\"\"\"For method registration, define the inputs data of model inference.\n\n    .. 
warning::\n        'call_servable' is deprecated from version 1.5.0 and will be removed in a future version, use\n        :class:`mindspore_serving.server.register.add_stage` instead.\n\n    Note:\n        The length of 'args' should be equal to the inputs number of model.\n\n    Args:\n        args: Model's inputs, the length of 'args' should be equal to the inputs number of model.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid, or other error happened.\n\n    Examples:\n        >>> from mindspore_serving.server import register\n        >>> register.declare_servable(servable_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n        >>>\n        >>> @register.register_method(output_names=[\"y\"]) # register add_common method in add\n        >>> def add_common(x1, x2):\n        ...     y = register.call_servable(x1, x2)\n        ...     return y\n    \"\"\"\n    global method_def_context_\n    global has_called_servable_, has_called_postprocess_\n    method_name = method_def_context_.method_name\n    if has_called_servable_:\n        raise RuntimeError(f\"Check failed in method '{method_name}', \"\n                           f\"call_servable should not be invoked more than once\")\n    if has_called_postprocess_:\n        raise RuntimeError(f\"Check failed in method '{method_name}', \"\n                           f\"call_postprocess or call_postprocess_pipeline should be invoked after call_servable\")\n    has_called_servable_ = True\n\n    if not g_declared_models:\n        raise RuntimeError(f\"There is no model declared, you can use declare_model to declare models.\")\n    outputs_count = _get_stage_outputs_count(\"call_servable\")\n    if len(g_declared_models) == 1:\n        model = g_declared_models[0]\n    else:\n        raise RuntimeError(\n            f\"There are more than one servable declared when the interface 'call_servable' is used, use 'add_stage'\"\n            f\" to replace 
'call_servable'\")\n    return add_stage(model, *args, outputs_count=outputs_count)\n\n\ndef add_stage(stage, *args, outputs_count, batch_size=None, tag=None):\n    r\"\"\"In the `servable_config.py` file of one servable, we use `register_method` to wrap a Python function to define\n    a `method` of the servable, and `add_stage` is used to define a stage of this `method`, which can be a Python\n    function or a model.\n\n    Note:\n        The length of 'args' should be equal to the inputs number of function or model.\n\n    Args:\n        stage (Union(function, Model)): User-defined python function or `Model` object return by declare_model.\n        outputs_count (int): Outputs count of the user-defined python function or model.\n        batch_size (int, optional): This parameter is valid only when stage is a function and the function\n            can process multi instances at a time. default ``None``.\n\n            - ``None``, The input of the function will be the inputs of one instance.\n            - ``0``, The input of the function will be tuple object of instances, and the maximum number\n              of the instances is determined by the server based on the batch size of models.\n            - int value >= 1, The input of the function will be tuple object of instances, and the maximum number\n              of the instances is the value specified by 'batch_size'.\n\n        args: Stage inputs placeholders, which come from the inputs of the function wrapped by register_method or the\n            outputs of add_stage. 
The length of 'args' should equal to the input number of the function or model.\n        tag (str, optional): Customized flag of the stage, such as ``\"Preprocess\"``, default ``None``.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid, or other error happened.\n\n    Examples:\n        >>> import numpy as np\n        >>> from mindspore_serving.server import register\n        >>> add_model = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n        >>>\n        >>> def preprocess(x1, x2):\n        ...     return x1.astype(np.float32), x2.astype(np.float32)\n        >>>\n        >>> @register.register_method(output_names=[\"y\"]) # register add_common method in add\n        >>> def add_common(x1, x2):\n        ...     x1, x2 = register.add_stage(preprocess, x1, x2, outputs_count=2) # call preprocess in stage 1\n        ...     y = register.add_stage(add_model, x1, x2, outputs_count=1) # call add model in stage 2\n        ...     
return y\n    \"\"\"\n    global method_def_context_\n    global cur_stage_index_\n    method_name = method_def_context_.method_name\n    if tag is not None:\n        check_type.check_str(\"tag\", tag)\n    else:\n        tag = \"\"\n    for item in args:\n        if not isinstance(item, _TensorDef):\n            raise RuntimeError(f\"Each value of parameter *args is a placeholder for data and must come from the method\"\n                               f\" inputs or the outputs of add_stage\")\n    func_inputs = [item.as_pair() for item in args]\n\n    inputs_count = len(args)\n    if isinstance(stage, Model):\n        if stage not in g_declared_models:\n            raise RuntimeError(\n                f\"Check failed in method '{method_name}', the parameter 'stage' of add_stage must be function \"\n                f\"or Model returned by declare_model, and ensure that interface 'declare_model' can take effect \"\n                f\"when importing servable_config.py by the serving server\")\n        model = stage\n        model_key = model.model_key\n        ServableRegister_.register_model_input_output_info(model_key, inputs_count, outputs_count, 0)\n        method_def_context_.add_stage_model(model_key, func_inputs, 0, tag)\n    elif inspect.isfunction(stage):\n        if batch_size is None:\n            register_stage_function(method_name, _wrap_fun_to_batch(stage, inputs_count),\n                                    inputs_count=inputs_count, outputs_count=outputs_count, use_with_size=False)\n            batch_size = 0\n        else:\n            check_type.check_int(\"batch_size\", batch_size, 0)\n            register_stage_function(method_name, stage, inputs_count=inputs_count, outputs_count=outputs_count,\n                                    use_with_size=True)\n        func_name = get_servable_dir() + \".\" + get_func_name(stage)\n        method_def_context_.add_stage_function(func_name, func_inputs, batch_size, tag)\n    else:\n        if not 
isinstance(stage, str):\n            raise RuntimeError(\n                f\"Check failed in method '{method_name}', the parameter 'stage' of add_stage must be function \"\n                f\"or Model returned by declare_model, now is {type(stage)}\")\n        func_name = stage\n        check_stage_function(method_name, func_name, inputs_count=inputs_count, outputs_count=outputs_count)\n        method_def_context_.add_stage_function(func_name, func_inputs, 0, tag)\n\n    cur_stage_index_ += 1  # call_xxx stage index start begin 1\n    return _create_tensor_def_outputs(cur_stage_index_, outputs_count)\n\n\n_call_servable_name = call_servable.__name__\n_call_stage_names = [call_preprocess.__name__, call_postprocess.__name__]\n_call_stage_batch_names = [call_preprocess_pipeline.__name__, call_postprocess_pipeline.__name__]\n\n\ndef _ast_node_info(method_def_func, ast_node):\n    \"\"\"Ast node code info\"\"\"\n    func_name = method_def_func.__name__\n    func_codes = inspect.getsource(method_def_func)\n    func_codes_lines = func_codes.split(\"\\n\")\n    _, start_lineno = inspect.findsource(method_def_func)\n\n    codes = \"\"\n    if hasattr(ast_node, \"end_lineno\"):\n        end_lineno = ast_node.end_lineno\n    else:\n        end_lineno = ast_node.lineno\n    for line in range(ast_node.lineno, end_lineno + 1):\n        codes += func_codes_lines[line - 1] + \"\\n\"\n    lineno = ast_node.lineno + start_lineno\n    end_lineno = end_lineno + start_lineno\n    if lineno != end_lineno:\n        line_info = f\"{lineno}~{end_lineno}\"\n    else:\n        line_info = f\"{lineno}\"\n    return f\"line {line_info} in {func_name}, code: \\n\" + codes\n\n\ndef _get_method_def_stage_meta(method_def_func):\n    \"\"\"Parse register_method func, and get the input and output count of preprocess, servable and postprocess\"\"\"\n    source = inspect.getsource(method_def_func)\n    method_name = method_def_func.__name__\n    call_list = ast.parse(source).body[0].body\n    
func_meta = {}\n    code_infos = []\n    code_other = None\n\n    def update_other_code(code):\n        nonlocal code_other\n        if not code_other:\n            code_other = code\n\n    for call_item in call_list:\n        if isinstance(call_item, ast.Return):\n            continue\n        if isinstance(call_item, ast.Expr):\n            continue\n        if not isinstance(call_item, ast.Assign):\n            update_other_code(call_item)\n            continue\n        target = call_item.targets[0]\n        if isinstance(target, ast.Name):\n            outputs_count = 1\n        elif isinstance(target, ast.Tuple):\n            outputs_count = len(target.elts)\n        else:\n            continue\n\n        call = call_item.value\n        if not isinstance(call, ast.Call):\n            continue\n        func = call.func\n        if isinstance(func, ast.Attribute):\n            func_name = func.attr\n        elif isinstance(func, ast.Name):\n            func_name = func.id\n        else:\n            update_other_code(call_item)\n            continue\n\n        inputs_count = len(call.args)\n        if func_name in _call_stage_names or func_name in _call_stage_batch_names:\n            inputs_count -= 1\n        elif func_name == _call_servable_name:\n            pass\n        else:\n            update_other_code(call_item)\n            continue\n        if inputs_count <= 0:\n            raise RuntimeError(f\"Invalid '{func_name}' invoke args\")\n\n        logger.info(f\"stage {len(func_meta) + 1} call type '{func_name}', inputs count {inputs_count}, \"\n                    f\"outputs count {outputs_count}\")\n        func_meta[func_name] = [inputs_count, outputs_count]\n        code_infos.append([call_item, func_name])\n    if code_infos and code_other:\n        call_names = [item[1] for item in code_infos]\n        call_names = \";\".join(call_names)\n        raise RuntimeError(\n            f\"Failed to parse method '{method_name}', complex statements such as 
conditions and loops are not supported\"\n            f\" in register_method when the interface '{call_names}' is used, use 'add_stage' to replace '{call_names}',\"\n            f\" code {type(code_other)}: {_ast_node_info(method_def_func, code_other)}\")\n\n    if code_infos and _call_servable_name not in func_meta:\n        raise RuntimeError(f\"Not find the invoke of '{_call_servable_name}'\")\n    return func_meta\n\n\ndef register_method(output_names):\n    \"\"\"Define a method of the servable when importing servable_config.py of one servable. One servable can include one\n    or more methods, and eache method provides different services base on models. A client needs to specify the\n    servable name and method name when accessing one service. MindSpore Serving supports a service consisting of\n    multiple python functions and multiple models.\n\n    Note:\n        This interface should take effect when importing servable_config.py by the serving server. Therefore, it's\n        recommended that this interface be used globally in servable_config.py.\n\n    This interface will define the signatures and pipeline of the method.\n\n    The signatures include the method name, input and outputs names of the method. When accessing a service, the client\n    needs to specify the servable name, the method name, and provide one or more inference instances. Each instance\n    specifies the input data by the input names and obtains the output data by the outputs names.\n\n    The pipeline consists of one or more stages, each stage can be a python function or a model. This is, a pipline can\n    include one or more python functions and one or more models. In addition, the interface also defines the data flow\n    of these stages.\n\n    Args:\n        output_names (Union[str, tuple[str], list[str]]): The output names of method. 
The input names is\n            the args names of the registered function.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid, or other error happened.\n\n    Examples:\n        >>> from mindspore_serving.server import register\n        >>> add_model = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n        >>> sub_model = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\")\n        >>>\n        >>> @register.register_method(output_names=[\"y\"]) # register predict method in servable\n        >>> def predict(x1, x2, x3): # x1+x2-x3\n        ...     y = register.add_stage(add_model, x1, x2, outputs_count=1)\n        ...     y = register.add_stage(sub_model, y, x3, outputs_count=1)\n        ...     return y\n    \"\"\"\n    output_names = check_type.check_and_as_str_tuple_list('output_names', output_names)\n\n    def register(func):\n        name = get_func_name(func)\n        sig = inspect.signature(func)\n        input_names = []\n        for k, v in sig.parameters.items():\n            if v.kind == inspect.Parameter.VAR_POSITIONAL:\n                raise RuntimeError(f\"'{name}' input {k} cannot be VAR_POSITIONAL !\")\n            if v.kind == inspect.Parameter.VAR_KEYWORD:\n                raise RuntimeError(f\"'{name}' input {k} cannot be VAR_KEYWORD !\")\n            if v.kind == inspect.Parameter.KEYWORD_ONLY:\n                raise RuntimeError(f\"'{name}' input {k} cannot be KEYWORD_ONLY !\")\n            input_names.append(k)\n\n        input_tensors = []\n        for i in range(len(input_names)):\n            input_tensors.append(_TensorDef(0, i))\n\n        servable_name = get_servable_dir()\n        global method_def_context_\n        method_def_context_ = MethodSignature_()\n        method_def_context_.servable_name = servable_name\n        method_def_context_.method_name = name\n        method_def_context_.inputs = input_names\n        
method_def_context_.outputs = output_names\n\n        global method_def_ast_meta_\n        method_def_ast_meta_ = _get_method_def_stage_meta(func)\n        global cur_stage_index_\n        cur_stage_index_ = 0\n\n        global has_called_preprocess_, has_called_servable_, has_called_postprocess_\n        has_called_preprocess_ = False\n        has_called_servable_ = False\n        has_called_postprocess_ = False\n\n        output_tensors = func(*tuple(input_tensors))\n        if method_def_ast_meta_ and cur_stage_index_ != len(method_def_ast_meta_):\n            raise RuntimeError(f\"Failed to parse method {name}, the number of stages obtained through the AST \"\n                               f\"{len(method_def_ast_meta_)} is inconsistent with the running result {cur_stage_index_}\"\n                               f\". Condition and loop statements are not supported in methods currently.\")\n\n        if isinstance(output_tensors, _TensorDef):\n            output_tensors = (output_tensors,)\n\n        for item in output_tensors:\n            if not isinstance(item, _TensorDef):\n                raise RuntimeError(f\"Each value returned is a placeholder for data and must come from the method\"\n                                   f\" inputs or the outputs of add_stage\")\n\n        if len(output_tensors) != len(output_names):\n            raise RuntimeError(\n                f\"Method return output size {len(output_tensors)} not match registered {len(output_names)}\")\n\n        return_inputs = [item.as_pair() for item in output_tensors]\n        method_def_context_.set_return(return_inputs)\n        logger.info(f\"Register method: method_name {method_def_context_.method_name}, \"\n                    f\"inputs: {input_names}, outputs: {output_names}\")\n\n        ServableRegister_.register_method(method_def_context_)\n        return func\n\n    return register\n"
  },
  {
    "path": "mindspore_serving/server/register/model.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Servable declaration interface\"\"\"\n\nfrom mindspore_serving._mindspore_serving import ModelMeta_, ServableRegister_, ModelContext_\n\nfrom mindspore_serving import log as logger\nfrom mindspore_serving.server.common import check_type, deprecated\nfrom .utils import get_servable_dir\n\ng_declared_models = []\n\n\n@deprecated(\"1.5.0\", \"mindspore_serving.server.register.declare_model\")\ndef declare_servable(servable_file, model_format, with_batch_dim=True, options=None, without_batch_dim_inputs=None):\n    r\"\"\"\n    declare one model.\n\n    .. 
warning::\n        'register.declare_servable' is deprecated from version 1.5.0 and will be removed in a future version, use\n        :class:`mindspore_serving.server.register.declare_model` instead.\n\n    Args:\n        servable_file (Union[str, list[str]]): Model files name.\n        model_format (str): Model format, ``\"OM\"`` or ``\"MindIR\"``, case ignored.\n        with_batch_dim (bool, optional): Whether the first shape dim of the inputs and outputs of model is batch dim.\n            Default: ``True``.\n        options (Union[AclOptions, GpuOptions], optional): Options of model, supports AclOptions or GpuOptions.\n            Default: ``None``.\n        without_batch_dim_inputs (Union[int, tuple[int], list[int]], optional): Index of inputs that without batch\n            dim when `with_batch_dim` is ``True``. Default: ``None``.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid.\n\n    Return:\n        Model, identification of this model, used as input of add_stage.\n    \"\"\"\n    return declare_model(servable_file, model_format, with_batch_dim, options, without_batch_dim_inputs)\n\n\nclass Model:\n    \"\"\"Indicate a model. 
User should not construct Model object directly, it's need to be returned from `declare_model`\n    or `declare_servable`\n\n    Args:\n        model_key (str): Model key identifies the model.\n    \"\"\"\n\n    def __init__(self, model_key):\n        self.model_key = model_key\n\n    def call(self, *args, subgraph=0):\n        r\"\"\"Invoke the model inference interface based on instances.\n\n        Args:\n            args : tuple/list of instances, or inputs of one instance.\n            subgraph (int, optional): Subgraph index, used when there are multiply sub-graphs in one model.\n                Default: ``0``.\n\n        Return:\n            Tuple of instances when input parameter 'args' is tuple/list, or outputs of one instance.\n\n        Raises:\n            RuntimeError: Inputs are invalid.\n\n        Examples:\n            >>> import numpy as np\n            >>> from mindspore_serving.server import register\n            >>> import mindspore.dataset.vision.c_transforms as VC\n            >>> model = register.declare_model(model_file=\"resnet_bs32.mindir\", model_format=\"MindIR\") # batch_size=32\n            >>>\n            >>> def preprocess(image):\n            ...     decode = VC.Decode()\n            ...     resize = VC.Resize([224, 224])\n            ...     normalize = VC.Normalize(mean=[125.307, 122.961, 113.8575], std=[51.5865, 50.847, 51.255])\n            ...     hwc2chw = VC.HWC2CHW()\n            ...     image = decode(image)\n            ...     image = resize(image) # [3,224,224]\n            ...     image = normalize(image) # [3,224,224]\n            ...     image = hwc2chw(image) # [3,224,224]\n            ...     return input\n            >>>\n            >>> def postprocess(score):\n            >>>     return np.argmax(score)\n            >>>\n            >>> def call_resnet_model(image):\n            ...     image = preprocess(image)\n            ...     score = model.call(image)  # for only one instance\n            ...     
return postprocess(score)\n            >>>\n            >>> def call_resnet_model_batch(instances):\n            ...     input_instances = []\n            ...     for instance in instances:\n            ...         image = instance[0] # only one input\n            ...         image = preprocess(image) # [3,224,224]\n            ...         input_instances.append([image])\n            ...     output_instances = model.call(input_instances)  # for multiply instances\n            ...     for instance in output_instances:\n            ...         score = instance[0]  # only one output for each instance\n            ...         index = postprocess(score)\n            ...         yield index\n            >>>\n            >>> @register.register_method(output_names=[\"index\"])\n            >>> def predict_v1(image):  # without pipeline, call model with only one instance a time\n            ...     index = register.add_stage(call_resnet_model, image, outputs_count=1)\n            ...     return index\n            >>>\n            >>> @register.register_method(output_names=[\"index\"])\n            >>> def predict_v2(image):  # without pipeline, call model with maximum 32 instances a time\n            ...     index = register.add_stage(call_resnet_model_batch, image, outputs_count=1, batch_size=32)\n            ...     return index\n            >>>\n            >>> @register.register_method(output_names=[\"index\"])\n            >>> def predict_v3(image):  # pipeline\n            ...     image = register.add_stage(preprocess, image, outputs_count=1)\n            ...     score = register.add_stage(model, image, outputs_count=1)\n            ...     index = register.add_stage(postprocess, score, outputs_count=1)\n            ...     
return index\n        \"\"\"\n        check_type.check_int(\"subgraph\", subgraph, 0)\n        subgraph_str = \"\"\n        if subgraph != 0:\n            subgraph_str = \" ,subgraph=\" + str(subgraph)\n        if not args:\n            raise RuntimeError(f\"Model({self.model_key}{subgraph_str}).call() failed: no inputs provided, the inputs \"\n                               f\"can be call(x1, x2) for single instance or call([[x1, x2], [x1, x2]]) for multi \"\n                               f\"instances.\")\n        instances = []\n        instance_format = False\n        if len(args) == 1 and isinstance(args[0], (tuple, list)):\n            instance_format = True\n            inputs = args[0]\n            for instance in inputs:\n                if not isinstance(instance, (tuple, list)):\n                    raise RuntimeError(f\"Model({self.model_key}{subgraph_str}).call() failed: inputs format invalid, \"\n                                       f\"the inputs can be call(x1, x2) for single instance or \"\n                                       f\" call([[x1, x2], [x1, x2]]) for multi instances.\")\n                instances.append(tuple(instance))\n        else:\n            instances.append(tuple(args))\n\n        output = ServableRegister_.run(self.model_key, tuple(instances), subgraph)\n        if not instance_format:\n            output = output[0]\n            if len(output) == 1:\n                return output[0]\n            return output\n        return output\n\n\ndef append_declared_model(model_key):\n    global g_declared_models\n    model = Model(model_key)\n    g_declared_models.append(model)\n    return model\n\n\ndef declare_model(model_file, model_format, with_batch_dim=True, options=None, without_batch_dim_inputs=None,\n                  context=None, config_file=None):\n    r\"\"\"\n    Declare one model when importing servable_config.py of one servable.\n\n    Note:\n        This interface should take effect when importing servable_config.py 
by the serving server. Therefore, it's\n        recommended that this interface be used globally in servable_config.py.\n\n    .. warning::\n        The parameter 'options' is deprecated from version 1.6.0 and will be removed in a future version, use\n        parameter 'context' instead.\n\n    Args:\n        model_file (Union[str, list[str]]): Model files name.\n        model_format (str): Model format, ``\"MindIR\"`` or ``\"MindIR_Lite\"``, case ignored.\n        with_batch_dim (bool, optional): Whether the first shape dim of the inputs and outputs of model is batch dim.\n            Default: ``True``.\n        options (Union[AclOptions, GpuOptions], optional): Options of model, supports AclOptions or GpuOptions.\n            Default: ``None``.\n        context (Context): Context is used to store environment variables during execution. If the value is ``None``,\n            Serving uses the default device context based on the deployed device. Default: ``None``.\n        without_batch_dim_inputs (Union[int, tuple[int], list[int]], optional): Index of inputs that without batch\n            dim when `with_batch_dim` is ``True``. For example, if the shape of input 0 does not include the\n            batch dimension, `without_batch_dim_inputs` can be set to `(0,)`. Default: ``None``.\n        config_file (str, optional): Config file for model to set mix precision inference. 
The file path can be an\n            absolute path or a relative path to the directory in which servable_config.py resides.\n            Default: ``None``.\n\n    Return:\n        Model, identification of this model, can be used for `Model.call` or as the inputs of `add_stage`.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid.\n    \"\"\"\n\n    check_type.check_bool('with_batch_dim', with_batch_dim)\n\n    meta = ModelMeta_()\n    model_file = check_type.check_and_as_str_tuple_list('model_file', model_file)\n    meta.common_meta.servable_name = get_servable_dir()\n    meta.common_meta.model_key = \";\".join(model_file)\n    meta.common_meta.with_batch_dim = with_batch_dim\n    if without_batch_dim_inputs:\n        without_batch_dim_inputs = check_type.check_and_as_int_tuple_list('without_batch_dim_inputs',\n                                                                          without_batch_dim_inputs, 0)\n        meta.common_meta.without_batch_dim_inputs = without_batch_dim_inputs\n\n    # init local servable meta info\n    check_type.check_str('model_format', model_format)\n    model_format = model_format.lower()\n    if model_format not in (\"om\", \"mindir\", \"mindir_opt\", \"mindir_lite\"):\n        raise RuntimeError(\"model format can only be OM, MindIR or MindIR_Lite, case ignored\")\n\n    meta.local_meta.model_file = model_file\n    meta.local_meta.set_model_format(model_format)\n\n    if context is not None:\n        if not isinstance(context, Context):\n            raise RuntimeError(f\"Parameter 'context' should be Context, but gotten {type(context)}\")\n        meta.local_meta.model_context = context.model_context\n    elif isinstance(options, (GpuOptions, AclOptions)):\n        logger.warning(\n            \"'options' will be deprecated in the future, we recommend using 'context', if these two parameters \"\n            \"are both set, options will be ignored\")\n        meta.local_meta.model_context = 
options.context.model_context\n    elif options is not None:\n        raise RuntimeError(f\"Parameter 'options' should be None, GpuOptions or AclOptions, but \"\n                           f\"gotten {type(options)}\")\n\n    if config_file is not None:\n        check_type.check_str(\"config_file\", config_file)\n        meta.local_meta.config_file = config_file\n\n    ServableRegister_.declare_model(meta)\n    logger.info(f\"Declare model, model_file: {model_file} , model_format: {model_format},  with_batch_dim: \"\n                f\"{with_batch_dim}, options: {options}, without_batch_dim_inputs: {without_batch_dim_inputs}\"\n                f\", context: {context}, config file: {config_file}\")\n\n    return append_declared_model(meta.common_meta.model_key)\n\n\nclass Context:\n    \"\"\"\n    Context is used to customize device configurations. If Context is not specified, MindSpore Serving uses the default\n    device configurations. When inference backend is MindSpore Lite and the device type is Ascend or Gpu, the extra\n    `CPUDeviceInfo` will be used.\n\n    Args:\n        thread_num (int, optional): Set the number of threads at runtime. 
Only valid when using mindspore lite.\n        thread_affinity_core_list (tuple[int], list[int], optional): Set the thread lists to CPU cores.\n            Only valid when inference backend is MindSpore Lite.\n        enable_parallel (bool, optional): Set the status whether to perform model inference or training in parallel.\n            Only valid when inference backend is MindSpore Lite.\n\n    Raises:\n        RuntimeError: type or value of input parameters are invalid.\n\n    Examples:\n            >>> from mindspore_serving.server import register\n            >>> import numpy as np\n            >>> context = register.Context(thread_num=1, thread_affinity_core_list=[1,2], enable_parallel=True)\n            >>> context.append_device_info(register.GPUDeviceInfo(precision_mode=\"fp16\"))\n            >>> model = declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", context=context)\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        self.model_context = ModelContext_()\n        val_set_fun = {\n            \"thread_num\": self._set_thread_num,\n            \"thread_affinity_core_list\": self._set_thread_affinity_core_list,\n            \"enable_parallel\": self._set_enable_parallel\n        }\n        for k, v in kwargs.items():\n            if k not in val_set_fun:\n                raise RuntimeError(\"Set context failed, unsupported option \" + k)\n            val_set_fun[k](v)\n        self.device_types = []\n\n    def append_device_info(self, device_info):\n        \"\"\"Append one user-defined device info to the context\n\n         Args:\n            device_info (Union[CPUDeviceInfo, GPUDeviceInfo, AscendDeviceInfo]): User-defined device info for one\n                device, otherwise default values are used. 
You can customize device info for each device, and the system\n                selects the required device info based on the actual backend device and MindSpore inference package.\n\n         Raises:\n            RuntimeError: type or value of input parameters are invalid.\n        \"\"\"\n        if not isinstance(device_info, DeviceInfoContext):\n            raise RuntimeError(f\"Parameter 'device_info' should instance of CPUDeviceInfo, GPUDeviceInfo, or \"\n                               f\"AscendDeviceInfo, but actually {type(device_info)}\")\n        # pylint: disable=protected-access\n        info_map = device_info._as_context_map()\n        if not info_map[\"device_type\"]:\n            raise RuntimeError(\"Invalid DeviceInfoContext, device_type cannot be empty\")\n        device_type = info_map[\"device_type\"]\n        if device_type in self.device_types:\n            raise RuntimeError(f\"Device info of type {device_type} has already been appended\")\n        self.device_types.append(device_type)\n        self.model_context.append_device_info(info_map)\n\n    def _set_thread_num(self, val):\n        check_type.check_int(\"thread_num\", val, 1)\n        self.model_context.thread_num = val\n\n    def _set_thread_affinity_core_list(self, val):\n        check_type.check_int_tuple_list(\"thread_affinity_core_list\", val, 0)\n        self.model_context.thread_affinity_core_list = val\n\n    def _set_enable_parallel(self, val):\n        check_type.check_bool(\"enable_parallel\", val)\n        if val:\n            self.model_context.enable_parallel = 1\n        else:\n            self.model_context.enable_parallel = 0\n\n    def __str__(self):\n        res = f\"thread_num: {self.model_context.thread_num}, thread_affinity_core_list: \" \\\n              f\"{self.model_context.thread_affinity_core_list}, enable_parallel: \" \\\n              f\"{self.model_context.enable_parallel}, device_list, {self.model_context.device_list}\"\n        return res\n\n\nclass 
DeviceInfoContext:\n    def __init__(self):\n        \"\"\" Initialize context\"\"\"\n\n    def _as_context_map(self):\n        \"\"\"Transfer device info to dict of str,str\"\"\"\n        raise NotImplementedError\n\n\nclass CPUDeviceInfo(DeviceInfoContext):\n    \"\"\"\n    Helper class to set cpu device info.\n\n    Args:\n        precision_mode(str, optional): Option of model precision, and the value can be ``\"origin\"``, ``\"fp16\"``.\n            ``\"origin\"`` indicates that inference is performed with the preciesion defined in the model, and\n            ``\"fp16\"`` indicates that inference is performed based on FP16 precision.\n            Default: ``\"origin\"``.\n\n    Raises:\n        RuntimeError: Cpu option is invalid, or value is not str.\n\n    Examples:\n        >>> from mindspore_serving.server import register\n        >>> context = register.Context()\n        >>> context.append_device_info(register.CPUDeviceInfo(precision_mode=\"fp16\"))\n        >>> model = register.declare_model(model_file=\"deeptext.ms\", model_format=\"MindIR_Lite\", context=context)\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super(CPUDeviceInfo, self).__init__()\n        self.precision_mode = \"\"\n        val_set_fun = {\"precision_mode\": self._set_precision_mode}\n        for k, w in kwargs.items():\n            if k not in val_set_fun:\n                raise RuntimeError(\"Set cpu device info failed, unsupported option \" + k)\n            val_set_fun[k](w)\n        self.context_map = self._as_context_map()\n\n    def _set_precision_mode(self, val):\n        check_type.check_str(\"precision_mode\", val)\n        if val not in (\"origin\", \"fp16\"):\n            raise RuntimeError(f\"Cpu device info 'precision_mode' can only be 'origin', 'fp16'. 
given '{val}'\")\n        self.precision_mode = val\n\n    def _as_context_map(self):\n        \"\"\"Transfer cpu device info to dict of str,str\"\"\"\n        context_map = {}\n        if self.precision_mode:\n            context_map[\"precision_mode\"] = self.precision_mode\n        context_map[\"device_type\"] = \"cpu\"\n        return context_map\n\n\nclass GPUDeviceInfo(DeviceInfoContext):\n    \"\"\"\n    Helper class to set gpu device info.\n\n    Args:\n        precision_mode(str, optional): Option of model precision, and the value can be ``\"origin\"``, ``\"fp16\"``.\n            ``\"origin\"`` indicates that inference is performed with the preciesion defined in the model, and\n            ``\"fp16\"`` indicates that inference is performed based on FP16 precision.\n            Default: ``\"origin\"``.\n\n    Raises:\n        RuntimeError: Gpu option is invalid, or value is not str.\n\n    Examples:\n        >>> from mindspore_serving.server import register\n        >>> context = register.Context()\n        >>> context.append_device_info(register.GPUDeviceInfo(precision_mode=\"fp16\"))\n        >>> model = register.declare_model(model_file=\"deeptext.mindir\", model_format=\"MindIR\", context=context)\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super(GPUDeviceInfo, self).__init__()\n        self.precision_mode = \"\"\n        val_set_fun = {\"precision_mode\": self._set_precision_mode}\n        for k, w in kwargs.items():\n            if k not in val_set_fun:\n                raise RuntimeError(\"Set gpu device info failed, unsupported option \" + k)\n            val_set_fun[k](w)\n        self.context_map = self._as_context_map()\n\n    def _set_precision_mode(self, val):\n        \"\"\"Set option 'precision_mode', which means inference operator selection, and the value can be \"origin\",\n        \"fp16\", default \"origin\".\n\n        Args:\n            val (str): Value of option 'precision_mode'. 
\"origin\" inference with model definition.\n            \"fp16\" enable FP16 operator selection, with FP32 fallback. Default: \"origin\".\n\n        Raises:\n            RuntimeError: The type of value is not str, or the value is invalid.\n        \"\"\"\n        check_type.check_str('precision_mode', val)\n        if val not in (\"origin\", \"fp16\"):\n            raise RuntimeError(f\"Gpu device info 'precision_mode' can only be 'origin', 'fp16'. given '{val}'\")\n        self.precision_mode = val\n\n    def _as_context_map(self):\n        \"\"\"Transfer gpu device info to dict of str,str\"\"\"\n        context_map = {}\n        if self.precision_mode:\n            context_map[\"precision_mode\"] = self.precision_mode\n        context_map[\"device_type\"] = \"gpu\"\n        return context_map\n\n\nclass AscendDeviceInfo(DeviceInfoContext):\n    \"\"\"\n    Helper class to set Ascend device infos.\n\n    Args:\n        insert_op_cfg_path (str, optional): Path of aipp config file.\n        input_format (str, optional): Manually specify the model input format, the value can be ``\"ND\"``, ``\"NCHW\"``,\n            ``\"NHWC\"``, ``\"CHWN\"``, ``\"NC1HWC0\"``, or ``\"NHWC1C0\"``.\n        input_shape (str, optional): Manually specify the model input shape, such as\n            ``\"input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1\"``.\n        output_type (str, optional): Manually specify the model output type, the value can be ``\"FP16\"``, ``\"UINT8\"``\n            or ``\"FP32\"``. Default: ``\"FP32\"``.\n        precision_mode (str, optional): Model precision mode, the value can be ``\"force_fp16\"``,\n            ``\"allow_fp32_to_fp16\"``, ``\"must_keep_origin_dtype\"`` or ``\"allow_mix_precision\"``.\n            Default: ``\"force_fp16\"``.\n        op_select_impl_mode (str, optional): The operator selection mode, the value can be ``\"high_performance\"`` or\n            ``\"high_precision\"``. 
Default: ``\"high_performance\"``.\n        fusion_switch_config_path (str, optional): Configuration file path of the convergence rule, including graph\n             convergence and UB convergence. The system has built-in graph convergence and UB convergence rules, which\n             are enableed by default. You can disable the rules specified in the file by setting this parameter.\n        buffer_optimize_mode (str, optional): The value can be ``\"l1_optimize\"``, ``\"l2_optimize\"``,\n            ``\"off_optimize\"`` or ``\"l1_and_l2_optimize\"``. Default: ``\"l2_optimize\"``.\n    Raises:\n        RuntimeError: Ascend device info is invalid.\n\n    Examples:\n        >>> from mindspore_serving.server import register\n        >>> context = register.Context()\n        >>> context.append_device_info(register.AscendDeviceInfo(input_format=\"NCHW\"))\n        >>> model = register.declare_model(model_file=\"deeptext.ms\", model_format=\"MindIR_Lite\", context=context)\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super(AscendDeviceInfo, self).__init__()\n        self.insert_op_cfg_path = \"\"\n        self.input_format = \"\"\n        self.input_shape = \"\"\n        self.output_type = \"\"\n        self.precision_mode = \"\"\n        self.op_select_impl_mode = \"\"\n        self.fusion_switch_config_path = \"\"\n        self.buffer_optimize_mode = \"\"\n        val_set_fun = {\"insert_op_cfg_path\": self._set_insert_op_cfg_path,\n                       \"input_format\": self._set_input_format,\n                       \"input_shape\": self._set_input_shape,\n                       \"output_type\": self._set_output_type,\n                       \"precision_mode\": self._set_precision_mode,\n                       \"op_select_impl_mode\": self._set_op_select_impl_mode,\n                       \"fusion_switch_config_path\": self._set_fusion_switch_config_path,\n                       \"buffer_optimize_mode\": self._set_buffer_optimize_mode}\n\n        for k, 
w in kwargs.items():\n            if k not in val_set_fun:\n                raise RuntimeError(\"Set ascend device info failed, unsupported parameter \" + k)\n            val_set_fun[k](w)\n        self.context_map = self._as_context_map()\n\n    def _set_insert_op_cfg_path(self, val):\n        \"\"\"Set option 'insert_op_cfg_path'\n\n        Args:\n            val (str): Value of option 'insert_op_cfg_path'.\n\n        Raises:\n            RuntimeError: The type of value is not str.\n        \"\"\"\n        check_type.check_str('insert_op_cfg_path', val)\n        self.insert_op_cfg_path = val\n\n    def _set_input_format(self, val):\n        \"\"\"Set option 'input_format', manually specify the model input format, and the value can be\n        \"ND\", \"NCHW\", \"NHWC\", \"CHWN\", \"NC1HWC0\", or \"NHWC1C0\".\n\n        Args:\n            val (str): Value of option 'input_format', and the value can be \"ND\", \"NCHW\", \"NHWC\",\n                \"CHWN\", \"NC1HWC0\", or \"NHWC1C0\".\n\n        Raises:\n            RuntimeError: The type of value is not str, or the value is invalid.\n        \"\"\"\n        check_type.check_str('input_format', val)\n        if val not in (\"ND\", \"NCHW\", \"NHWC\", \"CHWN\", \"NC1HWC0\", \"NHWC1C0\"):\n            raise RuntimeError(f\"Ascend device info 'input_format' can only be 'ND', 'NCHW', 'NHWC', 'CHWN', 'NC1HWC0'\"\n                               f\", or 'NHWC1C0', actually given '{val}'\")\n        self.input_format = val\n\n    def _set_input_shape(self, val):\n        \"\"\"Set option 'input_shape', manually specify the model input shape, such as\n        \"input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1\".\n\n        Args:\n            val (str): Value of option 'input_shape'.\n\n        Raises:\n            RuntimeError: The type of value is not str, or the value is invalid.\n        \"\"\"\n        check_type.check_str('input_shape', val)\n        self.input_shape = val\n\n    def _set_output_type(self, 
val):\n        \"\"\"Set option 'output_type', manually specify the model output type, and the value can be \"FP16\", \"UINT8\", or\n        \"FP32\", default \"FP32\".\n\n        Args:\n            val (str): Value of option 'output_type', and the value can be \"FP16\", \"UINT8\", or \"FP32\", default \"FP32\".\n\n        Raises:\n            RuntimeError: The type of value is not str, or the value is invalid.\n        \"\"\"\n        check_type.check_str('output_type', val)\n        if val not in (\"FP32\", \"FP16\", \"UINT8\"):\n            raise RuntimeError(f\"Ascend device info 'op_select_impl_mode' can only be 'FP32'(default), 'FP16' or \"\n                               f\"'UINT8', actually given '{val}'\")\n        self.output_type = val\n\n    def _set_precision_mode(self, val):\n        \"\"\"Set option 'precision_mode',  which means operator selection mode, and the value can be \"force_fp16\"，\n        \"force_fp16\", \"must_keep_origin_dtype\", or \"allow_mix_precision\", default \"force_fp16\".\n\n        Args:\n            val (str): Value of option 'precision_mode', and the value can be \"force_fp16\"， \"force_fp16\",\n                \"must_keep_origin_dtype\", or \"allow_mix_precision\", default \"force_fp16\".\n\n        Raises:\n            RuntimeError: The type of value is not str, or the value is invalid.\n        \"\"\"\n        check_type.check_str('precision_mode', val)\n        if val not in (\"force_fp16\", \"allow_fp32_to_fp16\", \"must_keep_origin_dtype\", \"allow_mix_precision\"):\n            raise RuntimeError(f\"Ascend device info 'precision_mode' can only be 'force_fp16'(default), \"\n                               f\"'allow_fp32_to_fp16' 'must_keep_origin_dtype' or 'allow_mix_precision', \"\n                               f\"actually given '{val}'\")\n        self.precision_mode = val\n\n    def _set_op_select_impl_mode(self, val):\n        \"\"\"Set option 'op_select_impl_mode', which means model precision mode, and the value 
can be \"high_performance\"\n        or \"high_precision\",  default \"high_performance\".\n\n        Args:\n            val (str): Value of option 'op_select_impl_mode'，which can be \"high_performance\" or \"high_precision\",\n                default \"high_performance\".\n\n        Raises:\n            RuntimeError: The type of value is not str, or the value is invalid.\n        \"\"\"\n        check_type.check_str('op_select_impl_mode', val)\n        if val not in (\"high_performance\", \"high_precision\"):\n            raise RuntimeError(f\"Ascend device info 'op_select_impl_mode' can only be 'high_performance'(default) or \"\n                               f\"'high_precision', actually given '{val}'\")\n        self.op_select_impl_mode = val\n\n    def _set_fusion_switch_config_path(self, val):\n        check_type.check_str('fusion_switch_config_path', val)\n        self.fusion_switch_config_path = val\n\n    def _set_buffer_optimize_mode(self, val):\n        check_type.check_str('buffer_optimize_mode', val)\n        if val not in (\"l1_optimize\", \"l2_optimize\", \"off_optimize\", \"l1_and_l2_optimize\"):\n            raise RuntimeError(f\"Ascend device info 'buffer_optimize_mode' can only be 'off_optimize'(default), \"\n                               f\"'l1_optimize', 'l2_optimize' or 'l1_and_l2_optimize', actually given '{val}'\")\n        self.buffer_optimize_mode = val\n\n    def _as_context_map(self):\n        \"\"\"Transfer acl device info to dict of str,str\"\"\"\n        context_map = {}\n        if self.insert_op_cfg_path:\n            context_map[\"insert_op_cfg_path\"] = self.insert_op_cfg_path\n        if self.input_format:\n            context_map[\"input_format\"] = self.input_format\n        if self.input_shape:\n            context_map[\"input_shape\"] = self.input_shape\n        if self.output_type:\n            context_map[\"output_type\"] = self.output_type\n        if self.precision_mode:\n            context_map[\"precision_mode\"] = 
self.precision_mode\n        if self.op_select_impl_mode:\n            context_map[\"op_select_impl_mode\"] = self.op_select_impl_mode\n        if self.buffer_optimize_mode:\n            context_map[\"buffer_optimize_mode\"] = self.buffer_optimize_mode\n        if self.fusion_switch_config_path:\n            context_map[\"fusion_switch_config_path\"] = self.fusion_switch_config_path\n        context_map[\"device_type\"] = \"ascend\"\n        return context_map\n\n\nclass AclOptions:\n    \"\"\"\n    Helper class to set Ascend device infos.\n\n    .. warning::\n        'AclOptions' is deprecated from version 1.6.0 and will be removed in a future version, use\n        :class:`mindspore_serving.server.register.AscendDeviceInfo` instead.\n\n    Args:\n        insert_op_cfg_path (str, optional): Path of aipp config file.\n        input_format (str, optional): Manually specify the model input format, the value can be ``\"ND\"``, ``\"NCHW\"``,\n            ``\"NHWC\"``, ``\"CHWN\"``, ``\"NC1HWC0\"``, or ``\"NHWC1C0\"``.\n        input_shape (str, optional): Manually specify the model input shape, such as\n            ``\"input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1\"``.\n        output_type (str, optional): Manually specify the model output type, the value can be ``\"FP16\"``, ``\"UINT8\"`` or\n            ``\"FP32\"``. Default: ``\"FP32\"``.\n        precision_mode (str, optional): Model precision mode, the value can be ``\"force_fp16\"``,\n            ``\"allow_fp32_to_fp16\"``, ``\"must_keep_origin_dtype\"`` or ``\"allow_mix_precision\"``.\n            Default: ``\"force_fp16\"``.\n        op_select_impl_mode (str, optional): The operator selection mode, the value can be ``\"high_performance\"`` or\n            ``\"high_precision\"``. 
Default: ``\"high_performance\"``.\n\n    Raises:\n        RuntimeError: Acl option is invalid, or value is not str.\n\n    Examples:\n        >>> from mindspore_serving.server import register\n        >>> options = register.AclOptions(op_select_impl_mode=\"high_precision\", precision_mode=\"allow_fp32_to_fp16\")\n        >>> register.declare_servable(servable_file=\"deeptext.mindir\", model_format=\"MindIR\", options=options)\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super(AclOptions, self).__init__()\n        logger.warning(\"'AclOptions' is deprecated from version 1.6.0 and will be removed in a future version, \"\n                       \"use 'mindspore_serving.server.register.AscendDeviceInfo' instead.\")\n        device_info = AscendDeviceInfo(**kwargs)\n        self.context = Context()\n        self.context.append_device_info(device_info)\n\n\nclass GpuOptions:\n    \"\"\"\n    Helper class to set gpu options.\n\n    .. warning::\n        'GpuOptions' is deprecated from version 1.6.0 and will be removed in a future version, use\n        :class:`mindspore_serving.server.register.GPUDeviceInfo` instead.\n\n    Args:\n        precision_mode(str, optional): inference operator selection, and the value can be ``\"origin\"``, ``\"fp16\"``.\n            Default: ``\"origin\"``.\n\n    Raises:\n        RuntimeError: Gpu option is invalid, or value is not str.\n\n    Examples:\n        >>> from mindspore_serving.server import register\n        >>> options = register.GpuOptions(precision_mode=\"origin\")\n        >>> register.declare_servable(servable_file=\"deeptext.mindir\", model_format=\"MindIR\", options=options)\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super(GpuOptions, self).__init__()\n        logger.warning(\"'GpuOptions' is deprecated from version 1.6.0 and will be removed in a future version, \"\n                       \"use 'mindspore_serving.server.register.GPUDeviceInfo' instead.\")\n        device_info = 
GPUDeviceInfo(**kwargs)\n        self.context = Context()\n        self.context.append_device_info(device_info)\n"
  },
  {
    "path": "mindspore_serving/server/register/stage_function.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Postprocessing registration interface\"\"\"\n\nfrom mindspore_serving._mindspore_serving import StageFunctionStorage_\nfrom mindspore_serving import log as logger\nfrom .utils import get_servable_dir, get_func_name\n\n\ndef check_stage_function(method_name, function_name, inputs_count, outputs_count):\n    \"\"\"Check whether inputs and outputs count is equal with last registered\"\"\"\n    func_info = get_stage_info(function_name)\n    if not func_info:\n        return\n    last_inputs_count, last_output_count = func_info\n    if last_inputs_count != inputs_count:\n        raise RuntimeError(f\"Stage function '{function_name}' inputs count {inputs_count} not match \"\n                           f\"last registered count {last_inputs_count}, method name '{method_name}'\")\n    if last_output_count != outputs_count:\n        raise RuntimeError(f\"Stage function '{function_name}' outputs count {outputs_count} not match \"\n                           f\"last registered count {last_output_count}, method name '{method_name}'\")\n\n\ndef get_stage_info(function_name):\n    \"\"\"Get cpp and python function inputs and outputs count\"\"\"\n    func_info = StageFunctionStorage_.get_instance().get_pycpp_function_info(function_name)\n    if not func_info:\n        return None\n    
return func_info\n\n\nclass StageFunctionStorage:\n    \"\"\"Register and get stage function info: func, name, input and output count\"\"\"\n\n    def __init__(self):\n        self.function = {}\n        self.storage = StageFunctionStorage_.get_instance()\n\n    def register(self, method_name, fun, function_name, inputs_count, outputs_count, use_with_size):\n        check_stage_function(method_name, function_name, inputs_count, outputs_count)\n        if function_name in self.function:\n            if self.function[function_name][\"use_with_size\"] != use_with_size:\n                raise RuntimeError(f\"Failed to add stage function {function_name}: parameter 'batch_size' in \"\n                                   f\"multiple 'add_stage' should be enabled or disabled consistently\")\n        self.function[function_name] = {\"fun\": fun, \"inputs_count\": inputs_count, \"outputs_count\": outputs_count,\n                                        \"use_with_size\": use_with_size}\n        self.storage.register(function_name, inputs_count, outputs_count)\n\n    def get(self, function_name):\n        func = self.function.get(function_name, None)\n        if func is None:\n            raise RuntimeError(f\"Stage function '{function_name}' not found\")\n        return func\n\n\nstage_function_storage = StageFunctionStorage()\n\n\ndef register_stage_function(method_name, func, inputs_count, outputs_count, use_with_size):\n    \"\"\"register stage function\"\"\"\n    servable_name = get_servable_dir()\n    func_name = get_func_name(func)\n    name = servable_name + \".\" + func_name\n\n    logger.info(f\"Register stage function {name} {inputs_count} {outputs_count}, use batch size: {use_with_size}\")\n    stage_function_storage.register(method_name, func, name, inputs_count, outputs_count, use_with_size)\n"
  },
  {
    "path": "mindspore_serving/server/register/utils.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Common implement for worker\"\"\"\nimport inspect\nimport os\n\n\ndef get_servable_dir():\n    \"\"\"Get the directory where servable is located. The name of the directory is the name of servable\"\"\"\n    stack = inspect.stack()\n    for item in stack:\n        if item.filename.endswith(\"servable_config.py\"):\n            abs_path = os.path.realpath(item.filename)\n            last_dir = os.path.split(abs_path)[0]\n            last_dir = os.path.split(last_dir)[1]\n            if not last_dir:\n                continue\n            return last_dir\n    raise RuntimeError(\"Failed to obtain the directory of servable_config.py\")\n\n\ndef get_func_name(func):\n    \"\"\"Get function name for preprocess and postprocess, as the identification name\"\"\"\n    return func.__name__\n"
  },
  {
    "path": "mindspore_serving/server/start_extra_worker.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Start worker process with single core servable\"\"\"\n\nimport os\nimport signal\nimport argparse\n\nfrom mindspore_serving.server import worker\nfrom mindspore_serving.server.common import check_type\nfrom mindspore_serving._mindspore_serving import ExitSignalHandle_\nfrom mindspore_serving._mindspore_serving import Worker_\n\n\ndef start_extra_worker(servable_directory, servable_name, version_number, device_type, device_ids_empty,\n                       index, master_address, dec_key, dec_mode, listening_master, enable_lite):\n    \"\"\"Start worker process with single core servable\"\"\"\n    signal.signal(signal.SIGCHLD, signal.SIG_DFL)  # for ccec compiler\n    check_type.check_str('servable_directory', servable_directory)\n    check_type.check_str('servable_name', servable_name)\n    check_type.check_int('version_number', version_number, 0)\n    check_type.check_str('device_type', device_type)\n    check_type.check_bool('device_ids_empty', device_ids_empty)\n    check_type.check_int('index', index, 0)\n\n    check_type.check_str('master_address', master_address)\n    check_type.check_bool('listening_master', listening_master)\n    check_type.check_bool('enable_lite', enable_lite)\n\n    ExitSignalHandle_.start()  # Set flag to running and receive Ctrl+C message\n\n 
   worker_pid = os.getpid()\n    unix_socket_dir = \"unix_socket_files\"\n    try:\n        os.mkdir(unix_socket_dir)\n    except FileExistsError:\n        pass\n    worker_address = f\"unix:{unix_socket_dir}/serving_worker_{servable_name}_version{version_number}_extra{index}\" \\\n                     f\"_{worker_pid}\"\n    if len(worker_address) > 90:  # limit maximum unix domain socket address length\n        worker_address = worker_address[:40] + \"___\" + worker_address[-40:]\n    try:\n        worker.start_extra_servable(servable_directory=servable_directory, servable_name=servable_name,\n                                    version_number=version_number, device_type=device_type,\n                                    device_ids_empty=device_ids_empty, dec_key=dec_key, dec_mode=dec_mode,\n                                    master_address=master_address, worker_address=worker_address,\n                                    enable_lite=enable_lite)\n    except Exception as ex:\n        Worker_.notify_failed(master_address,\n                              f\"{{servable:{servable_name}, version:{version_number}, extra:{index}, <{ex}>}}\")\n        raise\n\n\ndef parse_args_and_start():\n    \"\"\"Parse args and start extra worker\"\"\"\n    parser = argparse.ArgumentParser(description=\"Serving start extra worker\")\n    parser.add_argument('--servable_directory', type=str, required=True, help=\"servable directory\")\n    parser.add_argument('--servable_name', type=str, required=True, help=\"servable name\")\n    parser.add_argument('--version_number', type=int, required=True, help=\"version numbers\")\n    parser.add_argument('--device_type', type=str, required=True, help=\"device type\")\n    parser.add_argument('--device_ids_empty', type=str, required=True, help=\"whether device ids are empty\")\n    parser.add_argument('--index', type=int, required=True, help=\"extra worker index\")\n    parser.add_argument('--enable_lite', type=str, required=True, help=\"enable lite\")\n    
parser.add_argument('--master_address', type=str, required=True, help=\"master address\")\n    parser.add_argument('--dec_key_pipe_file', type=str, required=True, help=\"dec key pipe file\")\n    parser.add_argument('--dec_mode', type=str, required=True, help=\"dec mode\")\n    parser.add_argument('--listening_master', type=str, required=True, help=\"whether listening master\")\n    args = parser.parse_args()\n\n    servable_directory = args.servable_directory\n    servable_name = args.servable_name\n    version_number = int(args.version_number)\n    device_type = args.device_type\n    # pylint: disable=simplifiable-if-expression\n    device_ids_empty = True if args.device_ids_empty.lower() == \"true\" else False\n    index = int(args.index)\n    master_address = args.master_address\n    dec_key_pipe = args.dec_key_pipe_file\n    if dec_key_pipe != \"None\":\n        with open(dec_key_pipe, \"rb\") as fp:\n            dec_key = fp.read()\n        prefix = \"serving_temp_dec_\"\n        if dec_key_pipe[:len(prefix)] == prefix:\n            os.remove(dec_key_pipe)\n    else:\n        dec_key = None\n    dec_mode = args.dec_mode\n    # pylint: disable=simplifiable-if-expression\n    listening_master = True if args.listening_master.lower() == \"true\" else False\n\n    # pylint: disable=simplifiable-if-expression\n    enable_lite = True if args.enable_lite.lower() == \"true\" else False\n    start_extra_worker(servable_directory, servable_name, version_number, device_type, device_ids_empty,\n                       index, master_address, dec_key, dec_mode, listening_master, enable_lite)\n\n\nif __name__ == '__main__':\n    parse_args_and_start()\n"
  },
  {
    "path": "mindspore_serving/server/start_worker.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Start worker process with single core servable\"\"\"\n\nimport os\nimport signal\nimport argparse\n\nfrom mindspore_serving.server import worker\nfrom mindspore_serving.server.common import check_type\nfrom mindspore_serving._mindspore_serving import ExitSignalHandle_\nfrom mindspore_serving._mindspore_serving import Worker_\n\n\ndef start_worker(servable_directory, servable_name, version_number,\n                 device_type, device_id, master_address, dec_key, dec_mode, listening_master, enable_lite):\n    \"\"\"Start worker process with single core servable\"\"\"\n    signal.signal(signal.SIGCHLD, signal.SIG_DFL)  # for ccec compiler\n    check_type.check_str('servable_directory', servable_directory)\n    check_type.check_str('servable_name', servable_name)\n    check_type.check_int('version_number', version_number, 0)\n    check_type.check_str('device_type', device_type)\n    check_type.check_int('device_id', device_id, 0)\n\n    check_type.check_str('master_address', master_address)\n    check_type.check_bool('listening_master', listening_master)\n    check_type.check_bool('enable_lite', enable_lite)\n\n    ExitSignalHandle_.start()  # Set flag to running and receive Ctrl+C message\n\n    # for servable_config.py to get device id of current worker.\n    
os.environ[\"SERVING_DEVICE_ID\"] = str(device_id)\n    worker_pid = os.getpid()\n    unix_socket_dir = \"unix_socket_files\"\n    try:\n        os.mkdir(unix_socket_dir)\n    except FileExistsError:\n        pass\n    worker_address = f\"unix:{unix_socket_dir}/serving_worker_{servable_name}_device{device_id}_{worker_pid}\"\n    if len(worker_address) > 90:  # limit maximum unix domain socket address length\n        worker_address = worker_address[:40] + \"___\" + worker_address[-40:]\n    try:\n        worker.start_servable(servable_directory=servable_directory, servable_name=servable_name,\n                              version_number=version_number, device_type=device_type, device_id=device_id,\n                              master_address=master_address, worker_address=worker_address,\n                              dec_key=dec_key, dec_mode=dec_mode, enable_lite=enable_lite)\n    except Exception as ex:\n        Worker_.notify_failed(master_address,\n                              f\"{{servable name:{servable_name}, device id:{device_id}, <{ex}>}}\")\n        raise\n\n\ndef parse_args_and_start():\n    \"\"\"Parse args and start worker\"\"\"\n    parser = argparse.ArgumentParser(description=\"Serving start worker\")\n    parser.add_argument('--servable_directory', type=str, required=True, help=\"servable directory\")\n    parser.add_argument('--servable_name', type=str, required=True, help=\"servable name\")\n    parser.add_argument('--version_number', type=int, required=True, help=\"version numbers\")\n    parser.add_argument('--device_type', type=str, required=True, help=\"device type\")\n    parser.add_argument('--device_id', type=str, required=True, help=\"device id\")\n    parser.add_argument('--master_address', type=str, required=True, help=\"master address\")\n    parser.add_argument('--enable_lite', type=str, required=True, help=\"enable lite\")\n    parser.add_argument('--dec_key_pipe_file', type=str, required=True, help=\"dec key pipe 
file\")\n    parser.add_argument('--dec_mode', type=str, required=True, help=\"dec mode\")\n    parser.add_argument('--listening_master', type=str, required=True, help=\"whether listening master\")\n    args = parser.parse_args()\n\n    servable_directory = args.servable_directory\n    servable_name = args.servable_name\n    version_number = int(args.version_number)\n    device_type = args.device_type\n    device_id = int(args.device_id)\n    master_address = args.master_address\n    dec_key_pipe = args.dec_key_pipe_file\n    if dec_key_pipe != \"None\":\n        with open(dec_key_pipe, \"rb\") as fp:\n            dec_key = fp.read()\n        prefix = \"serving_temp_dec_\"\n        if dec_key_pipe[:len(prefix)] == prefix:\n            os.remove(dec_key_pipe)\n    else:\n        dec_key = None\n    dec_mode = args.dec_mode\n    # pylint: disable=simplifiable-if-expression\n    listening_master = True if args.listening_master.lower() == \"true\" else False\n\n    # pylint: disable=simplifiable-if-expression\n    enable_lite = True if args.enable_lite.lower() == \"true\" else False\n    start_worker(servable_directory, servable_name, version_number, device_type, device_id, master_address,\n                 dec_key, dec_mode, listening_master, enable_lite)\n\n\nif __name__ == '__main__':\n    parse_args_and_start()\n"
  },
  {
    "path": "mindspore_serving/server/worker/__init__.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MindSpore worker implement\"\"\"\n\nfrom ._worker import start_servable, start_extra_servable, stop, get_newest_version_number\n"
  },
  {
    "path": "mindspore_serving/server/worker/_worker.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Interface for start up servable\"\"\"\n\nimport os\nimport sys\nfrom functools import wraps\nfrom mindspore_serving import log as logger\nfrom mindspore_serving.server.common import check_type, get_abs_path\nfrom mindspore_serving.server.worker import init_mindspore\n\nfrom mindspore_serving._mindspore_serving import ExitSignalHandle_\nfrom mindspore_serving._mindspore_serving import Worker_\nfrom mindspore_serving._mindspore_serving import ServableContext_\nfrom .task import _start_py_task\n\n_wait_and_clear_thread = None\n\n\ndef _set_enable_lite(enable_lite):\n    \"\"\"Set device id, default 0\"\"\"\n    ServableContext_.get_instance().set_enable_lite(enable_lite)\n\n\ndef _set_device_id(device_id):\n    \"\"\"Set device id, default 0\"\"\"\n    ServableContext_.get_instance().set_device_id(device_id)\n\n\ndef _set_device_type(device_type):\n    \"\"\"Set device type, now can be 'None'(default), 'GPU' and 'Ascend', 'Davinci'(same as 'Ascend'), case ignored. 
\"\"\"\n    if device_type is not None:\n        check_type.check_str('device_type', device_type)\n        ServableContext_.get_instance().set_device_type_str(device_type)\n    else:\n        ServableContext_.get_instance().set_device_type_str('None')  # depend on MindSpore build target\n\n\ndef get_newest_version_number(servable_directory, servable_name):\n    \"\"\"Get newest version number of servable\"\"\"\n    max_version = 0\n    servable_directory = get_abs_path(servable_directory)\n    version_root_dir = os.path.join(servable_directory, servable_name)\n    try:\n        files = os.listdir(version_root_dir)\n    except FileNotFoundError:\n        return 0\n    for file in files:\n        if not os.path.isdir(os.path.join(version_root_dir, file)):\n            continue\n        if not file.isdigit() or file == \"0\" and str(int(file)) != file:\n            continue\n        version = int(file)\n        if max_version < version:\n            max_version = version\n    return max_version\n\n\ndef stop():\n    r\"\"\"\n    Stop the running of worker.\n\n    Examples:\n        >>> import os\n        >>> from mindspore_serving import server\n        >>>\n        >>> servable_dir = os.path.abspath(\".\")\n        >>> config = server.ServableConfig(servable_dir, \"lenet\", device_ids=0)\n        >>> server.start_servables(servable_configs=config)\n        >>> server.start_grpc_server(\"0.0.0.0:5500\")\n        >>> ...\n        >>> server.stop()\n    \"\"\"\n\n    Worker_.stop_and_clear()\n\n\ndef stop_on_except(func):\n    \"\"\"Wrap of clear environment and exit on Serving exception\"\"\"\n\n    @wraps(func)\n    def handle_except(*args, **kwargs):\n        try:\n            ExitSignalHandle_.start()  # Set flag to running and receive Ctrl+C message\n            func(*args, **kwargs)\n        except:\n            stop()\n            raise\n\n    return handle_except\n\n\ndef _load_servable_config(servable_directory, servable_name):\n    \"\"\"Load servable config 
named servable_config.py in directory `servable_directory`/`servable_name` \"\"\"\n    config_dir = os.path.join(servable_directory, servable_name)\n    if not os.path.isdir(config_dir):\n        raise RuntimeError(f\"Load servable config failed, directory '{config_dir}' not exist, \"\n                           f\"servable directory '{servable_directory}', servable name '{servable_name}'\")\n    config_file = os.path.join(config_dir, \"servable_config.py\")\n    if not os.path.isfile(config_file):\n        raise RuntimeError(f\"Load servable config failed, file '{config_file}' not exist, \"\n                           f\"servable directory '{servable_directory}', servable name '{servable_name}'\")\n    sys.path.append(servable_directory)\n    try:\n        __import__(servable_name + \".servable_config\")\n    except Exception as e:\n        logger.error(f\"import {servable_name}.servable_config failed, {str(e)}\")\n        raise RuntimeError(f\"import {servable_name}.servable_config failed, {str(e)}\")\n\n\n@stop_on_except\ndef start_servable(servable_directory, servable_name, version_number,\n                   device_type, device_id, master_address, worker_address, dec_key, dec_mode, enable_lite):\n    r\"\"\"\n    Start up the servable named 'servable_name' defined in 'servable_directory', and link the worker to the master\n    through gRPC master_address and worker_address.\n    \"\"\"\n    check_type.check_str('servable_directory', servable_directory)\n    check_type.check_str('servable_name', servable_name)\n    check_type.check_int('version_number', version_number, 0)\n    check_type.check_int('device_id', device_id, 0)\n    check_type.check_str('master_address', master_address)\n    check_type.check_str('worker_address', worker_address)\n    if dec_key is not None:\n        check_type.check_bytes('dec_key', dec_key)\n    else:\n        dec_key = ''\n    check_type.check_str('dec_mode', dec_mode)\n    check_type.check_bool('enable_lite', enable_lite)\n    
_set_enable_lite(enable_lite)\n\n    _load_servable_config(servable_directory, servable_name)\n    model_names = Worker_.get_declared_model_names()\n    if model_names:\n        init_mindspore.init_mindspore_cxx_env(enable_lite)\n        newest_version_number = get_newest_version_number(servable_directory, servable_name)\n        if not newest_version_number:\n            raise RuntimeError(\n                f\"There is no valid version directory of models while there are models declared in servable_config.py, \"\n                f\"servable directory: {servable_directory}, servable name: {servable_name}\")\n    if version_number == 0:\n        version_number = 1\n\n    _set_device_type(device_type)\n    _set_device_id(device_id)\n    Worker_.start_servable(servable_directory, servable_name, version_number, master_address, worker_address,\n                           dec_key, dec_mode)\n    _start_py_task()\n\n\n@stop_on_except\ndef start_extra_servable(servable_directory, servable_name, version_number, device_type, device_ids_empty,\n                         dec_key, dec_mode, master_address, worker_address, enable_lite):\n    r\"\"\"\n    Start up the servable named 'servable_name' defined in 'servable_directory', and link the worker to the master\n    through gRPC master_address and worker_address.\n    \"\"\"\n    check_type.check_str('servable_directory', servable_directory)\n    check_type.check_str('servable_name', servable_name)\n    check_type.check_int('version_number', version_number, 0)\n    check_type.check_str('device_type', device_type)\n    check_type.check_bool('device_ids_empty', device_ids_empty)\n    check_type.check_str('master_address', master_address)\n    check_type.check_str('worker_address', worker_address)\n    if dec_key is not None:\n        check_type.check_bytes('dec_key', dec_key)\n    else:\n        dec_key = ''\n    check_type.check_str('dec_mode', dec_mode)\n    check_type.check_bool('enable_lite', enable_lite)\n    
_set_enable_lite(enable_lite)\n\n    _load_servable_config(servable_directory, servable_name)\n    model_names = Worker_.get_declared_model_names()\n    if model_names:\n        init_mindspore.init_mindspore_cxx_env(enable_lite)\n        newest_version_number = get_newest_version_number(servable_directory, servable_name)\n        if not newest_version_number:\n            raise RuntimeError(\n                f\"There is no valid version directory of models while there are models declared in servable_config.py, \"\n                f\"servable directory: {servable_directory}, servable name: {servable_name}\")\n    if version_number == 0:\n        version_number = 1\n\n    _set_device_type(device_type)\n    Worker_.start_extra_servable(servable_directory, servable_name, version_number, device_ids_empty,\n                                 dec_key, dec_mode, master_address, worker_address)\n    _start_py_task()\n"
  },
  {
    "path": "mindspore_serving/server/worker/check_version.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"version and config check\"\"\"\nimport os\nimport sys\nimport subprocess\nfrom pathlib import Path\nfrom packaging import version\nfrom mindspore_serving import log as logger\n\n\nclass AscendEnvChecker:\n    \"\"\"ascend environment check\"\"\"\n\n    def __init__(self):\n        atlas_nnae_version = \"/usr/local/Ascend/nnae/latest/compiler/version.info\"\n        atlas_toolkit_version = \"/usr/local/Ascend/ascend-toolkit/latest/compiler/version.info\"\n        hisi_fwk_version = \"/usr/local/Ascend/latest/compiler/version.info\"\n        if os.path.exists(atlas_nnae_version):\n            # atlas default path\n            self.fwk_path = \"/usr/local/Ascend/nnae/latest\"\n            self.op_impl_path = \"/usr/local/Ascend/nnae/latest/opp/built-in/op_impl/ai_core/tbe\"\n            self.tbe_path = self.fwk_path + \"/lib64\"\n            self.cce_path = self.fwk_path + \"/compiler/ccec_compiler/bin\"\n            self.fwk_version = atlas_nnae_version\n            self.op_path = \"/usr/local/Ascend/nnae/latest/opp\"\n            self.aicpu_path = \"/usr/local/Ascend/nnae/latest\"\n        elif os.path.exists(atlas_toolkit_version):\n            # atlas default path\n            self.fwk_path = \"/usr/local/Ascend/ascend-toolkit/latest\"\n            self.op_impl_path = 
\"/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe\"\n            self.tbe_path = self.fwk_path + \"/lib64\"\n            self.cce_path = self.fwk_path + \"/compiler/ccec_compiler/bin\"\n            self.fwk_version = atlas_toolkit_version\n            self.op_path = \"/usr/local/Ascend/ascend-toolkit/latest/opp\"\n            self.aicpu_path = \"/usr/local/Ascend/ascend-toolkit/latest\"\n        elif os.path.exists(hisi_fwk_version):\n            # hisi default path\n            self.fwk_path = \"/usr/local/Ascend/latest\"\n            self.op_impl_path = \"/usr/local/Ascend/latest/opp/built-in/op_impl/ai_core/tbe\"\n            self.tbe_path = self.fwk_path + \"/lib64\"\n            self.cce_path = self.fwk_path + \"/compiler/ccec_compiler/bin\"\n            self.fwk_version = hisi_fwk_version\n            self.op_path = \"/usr/local/Ascend/latest/opp\"\n            self.aicpu_path = \"/usr/local/Ascend/latest\"\n        else:\n            # custom or unknown environment\n            self.fwk_path = \"\"\n            self.op_impl_path = \"\"\n            self.tbe_path = \"\"\n            self.cce_path = \"\"\n            self.fwk_version = \"\"\n            self.op_path = \"\"\n            self.aicpu_path = \"\"\n\n        # env\n        self.path = os.getenv(\"PATH\")\n        self.python_path = os.getenv(\"PYTHONPATH\")\n        self.ld_lib_path = os.getenv(\"LD_LIBRARY_PATH\")\n        self.ascend_opp_path = os.getenv(\"ASCEND_OPP_PATH\")\n        self.ascend_aicpu_path = os.getenv(\"ASCEND_AICPU_PATH\")\n\n        # check content\n        self.path_check = \"/compiler/ccec_compiler/bin\"\n        self.python_path_check = \"opp/built-in/op_impl/ai_core/tbe\"\n        self.ld_lib_path_check_fwk = \"/lib64\"\n        self.ld_lib_path_check_addons = \"/add-ons\"\n        self.ascend_opp_path_check = \"/op\"\n        self.v = \"\"\n\n    def check_env(self, e):\n        \"\"\"check system env\"\"\"\n        self._check_env()\n        raise 
e\n\n    def set_env(self):\n        \"\"\"set env: LD_LIBRARY_PATH, PATH, ASCEND_OPP_PATH\"\"\"\n        if not self.tbe_path:\n            self._check_env()\n            return\n\n        if Path(self.tbe_path).is_dir():\n            if os.getenv('LD_LIBRARY_PATH'):\n                os.environ['LD_LIBRARY_PATH'] = self.tbe_path + \":\" + os.environ['LD_LIBRARY_PATH']\n            else:\n                os.environ['LD_LIBRARY_PATH'] = self.tbe_path\n        else:\n            logger.warning(f\"No such directory: {self.tbe_path}, Please check if Ascend 910 AI software package is \"\n                           f\"installed correctly.\")\n\n        if Path(self.op_impl_path).is_dir():\n            # python path for sub process\n            if os.getenv('PYTHONPATH'):\n                os.environ['PYTHONPATH'] = self.op_impl_path + \":\" + os.environ['PYTHONPATH']\n            else:\n                os.environ['PYTHONPATH'] = self.op_impl_path\n            # sys path for this process\n            sys.path.append(self.op_impl_path)\n\n            os.environ['TBE_IMPL_PATH'] = self.op_impl_path\n        else:\n            logger.warning(\n                f\"No such directory: {self.op_impl_path}, Please check if Ascend AI software package (Ascend Data \"\n                \"Center Solution) is installed correctly.\")\n            return\n\n        if Path(self.cce_path).is_dir():\n            os.environ['PATH'] = self.cce_path + \":\" + os.environ['PATH']\n        else:\n            logger.warning(\n                f\"No such directory: {self.cce_path}, Please check if Ascend AI software package (Ascend Data Center \"\n                \"Solution) is installed correctly.\")\n            return\n\n        if self.op_path is None:\n            pass\n        elif Path(self.op_path).is_dir():\n            os.environ['ASCEND_OPP_PATH'] = self.op_path\n        else:\n            logger.warning(\n                f\"No such directory: {self.op_path}, Please check if Ascend AI 
software package (Ascend Data Center \"\n                \"Solution) is installed correctly.\")\n            return\n\n        if self.aicpu_path is None:\n            pass\n        elif Path(self.aicpu_path).is_dir():\n            os.environ['ASCEND_AICPU_PATH'] = self.aicpu_path\n        else:\n            logger.warning(\n                f\"No such directory: {self.aicpu_path}, Please check if Ascend AI software package (Ascend Data Center\"\n                \" Solution) is installed correctly.\")\n            return\n\n    def try_set_env_lib(self):\n        \"\"\"try set env but with no warning: LD_LIBRARY_PATH\"\"\"\n        if Path(self.tbe_path).is_dir():\n            if os.getenv('LD_LIBRARY_PATH'):\n                os.environ['LD_LIBRARY_PATH'] = self.tbe_path + \":\" + os.environ['LD_LIBRARY_PATH']\n            else:\n                os.environ['LD_LIBRARY_PATH'] = self.tbe_path\n\n    def _check_env(self):\n        \"\"\"ascend dependence path check\"\"\"\n        if self.path is None or self.path_check not in self.path:\n            logger.warning(\"Can not find ccec_compiler(need by mindspore-ascend), please check if you have set env \"\n                           \"PATH, you can reference to the installation guidelines https://www.mindspore.cn/install\")\n\n        if self.python_path is None or self.python_path_check not in self.python_path:\n            logger.warning(\n                \"Can not find tbe op implement(need by mindspore-ascend), please check if you have set env \"\n                \"PYTHONPATH, you can reference to the installation guidelines \"\n                \"https://www.mindspore.cn/install\")\n\n        if self.ld_lib_path is None or not (self.ld_lib_path_check_fwk in self.ld_lib_path and\n                                            self.ld_lib_path_check_addons in self.ld_lib_path):\n            logger.warning(\"Can not find driver so(need by mindspore-ascend), please check if you have set env \"\n                           
\"LD_LIBRARY_PATH, you can reference to the installation guidelines \"\n                           \"https://www.mindspore.cn/install\")\n\n        if self.ascend_opp_path is None or self.ascend_opp_path_check not in self.ascend_opp_path:\n            logger.warning(\n                \"Can not find opp path (need by mindspore-ascend), please check if you have set env ASCEND_OPP_PATH, \"\n                \"you can reference to the installation guidelines https://www.mindspore.cn/install\")\n\n\nclass GPUEnvChecker():\n    \"\"\"GPU environment check.\"\"\"\n\n    def __init__(self):\n        self.version = [\"10.1\"]\n        # env\n        self.path = os.getenv(\"PATH\")\n        self.ld_lib_path = os.getenv(\"LD_LIBRARY_PATH\")\n\n        # check\n        self.v = \"0\"\n        self.cuda_lib_path = self._get_lib_path(\"libcu\")\n        self.cuda_bin_path = self._get_bin_path(\"cuda\")\n\n    def _get_bin_path(self, bin_name):\n        \"\"\"Get bin path by bin name.\"\"\"\n        if bin_name == \"cuda\":\n            return self._get_cuda_bin_path()\n        return []\n\n    def _get_cuda_bin_path(self):\n        \"\"\"Get cuda bin path by lib path.\"\"\"\n        path_list = []\n        for path in self.cuda_lib_path:\n            path = os.path.abspath(path.strip() + \"/bin/\")\n            if Path(path).is_dir():\n                path_list.append(path)\n        return list(set(path_list))\n\n    def _get_nvcc_version(self, is_set_env):\n        \"\"\"Get cuda version by nvcc command.\"\"\"\n        nvcc_result = subprocess.run([\"nvcc --version | grep release\"],\n                                     timeout=3, text=True, capture_output=True, check=False, shell=True)\n        if nvcc_result.returncode:\n            if not is_set_env:\n                for path in self.cuda_bin_path:\n                    if Path(path + \"/nvcc\").is_file():\n                        os.environ['PATH'] = path + \":\" + os.environ['PATH']\n                        return 
self._get_nvcc_version(True)\n            return \"\"\n        result = nvcc_result.stdout\n        for line in result.split('\\n'):\n            if line:\n                return line.strip().split(\"release\")[1].split(\",\")[0].strip()\n        return \"\"\n\n    def check_env(self):\n        \"\"\"Check cuda version.\"\"\"\n        version_match = False\n        for path in self.cuda_lib_path:\n            version_file = path + \"/version.txt\"\n            if not Path(version_file).is_file():\n                continue\n            if self._check_version(version_file):\n                version_match = True\n                break\n        if not version_match:\n            if self.v == \"0\":\n                logger.warning(\"Cuda version file version.txt is not found, please confirm that the correct \"\n                               \"cuda version has been installed, you can refer to the \"\n                               \"installation guidelines: https://www.mindspore.cn/install\")\n            else:\n                logger.warning(f\"MindSpore version and cuda version {self.v} does not match, \"\n                               \"please refer to the installation guide for version matching \"\n                               \"information: https://www.mindspore.cn/install\")\n        nvcc_version = self._get_nvcc_version(False)\n        if nvcc_version and (nvcc_version not in self.version):\n            logger.warning(f\"MindSpore version and nvcc(cuda bin) version {nvcc_version} \"\n                           \"does not match, please refer to the installation guide for version matching \"\n                           \"information: https://www.mindspore.cn/install\")\n\n    def _check_version(self, version_file):\n        \"\"\"Check cuda version by version.txt.\"\"\"\n        v = self._read_version(version_file)\n        v = version.parse(v)\n        v_str = str(v.major) + \".\" + str(v.minor)\n        if v_str not in self.version:\n            return False\n 
       return True\n\n    def _get_lib_path(self, lib_name):\n        \"\"\"Get gpu lib path by ldd command.\"\"\"\n        path_list = []\n        current_path = os.path.split(os.path.realpath(__file__))[0]\n        mindspore_path = os.path.dirname(os.path.dirname(current_path)) + \"/mindspore\"\n        ldd_result = subprocess.run([\"ldd \" + mindspore_path + \"/_c_expression*.so* | grep \" + lib_name],\n                                    timeout=3, text=True, capture_output=True, check=False, shell=True)\n        if ldd_result.returncode:\n            logger.warning(f\"{lib_name} so(need by mindspore-gpu) is not found, please confirm that \"\n                           f\"_c_expression.so depend on {lib_name}, \"\n                           f\"and _c_expression.so in directory:{mindspore_path}\")\n            return path_list\n        result = ldd_result.stdout\n        for i in result.split('\\n'):\n            path = i.partition(\"=>\")[2]\n            if path.lower().find(\"not found\") > 0:\n                logger.warning(f\"Cuda {self.version} version(need by mindspore-gpu) is not found, please confirm \"\n                               \"that the path of cuda is set to the env LD_LIBRARY_PATH, please refer to the \"\n                               \"installation guidelines: https://www.mindspore.cn/install\")\n                continue\n            path = path.partition(lib_name)[0]\n            if path:\n                path_list.append(os.path.abspath(path.strip() + \"../\"))\n        return list(set(path_list))\n\n    def _read_version(self, file_path):\n        \"\"\"Get gpu version info in version.txt.\"\"\"\n        with open(file_path, 'r') as f:\n            all_info = f.readlines()\n            for line in all_info:\n                if line.startswith(\"CUDA Version\"):\n                    self.v = line.strip().split(\"CUDA Version\")[1]\n                    return self.v\n        return self.v\n\n\ndef check_version_and_env_config(device_type):\n 
   \"\"\"check version and env config\"\"\"\n    if device_type == \"Ascend\":\n        env_checker = AscendEnvChecker()\n        try:\n            env_checker.set_env()\n        except ImportError as e:\n            env_checker.check_env(e)\n    elif device_type == \"Gpu\":\n        env_checker = GPUEnvChecker()\n        env_checker.check_env()\n    elif device_type == \"Cpu\":\n        pass\n\n\ndef check_version_and_try_set_env_lib():\n    \"\"\"check version and try set env LD_LIBRARY_PATH\"\"\"\n    env_checker = AscendEnvChecker()\n    env_checker.try_set_env_lib()\n"
  },
  {
    "path": "mindspore_serving/server/worker/distributed/__init__.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MindSpore Serving Distributed Worker.\"\"\"\n\nfrom .agent_startup import startup_agents\nfrom .register import declare_servable\nfrom .distributed_worker import start_servable\n"
  },
  {
    "path": "mindspore_serving/server/worker/distributed/agent_startup.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Serving, distributed worker agent startup\"\"\"\n\nimport os\nimport time\nimport sys\nimport traceback\nimport signal\nfrom multiprocessing import Process, Pipe\nimport threading\nimport psutil\n\nfrom mindspore_serving._mindspore_serving import ExitSignalHandle_\nfrom mindspore_serving._mindspore_serving import WorkerAgent_, AgentStartUpConfig_\nfrom mindspore_serving._mindspore_serving import DistributedServableConfig_, OneRankConfig_\n\nfrom mindspore_serving import log as logger\nfrom mindspore_serving.server.common import check_type\nfrom mindspore_serving.server.worker.distributed import worker_agent\n\n\ndef _get_local_ip(rank_list, port):\n    \"\"\"Get the local ip from the rank table config\"\"\"\n    import socket\n    ip_list = set()\n    for item in rank_list:\n        ip_list.add(item.ip)\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        for ip in ip_list:\n            try:\n                s.bind((ip, port))\n                logger.info(f\"Get local machine ip success, ip {ip}\")\n                return ip\n            # pylint: disable=bare-except\n            except:\n                pass\n    raise RuntimeError(f\"Get local machine ip failed, rank table ips: 
{ip_list}, bind port {port}\")\n\n\ndef _check_local_ip(agent_ip, port):\n    \"\"\"Check the local ip\"\"\"\n    import socket\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        for i in range(8):\n            try:\n                s.bind((agent_ip, port + i))\n                logger.info(f\"Check local machine ip success, ip {agent_ip}\")\n                return True\n            # pylint: disable=bare-except\n            except:\n                pass\n    return False\n\n\ndef _check_model_files(num, files, model_files, group_config_files):\n    \"\"\"Check the number of model files or group config files\"\"\"\n    if isinstance(files, tuple):\n        for item in files:\n            if isinstance(item, list):\n                if num == -1:\n                    num = len(item)\n                else:\n                    if num != len(item):\n                        raise RuntimeError(f\"please check the number of  model files and group config files, \"\n                                           f\"model files: {model_files}, group config files: {group_config_files}\")\n            else:\n                if num not in (-1, 1):\n                    raise RuntimeError(f\"please check the number of  model files and group config files, \"\n                                       f\"model files: {model_files}, group config files: {group_config_files}\")\n                num = 1\n    return num\n\n\ndef _check_model_num(model_files, group_config_files):\n    \"\"\"Check the number of model files or group config files\"\"\"\n    num = _check_model_files(-1, model_files, model_files, group_config_files)\n    if group_config_files is not None:\n        num = _check_model_files(-1, group_config_files, model_files, group_config_files)\n        if num != 1:\n            raise RuntimeError(f\"please check the number of  group config files, currently only support one at most\")\n\n\ndef 
_update_model_files_path(model_files, group_config_files):\n    \"\"\"Check and return model files or group config files\"\"\"\n    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n    logger.info(f\"input model files: {model_files}\")\n    logger.info(f\"input group config files: {group_config_files}\")\n    model_files_temp = []\n    for item in model_files:\n        if isinstance(item, list):\n            inner_files = []\n            for inner in item:\n                file_name = os.path.realpath(os.path.join(script_dir, inner))\n                if not os.access(file_name, os.R_OK):\n                    raise RuntimeError(f\"Cannot access model file '{file_name}'\")\n                inner_files.append(file_name)\n            model_files_temp.append(inner_files)\n        else:\n            file_name = os.path.realpath(os.path.join(script_dir, item))\n            if not os.access(file_name, os.R_OK):\n                raise RuntimeError(f\"Cannot access model file '{file_name}'\")\n            model_files_temp.append(file_name)\n\n    if group_config_files is not None:\n        group_files_temp = []\n        for item in group_config_files:\n            if isinstance(item, list):\n                inner_files = []\n                for inner in item:\n                    file_name = os.path.realpath(os.path.join(script_dir, inner))\n                    if not os.access(file_name, os.R_OK):\n                        raise RuntimeError(f\"Cannot access group config file '{file_name}'\")\n                    inner_files.append(file_name)\n                group_files_temp.append(inner_files)\n            else:\n                file_name = os.path.realpath(os.path.join(script_dir, item))\n                if not os.access(file_name, os.R_OK):\n                    raise RuntimeError(f\"Cannot access group config file '{file_name}'\")\n                group_files_temp.append(file_name)\n    else:\n        group_files_temp = None\n    logger.info(f\"absolute model 
files: {model_files_temp}\")\n    logger.info(f\"absolute group config files: {group_files_temp}\")\n    return model_files_temp, group_files_temp\n\n\ndef _make_json_table_file(distributed_config):\n    \"\"\"Make rank table json file\"\"\"\n    rank_size = len(distributed_config.rank_list)\n    runtime_dir = os.path.abspath(\".\")\n    time_stamp = str(time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time())))\n    rank_table_dir = os.path.join(runtime_dir, \"temp_rank_table\")\n    try:\n        os.mkdir(rank_table_dir)\n    except FileExistsError:\n        pass\n    rank_table_file_name = os.path.join(rank_table_dir, f\"hccl_rank_table_{time_stamp}_{rank_size}p.json\")\n    with open(rank_table_file_name, \"w\") as fp:\n        fp.write(distributed_config.rank_table_content)\n    return rank_table_file_name\n\n\nsignal_success = \"Success\"\nsignal_exit = \"Exit\"\n\n\ndef _recv_parent(parent_process, index, recv_pipe, handle_stop_signal=True):\n    \"\"\"Receive message from Start up process.\n    Return False on Ctrl+C(and worker Stop message) Exit Signal, heartbeat failed, and signal_exit.\n    Return True on receiving signal_success.\n    \"\"\"\n    try:\n        while True:\n            while not recv_pipe.poll(0.1):\n                if handle_stop_signal and ExitSignalHandle_.has_stopped():\n                    logger.warning(f\"Child {index}: Exit on Ctrl+C or stop message from worker\")\n                    return False\n                if not parent_process.is_running():  # 3s\n                    logger.warning(f\"Child {index}: Exit on failure of exit of parent process\")\n                    return False\n            parent_signal = recv_pipe.recv()\n            break\n        if parent_signal == signal_success:\n            logger.info(f\"Child {index}: Receive success\")\n            return True\n        if parent_signal == signal_exit:\n            logger.warning(f\"Child {index}: Exit on receiving exit message\")\n    # pylint: 
disable=broad-except\n    except Exception as e:\n        logger.warning(f\"Child {index}: Exit on exception: {e}\")\n    return False\n\n\ndef _agent_process(send_pipe, recv_pipe, index, start_config, dec_key, dec_mode):\n    \"\"\"Agent process\"\"\"\n    parent_process = psutil.Process(os.getppid())\n    try:\n        # listening success or failed message from parent process\n        worker_agent.start_worker_agent(start_config=start_config, dec_key=dec_key, dec_mode=dec_mode)\n        send_pipe.send((index, signal_success))\n        success_msg = _recv_parent(parent_process, index, recv_pipe)\n        if not success_msg:\n            worker_agent.stop()\n        send_pipe.close()\n        recv_pipe.close()\n        while not ExitSignalHandle_.has_stopped():\n            if not parent_process.is_running():\n                logger.warning(f\"Child {index}, detect parent pid={parent_process.pid} has exited, child begin to exit\")\n                worker_agent.stop()\n                return\n            time.sleep(0.1)\n    # pylint: disable=broad-except\n    except Exception as e:\n        traceback.print_exc()\n        logger.error(f\"Child {index}: Catch exception and notify exit of others\")\n        exception = RuntimeError(f\"Child {index} exception happen: {e}\")\n        send_pipe.send((index, exception))\n        _recv_parent(parent_process, index, recv_pipe, False)\n        logger.error(f\"Child {index}: end send message to parent\")\n\n\ndef _send_pipe_msg(send_pipe, msg):\n    \"\"\"Send pipe message\"\"\"\n    try:\n        send_pipe.send(msg)\n    # pylint: disable=broad-except\n    except Exception as e:\n        logger.warning(f\"Send pipe message exception happen: {e}\")\n\n\ndef _send_exit_signal_to_children(subprocess_list):\n    \"\"\"Send exit signal to all child processes, and terminate all child processes when they are still alive\n    in some seconds later\"\"\"\n\n    def wait_exit(wait_seconds, msg):\n        for i in 
range(wait_seconds):\n            all_exit = True\n            for process in subprocess_list:\n                if process.is_alive():\n                    logger.warning(f\"There are still child processes that have not exited and {msg} in \"\n                                   f\"{wait_seconds - i} seconds.\")\n                    time.sleep(1)\n                    all_exit = False\n                    break\n            if all_exit:\n                logger.info(f\"All Child process exited\")\n                return True\n        return False\n\n    if wait_exit(3, \"SIGINT will be sent\"):\n        return\n    # Send signal SIGINT\n    for index, process in enumerate(subprocess_list):\n        if process.is_alive():\n            logger.warning(f\"Send signal SIGINT to {index}\")\n            try:\n                child_process = psutil.Process(process.pid)\n                children_of_child = child_process.children(recursive=True)\n                for item in children_of_child:\n                    os.kill(item.pid, signal.SIGINT)\n            # pylint: disable=broad-except\n            except Exception as e:\n                logger.warning(f\"Get exception when send signal SIGINT to children of child {index}, exception: {e}\")\n            os.kill(process.pid, signal.SIGINT)\n\n    if wait_exit(10, \"will be forcibly killed\"):\n        return\n\n    for index, process in enumerate(subprocess_list):\n        if process.is_alive():\n            logger.warning(f\"Kill Child process {index}\")\n            try:\n                child_process = psutil.Process(process.pid)\n                children_of_child = child_process.children(recursive=True)\n                for item in children_of_child:\n                    os.kill(item.pid, signal.SIGKILL)\n            # pylint: disable=broad-except\n            except Exception as e:\n                logger.warning(f\"Get exception when send signal SIGKILL to children of child {index}, exception: {e}\")\n            
os.kill(process.pid, signal.SIGKILL)\n\n\ndef _send_exit_msg_to_children(send_pipe_list, subprocess_list):\n    \"\"\"Send exit msg to all child processes, and terminate all child processes when they are still alive\n    in some seconds later.\n    \"\"\"\n    index = 0\n    for send_pipe, process in zip(send_pipe_list, subprocess_list):\n        if process.is_alive():\n            logger.warning(f\"Send exit message to Child {index}\")\n            _send_pipe_msg(send_pipe, signal_exit)\n            logger.warning(f\"End send exit message to Child {index}\")\n        else:\n            logger.warning(f\"Child {index} is not alive\")\n        index += 1\n    _send_exit_signal_to_children(subprocess_list)\n\n\ndef _listening_agents_when_startup(p_recv_pipe, send_pipe_list, subprocess_list):\n    \"\"\"Listening child process\"\"\"\n    count = len(send_pipe_list)\n    for _ in range(count):\n        while True:\n            if p_recv_pipe.poll(0.1):\n                break\n            if ExitSignalHandle_.has_stopped():\n                logger.warning(\"Fail to start agents because of Ctrl+C\")\n                _send_exit_msg_to_children(send_pipe_list, subprocess_list)\n                raise RuntimeError(\"Fail to start agents because of Ctrl+C\")\n            for send_pipe, process in zip(send_pipe_list, subprocess_list):\n                if process.is_alive():\n                    continue\n                logger.warning(\"Fail to start agents because of death of one agent\")\n                _send_exit_msg_to_children(send_pipe_list, subprocess_list)\n                raise RuntimeError(\"Fail to start agents because of death of one agent\")\n\n        index, msg = p_recv_pipe.recv()\n        logger.info(f\"Receive msg from Child {index}: {msg}\")\n        if isinstance(msg, Exception):\n            logger.warning(\"Fail to start agents because of exception raise by one agent\")\n            _send_exit_msg_to_children(send_pipe_list, subprocess_list)\n            
raise msg\n\n    for send_pipe in send_pipe_list:\n        _send_pipe_msg(send_pipe, signal_success)\n\n\ndef _listening_agents_after_startup(subprocess_list, distributed_address, agent_ip):\n    \"\"\"Listening agent status after success start up of agents\"\"\"\n\n    def wait_child_exit():\n        while not ExitSignalHandle_.has_stopped():\n            for index, process in enumerate(subprocess_list):\n                if not process.is_alive():\n                    logger.warning(f\"Child {index}, pid={process.pid} has exited\")\n                    return\n            time.sleep(0.1)\n\n    def listening_thread_fun():\n        wait_child_exit()\n        WorkerAgent_.startup_notify_exit(distributed_address, agent_ip)\n        _send_exit_signal_to_children(subprocess_list)\n\n    thread = threading.Thread(target=listening_thread_fun)\n    thread.start()\n\n\ndef _startup_agents(common_meta, distributed_address,\n                    agent_ip, agent_start_port, device_id_list, rank_id_list,\n                    model_files, group_config_files, rank_table_file,\n                    dec_key, dec_mode):\n    \"\"\"Start up all agents in one machine\"\"\"\n    servable_name = common_meta.model_key\n    send_pipe_list = []\n    subprocess_list = []\n    c_send_pipe, p_recv_pipe = Pipe()\n    group_file = \"\"\n    agents_count = len(device_id_list)\n    for index in range(agents_count):\n        device_id, rank_id, model_file = device_id_list[index], rank_id_list[index], model_files[index]\n        if group_config_files is not None:\n            group_file = group_config_files[index]\n\n        p_send_pipe, c_recv_pipe = Pipe()\n        send_pipe_list.append(p_send_pipe)\n\n        agent_port = agent_start_port + index\n\n        start_config = AgentStartUpConfig_()\n        start_config.rank_id = rank_id\n        start_config.device_id = device_id\n        start_config.model_file_names = model_file\n        if group_config_files is not None:\n            
start_config.group_file_names = group_file\n        start_config.rank_table_json_file_name = rank_table_file\n        start_config.agent_address = agent_ip + \":\" + str(agent_port)\n        start_config.distributed_address = distributed_address\n        start_config.common_meta = common_meta\n\n        process = Process(target=_agent_process,\n                          args=(c_send_pipe, c_recv_pipe, index, start_config, dec_key, dec_mode),\n                          name=f\"{servable_name}_worker_agent_rank{rank_id}_device{device_id}\")\n        process.start()\n        subprocess_list.append(process)\n\n    msg = f\"distributed worker_address: {distributed_address}, agent_ip: {agent_ip}, \" \\\n          f\"agent_start_port: {agent_start_port}, device ids: {device_id_list}, rank ids: {rank_id_list}, \" \\\n          f\"rank table file: {rank_table_file}, model files: {model_files}, group config files: {group_config_files}\"\n\n    try:\n        _listening_agents_when_startup(p_recv_pipe, send_pipe_list, subprocess_list)\n    # pylint: disable=broad-except\n    except Exception as e:\n        WorkerAgent_.notify_failed(distributed_address)\n        logger.error(f\"Failed to start agents, {msg}\")\n        print(f\"Failed to start agents, {msg}\")\n        raise e\n\n    logger.info(f\"Success to start agents, {msg}\")\n    print(f\"Success to start agents, {msg}\")\n    _listening_agents_after_startup(subprocess_list, distributed_address, agent_ip)\n\n\nclass DistributedServableConfig:\n    \"\"\"Python DistributedServableConfig\"\"\"\n\n    def __init__(self):\n        self.rank_table_content = \"\"\n        self.rank_list = None\n        self.common_meta = None\n        self.distributed_meta = None\n\n    def set(self, config):\n        \"\"\"Set from C++ DistributedServableConfig_ obj\"\"\"\n        self.rank_table_content = config.rank_table_content\n        self.rank_list = []\n        for item in config.rank_list:\n            new_item = {\"device_id\": 
item.device_id, \"ip\": item.ip}\n            self.rank_list.append(new_item)\n        self.common_meta = {\"model_key\": config.common_meta.model_key,\n                            \"with_batch_dim\": config.common_meta.with_batch_dim,\n                            \"without_batch_dim_inputs\": config.common_meta.without_batch_dim_inputs,\n                            \"inputs_count\": config.common_meta.inputs_count,\n                            \"outputs_count\": config.common_meta.outputs_count}\n\n        self.distributed_meta = {\"rank_size\": config.distributed_meta.rank_size,\n                                 \"stage_size\": config.distributed_meta.stage_size}\n\n    def get(self):\n        \"\"\"Get as C++ DistributedServableConfig_ obj\"\"\"\n        config = DistributedServableConfig_()\n        config.rank_table_content = self.rank_table_content\n        rank_list = []\n        for item in self.rank_list:\n            new_item = OneRankConfig_()\n            new_item.device_id = item[\"device_id\"]\n            new_item.ip = item[\"ip\"]\n            rank_list.append(new_item)\n        config.rank_list = rank_list\n        config.common_meta.model_key = self.common_meta[\"model_key\"]\n        config.common_meta.with_batch_dim = self.common_meta[\"with_batch_dim\"]\n        config.common_meta.without_batch_dim_inputs = self.common_meta[\"without_batch_dim_inputs\"]\n        config.common_meta.inputs_count = self.common_meta[\"inputs_count\"]\n        config.common_meta.outputs_count = self.common_meta[\"outputs_count\"]\n\n        config.distributed_meta.rank_size = self.distributed_meta[\"rank_size\"]\n        config.distributed_meta.stage_size = self.distributed_meta[\"stage_size\"]\n        return config\n\n\ndef _get_worker_distributed_config(distributed_address):\n    \"\"\"Get worker distributed config from worker through sub process\"\"\"\n    c_send_pipe, p_recv_pipe = Pipe()\n\n    def process_fun(c_send_pipe):\n        try:\n            
distributed_config = WorkerAgent_.get_agents_config_from_worker(distributed_address)\n            config = DistributedServableConfig()\n            config.set(distributed_config)\n            c_send_pipe.send(config)\n        # pylint: disable=broad-except\n        except Exception as e:\n            c_send_pipe.send(e)\n\n    process = Process(target=process_fun, args=(c_send_pipe,),\n                      name=f\"worker_agent_get_agents_config_from_worker\")\n    process.start()\n    process.join()\n    assert not process.is_alive()\n    if p_recv_pipe.poll(0.1):\n        config = p_recv_pipe.recv()\n        if isinstance(config, Exception):\n            raise config\n        distributed_config = config.get()\n        return distributed_config\n    raise RuntimeError(f\"Failed to get agents config from worker\")\n\n\ndef startup_agents(distributed_address, model_files, group_config_files=None,\n                   agent_start_port=7000, agent_ip=None, rank_start=None,\n                   dec_key=None, dec_mode='AES-GCM'):\n    r\"\"\"\n    Start all required worker agents on the current machine. These worker agent processes are responsible for inference\n    tasks on the local machine. For details, please refer to\n    `MindSpore Serving-based Distributed Inference Service Deployment <https://www.mindspore.cn/serving/docs/en/master/serving_distributed_example.html>`_.\n\n    Args:\n        distributed_address (str): The distributed worker address the agents linked to.\n        model_files (Union[list[str], tuple[str]]): All model files need in current machine, absolute path or path\n            relative to this startup python script.\n        group_config_files (Union[list[str], tuple[str]], optional): All group config files need in current machine,\n            absolute path or path relative to this startup python script, default ``None``, which means there are no\n            configuration files. 
Default: ``None``.\n        agent_start_port (int, optional): The starting agent port of the agents link to worker. Default: ``7000``.\n        agent_ip (str, optional): The local agent ip, if it's ``None``, the agent ip will be obtained from rank\n            table file. Default ``None``. Parameter `agent_ip` and parameter `rank_start` must have values at\n            the same time, or both ``None`` at the same time. Default: ``None``.\n        rank_start (int, optional): The starting rank id of this machine, if it's ``None``, the rank id will be obtained\n            from rank table file. Default ``None``. Parameter `agent_ip` and parameter `rank_start` must have values at the same\n            time, or both ``None`` at the same time. Default: ``None``.\n        dec_key (bytes, optional): Byte type key used for decryption. The valid length is 16, 24, or 32.\n            Default: ``None``.\n        dec_mode (str, optional): Specifies the decryption mode, take effect when `dec_key` is set.\n            Option: ``'AES-GCM'`` or ``'AES-CBC'``. 
Default: ``'AES-GCM'``.\n\n    Raises:\n        RuntimeError: Failed to start agents.\n\n    Examples:\n        >>> import os\n        >>> from mindspore_serving.server import distributed\n        >>> model_files = []\n        >>> for i in range(8):\n        >>>    model_files.append(f\"models/device{i}/matmul.mindir\")\n        >>> distributed.startup_agents(distributed_address=\"127.0.0.1:6200\", model_files=model_files)\n    \"\"\"\n    check_type.check_str(\"distributed_address\", distributed_address)\n    check_type.check_int(\"agent_start_port\", agent_start_port, 1, 65535 - 7)\n    model_files = check_type.check_and_as_tuple_with_str_list(\"model_files\", model_files)\n    if group_config_files is not None:\n        group_config_files = check_type.check_and_as_tuple_with_str_list(\"group_config_files\", group_config_files)\n\n    # check dec_key and dec_mode\n    if dec_key is not None:\n        if not isinstance(dec_key, bytes):\n            raise RuntimeError(f\"Parameter 'dec_key' should be bytes, but actually {type(dec_key)}\")\n        if not dec_key:\n            raise RuntimeError(f\"Parameter 'dec_key' should not be empty bytes\")\n        if len(dec_key) not in (16, 24, 32):\n            raise RuntimeError(f\"Parameter 'dec_key' length {len(dec_key)} expected to be 16, 24 or 32\")\n    check_type.check_str(\"dec_mode\", dec_mode)\n    if dec_mode not in ('AES-GCM', 'AES-CBC'):\n        raise RuntimeError(f\"Parameter 'dec_mode' expected to be 'AES-GCM' or 'AES-CBC'\")\n\n    ExitSignalHandle_.start()\n    distributed_config = _get_worker_distributed_config(distributed_address)\n\n    # get machine ip\n    rank_list = distributed_config.rank_list\n    local_device_id_list = []\n    local_rank_id_list = []\n    if agent_ip is None:\n        if rank_start is not None:\n            raise RuntimeError(\"Parameter 'agent_ip' and parameter 'rank_start' must have values at the same time, \"\n                               \"or both None at the same 
time.\")\n        local_ip = _get_local_ip(rank_list, agent_start_port)\n        # get all device_id and rank_id\n        for rank_id, item in enumerate(rank_list):\n            if item.ip == local_ip:\n                local_device_id_list.append(item.device_id)\n                local_rank_id_list.append(rank_id)\n    else:\n        if rank_start is None:\n            raise RuntimeError(\"Parameter 'agent_ip' and parameter 'rank_start' must have values at the same time, \"\n                               \"or both None at the same time.\")\n        check_type.check_str(\"agent_ip\", agent_ip)\n        check_type.check_int(\"rank_start\", rank_start, 0)\n        if rank_start >= len(rank_list):\n            raise RuntimeError(f\"Parameter 'rank_start' cannot equal or larger than rank size {len(rank_list)}.\")\n        if not _check_local_ip(agent_ip, agent_start_port):\n            raise RuntimeError(f\"Check ip 'agent_ip' valid failed, agent_ip: {agent_ip}\")\n        local_ip = agent_ip\n        rank_table_ip = rank_list[rank_start].ip\n        for rank_id, item in enumerate(rank_list):\n            if item.ip == rank_table_ip:\n                local_device_id_list.append(item.device_id)\n                local_rank_id_list.append(rank_id)\n\n    # handle model files and group config files\n    if len(local_device_id_list) != len(model_files):\n        raise RuntimeError(f\"Card count {local_device_id_list} described rank table does not equal to model files size \"\n                           f\"{len(model_files)}, model files: {model_files}\")\n\n    if group_config_files is not None and len(model_files) != len(group_config_files):\n        raise RuntimeError(f\"Model files count {len(model_files)} does not equal to group config files \"\n                           f\"count {len(group_config_files)} when group_config_files is not None, \"\n                           f\"model files: {model_files}, group config files: {group_config_files}\")\n\n    
_check_model_num(model_files, group_config_files)\n    model_files, group_config_files = _update_model_files_path(model_files, group_config_files)\n\n    # make json table file and export env\n    rank_table_file = _make_json_table_file(distributed_config)\n    _startup_agents(distributed_config.common_meta, distributed_address, local_ip, agent_start_port,\n                    local_device_id_list, local_rank_id_list,\n                    model_files, group_config_files, rank_table_file, dec_key, dec_mode)\n"
  },
  {
    "path": "mindspore_serving/server/worker/distributed/distributed_worker.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Serving, distributed worker startup\"\"\"\nfrom mindspore_serving._mindspore_serving import Worker_\n\nfrom mindspore_serving.server.common import check_type\nfrom mindspore_serving.server.worker._worker import _start_py_task\nfrom mindspore_serving.server.worker._worker import stop_on_except, _load_servable_config\n\n\n@stop_on_except\ndef start_servable(servable_directory, servable_name, rank_table_json_file, version_number,\n                   distributed_address, wait_agents_time_in_seconds,\n                   master_address, worker_address):\n    r\"\"\"\n    Start up the servable named 'servable_name' defined in 'servable_directory'.\n    \"\"\"\n    check_type.check_str('servable_directory', servable_directory)\n    check_type.check_str('servable_name', servable_name)\n    check_type.check_int('version_number', version_number, 1)\n    check_type.check_str('rank_table_json_file', rank_table_json_file)\n    check_type.check_str('distributed_address', distributed_address)\n\n    _load_servable_config(servable_directory, servable_name)\n    Worker_.start_distributed_servable(servable_directory, servable_name, rank_table_json_file, version_number,\n                                       distributed_address, master_address, worker_address,\n                              
         wait_agents_time_in_seconds)\n    _start_py_task()\n"
  },
  {
    "path": "mindspore_serving/server/worker/distributed/register.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Serving, distributed worker register\"\"\"\n\nfrom mindspore_serving import log as logger\nfrom mindspore_serving.server.common import check_type\nfrom mindspore_serving.server.register.utils import get_servable_dir\nfrom mindspore_serving.server.register.model import append_declared_model\nfrom mindspore_serving._mindspore_serving import ModelMeta_, ServableRegister_\n\n\ndef declare_servable(rank_size, stage_size, with_batch_dim=True, without_batch_dim_inputs=None,\n                     enable_pipeline_infer=False):\n    \"\"\"declare distributed servable in servable_config.py. For details, please refer to\n    `MindSpore Serving-based Distributed Inference Service Deployment <https://www.mindspore.cn/serving/docs/en/master/serving_distributed_example.html>`_.\n\n    Args:\n        rank_size (int): The rank size of the distributed model.\n        stage_size (int): The stage size of the distributed model.\n        with_batch_dim (bool, optional): Whether the first shape dim of the inputs and outputs of model is batch.\n            Default: ``True``.\n        without_batch_dim_inputs (Union[int, tuple[int], list[int]], optional): Index of inputs that without batch dim\n            when `with_batch_dim` is ``True``. 
Default: ``None``.\n        enable_pipeline_infer (bool, optional): Whether to enable pipeline parallel inference. Pipeline parallelism can\n            effectively improve inference performance. For details, see\n            `Pipeline Parallelism <https://www.mindspore.cn/tutorials/experts/en/master/parallel/pipeline_parallel.html>`_.\n            Default: ``False``.\n\n    Return:\n        Model, identification of this model, can be used for `Model.call` or as the inputs of `add_stage`.\n\n    Raises:\n        RuntimeError: The type or value of the parameters are invalid.\n\n    Examples:\n        >>> from mindspore_serving.server import distributed\n        >>> model = distributed.declare_servable(rank_size=8, stage_size=1)\n    \"\"\"\n    check_type.check_bool('with_batch_dim', with_batch_dim)\n    check_type.check_bool('enable_pipeline_infer', enable_pipeline_infer)\n\n    meta = ModelMeta_()\n    meta.common_meta.servable_name = get_servable_dir()\n    meta.common_meta.model_key = get_servable_dir()  # used to identify model\n    meta.common_meta.with_batch_dim = with_batch_dim\n    if without_batch_dim_inputs:\n        without_batch_dim_inputs = check_type.check_and_as_int_tuple_list('without_batch_dim_inputs',\n                                                                          without_batch_dim_inputs, 0)\n        meta.common_meta.without_batch_dim_inputs = without_batch_dim_inputs\n\n    # init distributed servable meta info\n    check_type.check_int(\"rank_size\", rank_size, 1)\n    check_type.check_int(\"stage_size\", stage_size, 1)\n    meta.distributed_meta.rank_size = rank_size\n    meta.distributed_meta.stage_size = stage_size\n    meta.distributed_meta.enable_pipeline_infer = enable_pipeline_infer\n    ServableRegister_.declare_distributed_model(meta)\n    logger.info(f\"Declare distributed servable, servable name: {meta.common_meta.model_key} \"\n                f\", rank_size: {rank_size} , stage_size: {stage_size},  with_batch_dim: 
{with_batch_dim} \"\n                f\", without_batch_dim_inputs: {without_batch_dim_inputs} \"\n                f\", enable_pipeline_infer: {enable_pipeline_infer}\")\n    return append_declared_model(meta.common_meta.model_key)\n"
  },
  {
    "path": "mindspore_serving/server/worker/distributed/worker_agent.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Serving, distributed worker agent\"\"\"\n\nimport os\nimport threading\n\nfrom mindspore_serving._mindspore_serving import WorkerAgent_, AgentStartUpConfig_, ExitSignalHandle_\n\nfrom mindspore_serving import log as logger\nfrom mindspore_serving.server.worker import init_mindspore\n\n\ndef start_worker_agent(start_config, dec_key, dec_mode):\n    \"\"\"Start up one worker agent on one device id, invoke by agent_startup.startup_worker_agents\n    \"\"\"\n    if not isinstance(start_config, AgentStartUpConfig_):\n        raise RuntimeError(\"Parameter 'start_config' should be instance of AgentStartUpConfig_\")\n    logger.info(f\"rank_id={start_config.rank_id}, device_id={start_config.device_id}, \"\n                f\"model_file='{start_config.model_file_names}', group_file='{start_config.group_file_names}', \"\n                f\"rank_table_file='{start_config.rank_table_json_file_name}',\"\n                f\"agent_address='{start_config.agent_address}', \"\n                f\"distributed_address='{start_config.distributed_address}'\"\n                f\"with_batch_dim={start_config.common_meta.with_batch_dim}, \"\n                f\"without_batch_dim_inputs={start_config.common_meta.without_batch_dim_inputs}\")\n\n    ExitSignalHandle_.start()  # Set flag to running 
and receive Ctrl+C message\n\n    init_mindspore.init_mindspore_cxx_env(False)\n    os.environ[\"RANK_ID\"] = str(start_config.rank_id)\n    os.environ[\"DEVICE_ID\"] = str(start_config.device_id)\n    os.environ[\"MS_ENABLE_HCCL\"] = \"1\"\n    if start_config.group_file_names:\n        os.environ[\"PARA_GROUP_FILE\"] = ';'.join(start_config.group_file_names)\n\n    os.environ[\"RANK_TABLE_FILE\"] = start_config.rank_table_json_file_name\n\n    for item in (\"RANK_ID\", \"DEVICE_ID\", \"MS_ENABLE_HCCL\", \"PARA_GROUP_FILE\", \"RANK_TABLE_FILE\",\n                 \"LD_LIBRARY_PATH\", \"PYTHONPATH\"):\n        logger.info(f\"Env {item}: {os.getenv(item, None)}\")\n    if dec_key is None:\n        dec_key = ''\n    WorkerAgent_.start_agent(start_config, dec_key, dec_mode)\n\n    start_wait_and_clear()\n\n\n_wait_and_clear_thread = None\n\n\ndef start_wait_and_clear():\n    \"\"\"Waiting for Ctrl+C, and clear up environment\"\"\"\n\n    def thread_func():\n        logger.info(\"Serving worker Agent: wait for Ctrl+C to exit ------------------------------------\")\n        print(\"Serving worker Agent: wait for Ctrl+C to exit ------------------------------------\")\n        WorkerAgent_.wait_and_clear()\n        logger.info(\"Serving worker Agent: exited ------------------------------------\")\n        print(\"Serving worker Agent: exited ------------------------------------\")\n\n    global _wait_and_clear_thread\n    if not _wait_and_clear_thread:\n        _wait_and_clear_thread = threading.Thread(target=thread_func)\n        _wait_and_clear_thread.start()\n\n\ndef stop():\n    r\"\"\"\n    Stop the running of agent.\n    \"\"\"\n    WorkerAgent_.stop_and_clear()\n"
  },
  {
    "path": "mindspore_serving/server/worker/init_mindspore.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Init MindSpore Cxx\"\"\"\nimport os\nimport importlib.util\n\nfrom mindspore_serving import log as logger\nfrom mindspore_serving._mindspore_serving import Worker_\nfrom .check_version import check_version_and_env_config, check_version_and_try_set_env_lib\n\n_flag_set_mindspore_cxx_env = False\n\n\ndef get_mindspore_whl_path():\n    \"\"\"Get MindSpore whl install path\"\"\"\n    model_spec = importlib.util.find_spec(\"mindspore\")\n    if not model_spec or not model_spec.submodule_search_locations:\n        return \"\"\n    if not isinstance(model_spec.submodule_search_locations, list):\n        return \"\"\n    ms_dir = model_spec.submodule_search_locations[0]\n    return ms_dir\n\n\ndef check_mindspore_version(ms_dir):\n    \"\"\"check MindSpore version number\"\"\"\n    try:\n        from mindspore_serving.version import __version__\n    except ModuleNotFoundError:\n        logger.warning(f\"Get MindSpore Serving version failed\")\n        return\n    try:\n        with open(os.path.join(ms_dir, \"version.py\"), \"r\") as fp:\n            version_str = fp.readline().replace(\"\\n\", \"\").replace(\"\\r\", \"\").replace(\" \", \"\") \\\n                .replace(\"'\", \"\").replace(\"\\\"\", \"\")\n            prefix = \"__version__=\"\n            if 
version_str[:len(prefix)] != prefix:\n                logger.warning(f\"Get MindSpore version failed\")\n                return\n            ms_version = version_str[len(prefix):]\n    except FileNotFoundError:\n        logger.warning(f\"Get MindSpore version failed\")\n        return\n    serving_versions = __version__.split(\".\")\n    ms_versions = ms_version.split(\".\")\n    if serving_versions[:2] != ms_versions[:2]:\n        logger.warning(f\"MindSpore version {ms_version} and MindSpore Serving version {__version__} are expected \"\n                       f\"to be consistent. If not, there may be compatibility problems.\")\n        return\n\n\ndef set_mindspore_cxx_env():\n    \"\"\"Append MindSpore CXX lib path to LD_LIBRARY_PATH\"\"\"\n    global _flag_set_mindspore_cxx_env\n    if _flag_set_mindspore_cxx_env:\n        return\n    _flag_set_mindspore_cxx_env = True\n\n    ld_lib_path = os.getenv('LD_LIBRARY_PATH', \"\")\n    check_version_and_try_set_env_lib()  # try set env LD_LIBRARY_PATH\n    logger.info(f\"Update env LD_LIBRARY_PATH from '{ld_lib_path}' to '{os.getenv('LD_LIBRARY_PATH')}'\")\n\n    ld_lib_path = os.getenv('LD_LIBRARY_PATH', \"\")\n    ms_dir = get_mindspore_whl_path()\n    if not ms_dir:\n        logger.info(f\"find mindspore failed, LD_LIBRARY_PATH will not add MindSpore lib path\")\n        return\n    check_mindspore_version(ms_dir)\n    ms_dir = os.path.join(ms_dir, \"lib\")\n\n    if ld_lib_path:\n        if ms_dir not in ld_lib_path.split(\":\"):\n            os.environ['LD_LIBRARY_PATH'] = ld_lib_path + \":\" + ms_dir\n    else:\n        os.environ['LD_LIBRARY_PATH'] = ms_dir\n    logger.info(f\"Update env LD_LIBRARY_PATH from '{ld_lib_path}' to '{os.getenv('LD_LIBRARY_PATH')}'\")\n\n\ndef init_mindspore_cxx_env(enable_lite):\n    \"\"\"Init env for load libmindspore.so\"\"\"\n    set_mindspore_cxx_env()\n    device_type = Worker_.get_device_type(\"none\", enable_lite)\n    if not device_type:\n        logger.warning(\"Failed to 
get device type\")\n        return\n    check_version_and_env_config(device_type)\n"
  },
  {
    "path": "mindspore_serving/server/worker/task.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Python run preprocess and postprocess in python\"\"\"\n\nimport time\nimport logging\nimport numpy as np\nfrom mindspore_serving._mindspore_serving import Worker_\nfrom mindspore_serving._mindspore_serving import ExitSignalHandle_\nfrom mindspore_serving.server.register.stage_function import stage_function_storage\nfrom mindspore_serving import log as logger\n\n\nclass ServingSystemException(Exception):\n    \"\"\"Exception notify system error of worker, and need to exit py task\"\"\"\n\n    def __init__(self, msg):\n        super(ServingSystemException, self).__init__()\n        self.msg = msg\n\n    def __str__(self):\n        return self.msg\n\n\ndef has_worker_stopped():\n    \"\"\"Whether worker has stopped\"\"\"\n    return ExitSignalHandle_.has_stopped()\n\n\nclass PyTaskHandler:\n    \"\"\"Handling preprocess and postprocess\"\"\"\n\n    def run(self):\n        \"\"\"Run tasks of preprocess and postprocess, switch to other type of process when some instances are handled\"\"\"\n        logger.info(f\"start python task handling thread\")\n        while True:\n            try:\n                if has_worker_stopped():\n                    logger.info(\"Worker has exited, exit python task handling thread\")\n                    break\n                task = 
Worker_.get_py_task()\n                if task.has_stopped:\n                    logger.info(\"Worker has exited, exit python task handling thread\")\n                    break\n                self.run_inner(task)\n            except Exception as e:  # pylint: disable=broad-except\n                logger.error(f\"py task catch exception and exit: {e}\")\n                logging.exception(e)\n                break\n        logger.info(\"end python task handling thread\")\n        Worker_.stop_and_clear()\n\n    @staticmethod\n    def run_inner(task):\n        \"\"\"Iterator get result, and push it to c++\"\"\"\n        task_name = task.task_name\n        task_info = stage_function_storage.get(task_name)\n        instance_list = task.instance_list\n        # check input\n        inputs_count = task_info[\"inputs_count\"]\n        for item in instance_list:\n            if not isinstance(item, tuple) or len(item) != inputs_count:\n                raise RuntimeError(f\"The inputs number {len(item)} provided is not equal to the inputs number \"\n                                   f\"{inputs_count} required by function {task_name}, stage index {task.stage_index}\")\n\n        instances_size = len(task.instance_list)\n        index = 0\n        while index < instances_size:\n            get_result_time_end = time.time()\n            try:\n                result = task_info[\"fun\"](instance_list[index:])  # user-defined, may raise Exception\n                if isinstance(result, (tuple, list)):  # convert return result to yield\n                    result = iter(result)\n            # pylint: disable=broad-except\n            except Exception as e:\n                logger.warning(f\"{task_name} invoke catch exception: \")\n                logging.exception(e)\n                PyTaskHandler.push_failed(instances_size - index, str(e))\n                return  # return will not terminate thread\n\n            try:\n                start_index = index\n                for _ 
in range(index, instances_size):\n                    output = next(result)  # user-defined, may raise Exception\n                    if not isinstance(output, (tuple, list)):\n                        output = (output,)\n                    # check output count\n                    if len(output) != task_info[\"outputs_count\"]:\n                        error_msg = f\"The outputs number {len(output)} of one instance returned by function \" \\\n                                    f\"'{task_name}' is not equal to the outputs number {task_info['outputs_count']} \" \\\n                                    f\" registered in method {task.method_name}\"\n                        PyTaskHandler.push_system_failed(error_msg)\n                        raise ServingSystemException(error_msg)\n                    instance_result = []\n                    for item in output:\n                        # convert MindSpore Tensor to numpy\n                        if callable(getattr(item, \"asnumpy\", None)):\n                            item = item.asnumpy()\n                        if isinstance(item, np.ndarray) and (not item.flags['FORC']):\n                            item = np.ascontiguousarray(item)\n                        instance_result.append(item)\n                    # raise ServingSystemException when user-defined output is invalid\n                    PyTaskHandler.push_result(instance_result)  # push outputs of one instance\n                    index += 1\n\n                get_result_time = time.time()\n                logger.info(f\"method {task.method_name} stage {task.stage_index} function {task_name} get result \"\n                            f\"{start_index} ~ {instances_size - 1} cost time \"\n                            f\"{(get_result_time - get_result_time_end) * 1000} ms\")\n\n            except StopIteration:  # raise by next\n                error_msg = f\"The number {index} of instances returned by function '{task_name}' is \" \\\n                          
  f\"not equal to the number {instances_size} of instances provided to this function.\"\n                PyTaskHandler.push_system_failed(error_msg)\n                raise RuntimeError(error_msg)\n            except ServingSystemException as e:\n                logger.error(f\"{task_name} handling catch exception: {e}\")\n                PyTaskHandler.push_system_failed(e.msg)\n                raise\n            except Exception as e:  # pylint: disable=broad-except\n                # catch exception and try next\n                logger.warning(f\"{task_name} get result catch exception: {e}\")\n                logging.exception(e)\n                PyTaskHandler.push_failed(1, str(e))  # push success results and a failed result\n                index += 1\n\n    @staticmethod\n    def push_failed(count, failed_msg):\n        \"\"\"Push failed result\"\"\"\n        Worker_.push_pytask_failed(count, failed_msg)\n\n    @staticmethod\n    def push_system_failed(failed_msg):\n        \"\"\"Push failed result\"\"\"\n        Worker_.push_pytask_system_failed(failed_msg)\n\n    @staticmethod\n    def push_result(instance_result):\n        \"\"\"Push success result\"\"\"\n        try:\n            Worker_.push_pytask_result(tuple(instance_result))\n        except Exception as e:\n            raise ServingSystemException(f\"Push py task result cause exception: {e}\")\n\n\ndef _start_py_task():\n    \"\"\"Start python thread for python task\"\"\"\n    if Worker_.enable_pytask_que():\n        PyTaskHandler().run()\n    else:\n        Worker_.wait_and_clear()\n"
  },
  {
    "path": "requirements_test.txt",
    "content": "numpy >= 1.17.0\nprotobuf >= 3.13.0\ngrpcio >= 1.36.0, <= 1.47.0\nrequests >= 2.22.0\npsutil >= 5.9.1\n"
  },
  {
    "path": "scripts/check_clang_format.sh",
    "content": "#!/bin/bash\n# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nset -e\n\nCLANG_FORMAT=$(which clang-format) || (echo \"Please install 'clang-format' tool first\"; exit 1)\n\nversion=$(\"${CLANG_FORMAT}\" --version | sed -n \"s/.*\\ \\([0-9]*\\)\\.[0-9]*\\.[0-9]*.*/\\1/p\")\nif [[ \"${version}\" -lt \"8\" ]]; then\n  echo \"clang-format's version must be at least 8.0.0\"\n  exit 1\nfi\n\nCURRENT_PATH=$(pwd)\nSCRIPTS_PATH=$(dirname \"$0\")\n\necho \"CURRENT_PATH=$CURRENT_PATH\"\necho \"SCRIPTS_PATH=$SCRIPTS_PATH\"\n\n# print usage message\nfunction usage()\n{\n  echo \"Check whether the specified source files were well formatted\"\n  echo \"Usage:\"\n  echo \"bash $0 [-a] [-c] [-l] [-h]\"\n  echo \"e.g. 
$0 -a\"\n  echo \"\"\n  echo \"Options:\"\n  echo \"    -a Check code format of all files, default case\"\n  echo \"    -c Check code format of the files changed compared to last commit\"\n  echo \"    -l Check code format of the files changed in last commit\"\n  echo \"    -h Print usage\"\n}\n\n# check and set options\nfunction checkopts()\n{\n  # init variable\n  mode=\"all\"    # default check all files\n\n  # Process the options\n  while getopts 'aclh' opt\n  do\n    case \"${opt}\" in\n      a)\n        mode=\"all\"\n        ;;\n      c)\n        mode=\"changed\"\n        ;;\n      l)\n        mode=\"lastcommit\"\n        ;;\n      h)\n        usage\n        exit 0\n        ;;\n      *)\n        echo \"Unknown option ${opt}!\"\n        usage\n        exit 1\n    esac\n  done\n}\n\n# init variable\n# check options\ncheckopts \"$@\"\n\n# switch to project root path, which contains clang-format config file '.clang-format'\ncd \"${SCRIPTS_PATH}/..\" || exit 1\n\nCHECK_LIST_FILE='__checked_files_list__'\n\nif [ \"X${mode}\" == \"Xall\" ]; then\n  find mindspore_serving/ccsrc -type f -name \"*\" | grep \"\\.h$\\|\\.cc$\\|\\.c$\" > \"${CHECK_LIST_FILE}\" || true\nelif [ \"X${mode}\" == \"Xchanged\" ]; then\n  # --diff-filter=ACMRTUXB will ignore deleted files in commit\n  git diff --diff-filter=ACMRTUXB --name-only | grep \"mindspore_serving/ccsrc\" | grep \"\\.h$\\|\\.cc$\\|\\.c$\" > \"${CHECK_LIST_FILE}\" || true\nelse  # \"X${mode}\" == \"Xlastcommit\"\n  git diff --diff-filter=ACMRTUXB --name-only HEAD~ HEAD | grep \"mindspore_serving/ccsrc\" | grep \"\\.h$\\|\\.cc$\\|\\.c$\" > \"${CHECK_LIST_FILE}\" || true\nfi\n\nCHECK_RESULT_FILE=__code_format_check_result__\necho \"0\" > \"$CHECK_RESULT_FILE\"\n\n# check format of files modified in the latest commit\nwhile read line; do\n  BASE_NAME=$(basename \"${line}\")\n  TEMP_FILE=\"__TEMP__${BASE_NAME}\"\n  cp \"${line}\" \"${TEMP_FILE}\"\n  ${CLANG_FORMAT} -i \"${TEMP_FILE}\"\n  diff \"${TEMP_FILE}\" \"${line}\"\n  
ret=$?\n  rm \"${TEMP_FILE}\"\n  if [[ \"${ret}\" -ne 0 ]]; then\n    echo \"File ${line} is not formatted, please format it.\"\n    echo \"1\" > \"${CHECK_RESULT_FILE}\"\n    break\n  fi\ndone < \"${CHECK_LIST_FILE}\"\n\nresult=$(cat \"${CHECK_RESULT_FILE}\")\nrm \"${CHECK_RESULT_FILE}\"\nrm \"${CHECK_LIST_FILE}\"\ncd \"${CURRENT_PATH}\" || exit 1\nif [[ \"X${result}\" == \"X0\" ]]; then\n  echo \"Check PASS: specified files are well formatted!\"\nfi\nexit \"${result}\"\n"
  },
  {
    "path": "scripts/format_source_code.sh",
    "content": "#!/bin/bash\n# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nset -e\n\nCLANG_FORMAT=$(which clang-format) || (echo \"Please install 'clang-format' tool first\"; exit 1)\n\nversion=$(\"${CLANG_FORMAT}\" --version | sed -n \"s/.*\\ \\([0-9]*\\)\\.[0-9]*\\.[0-9]*.*/\\1/p\")\nif [[ \"${version}\" -lt \"8\" ]]; then\n  echo \"clang-format's version must be at least 8.0.0\"\n  exit 1\nfi\n\nCURRENT_PATH=$(pwd)\nSCRIPTS_PATH=$(dirname \"$0\")\n\necho \"CURRENT_PATH=${CURRENT_PATH}\"\necho \"SCRIPTS_PATH=${SCRIPTS_PATH}\"\n\n# print usage message\nfunction usage()\n{\n  echo \"Format the specified source files to conform the code style.\"\n  echo \"Usage:\"\n  echo \"bash $0 [-a] [-c] [-l] [-h]\"\n  echo \"e.g. 
$0 -c\"\n  echo \"\"\n  echo \"Options:\"\n  echo \"    -a format of all files\"\n  echo \"    -c format of the files changed compared to last commit, default case\"\n  echo \"    -l format of the files changed in last commit\"\n  echo \"    -h Print usage\"\n}\n\n# check and set options\nfunction checkopts()\n{\n  # init variable\n  mode=\"changed\"    # default format changed files\n\n  # Process the options\n  while getopts 'aclh' opt\n  do\n    case \"${opt}\" in\n      a)\n        mode=\"all\"\n        ;;\n      c)\n        mode=\"changed\"\n        ;;\n      l)\n        mode=\"lastcommit\"\n        ;;\n      h)\n        usage\n        exit 0\n        ;;\n      *)\n        echo \"Unknown option ${opt}!\"\n        usage\n        exit 1\n    esac\n  done\n}\n\n# init variable\n# check options\ncheckopts \"$@\"\n\n# switch to project root path, which contains clang-format config file '.clang-format'\ncd \"${SCRIPTS_PATH}/../..\" || exit 1\n\nFMT_FILE_LIST='__format_files_list__'\n\nif [[ \"X${mode}\" == \"Xall\" ]]; then\n  find ./ -type f -name \"*\" | grep \"\\.h$\\|\\.cc$\" > \"${FMT_FILE_LIST}\" || true\nelif [[ \"X${mode}\" == \"Xchanged\" ]]; then\n  git diff --name-only | grep \"\\.h$\\|\\.cc$\" > \"${FMT_FILE_LIST}\" || true\nelse  # \"X${mode}\" == \"Xlastcommit\"\n  git diff --name-only HEAD~ HEAD | grep \"\\.h$\\|\\.cc$\" > \"${FMT_FILE_LIST}\" || true\nfi\n\nwhile read line; do\n  if [ -f \"${line}\" ]; then\n    ${CLANG_FORMAT} -i \"${line}\"\n  fi\ndone < \"${FMT_FILE_LIST}\"\n\nrm \"${FMT_FILE_LIST}\"\ncd \"${CURRENT_PATH}\" || exit 1\n\necho \"Specified cpp source files have been format successfully.\"\n"
  },
  {
    "path": "setup.py",
    "content": "#!/usr/bin/env python3\n# encoding: utf-8\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"setup package.\"\"\"\nimport os\nimport stat\nimport platform\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.egg_info import egg_info\nfrom setuptools.command.build_py import build_py\n\nversion = '2.0.2'\n\nbackend_policy = os.getenv('BACKEND_POLICY')\ncommit_id = os.getenv('COMMIT_ID').replace(\"\\n\", \"\")\npackage_name = os.getenv('MS_PACKAGE_NAME').replace(\"\\n\", \"\")\n\npwd = os.path.dirname(os.path.realpath(__file__))\npkg_dir = os.path.join(pwd, 'build/package')\n\n\ndef _read_file(filename):\n    with open(os.path.join(pwd, filename), encoding='UTF-8') as f:\n        return f.read()\n\n\nreadme = _read_file('README.md')\nrelease = _read_file('RELEASE.md')\n\n\ndef _write_version(file):\n    file.write(\"__version__ = '{}'\\n\".format(version))\n\n\ndef _write_config(file):\n    file.write(\"__backend__ = '{}'\\n\".format(backend_policy))\n\n\ndef _write_commit_file(file):\n    file.write(\"__commit_id__ = '{}'\\n\".format(commit_id))\n\n\ndef _write_package_name(file):\n    file.write(\"__package_name__ = '{}'\\n\".format(package_name))\n\n\ndef build_dependencies():\n    \"\"\"generate python file\"\"\"\n    version_file = os.path.join(pkg_dir, 'mindspore_serving', 'version.py')\n    with 
open(version_file, 'w') as f:\n        _write_version(f)\n\n    version_file = os.path.join(pwd, 'mindspore_serving', 'version.py')\n    with open(version_file, 'w') as f:\n        _write_version(f)\n\n    config_file = os.path.join(pkg_dir, 'mindspore_serving', 'default_config.py')\n    with open(config_file, 'w') as f:\n        _write_config(f)\n\n    config_file = os.path.join(pwd, 'mindspore_serving', 'default_config.py')\n    with open(config_file, 'w') as f:\n        _write_config(f)\n\n    package_info = os.path.join(pkg_dir, 'mindspore_serving', 'default_config.py')\n    with open(package_info, 'a') as f:\n        _write_package_name(f)\n\n    package_info = os.path.join(pwd, 'mindspore_serving', 'default_config.py')\n    with open(package_info, 'a') as f:\n        _write_package_name(f)\n\n    commit_file = os.path.join(pkg_dir, 'mindspore_serving', '.commit_id')\n    with open(commit_file, 'w') as f:\n        _write_commit_file(f)\n\n    commit_file = os.path.join(pwd, 'mindspore_serving', '.commit_id')\n    with open(commit_file, 'w') as f:\n        _write_commit_file(f)\n\n\nbuild_dependencies()\n\nrequired_package = [\n    'numpy >= 1.17.0',\n    'protobuf >= 3.13.0',\n    'grpcio >= 1.36.0, <= 1.47.0',\n    'psutil >= 5.9.1'\n]\n\npackage_data = {\n    '': [\n        '*.so*',\n        '*.pyd',\n        '*.dll',\n        'lib/*.so*',\n        'lib/*.a',\n        '.commit_id',\n        '_mindspore_serving',\n        'proto/*.py'\n    ]\n}\n\n\ndef update_permissions(path):\n    \"\"\"\n    Update permissions.\n\n    Args:\n        path (str): Target directory path.\n    \"\"\"\n    if platform.system() == \"Windows\":\n        return\n\n    for dirpath, dirnames, filenames in os.walk(path):\n        for dirname in dirnames:\n            dir_fullpath = os.path.join(dirpath, dirname)\n            os.chmod(dir_fullpath, stat.S_IREAD | stat.S_IWRITE |\n                     stat.S_IEXEC | stat.S_IRGRP | stat.S_IXGRP)\n        for filename in filenames:\n     
       file_fullpath = os.path.join(dirpath, filename)\n            os.chmod(file_fullpath, stat.S_IREAD)\n\n\ndef bin_files():\n    \"\"\"\n    Gets the binary files to be installed.\n    \"\"\"\n    data_files = []\n    binary_files = []\n\n    cache_server_bin = os.path.join('mindspore_serving', 'bin', 'cache_server')\n    if not os.path.exists(cache_server_bin):\n        return data_files\n    binary_files.append(cache_server_bin)\n    cache_admin_bin = os.path.join('mindspore_serving', 'bin', 'cache_admin')\n    if not os.path.exists(cache_admin_bin):\n        return data_files\n    binary_files.append(cache_admin_bin)\n    data_files.append(('bin', binary_files))\n    return data_files\n\n\nclass EggInfo(egg_info):\n    \"\"\"Egg info.\"\"\"\n\n    def run(self):\n        super().run()\n        egg_info_dir = os.path.join(pkg_dir, 'mindspore_serving.egg-info')\n        update_permissions(egg_info_dir)\n\n\nclass BuildPy(build_py):\n    \"\"\"BuildPy.\"\"\"\n\n    def run(self):\n        super().run()\n        mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', 'mindspore_serving')\n        update_permissions(mindspore_dir)\n        mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', 'akg')\n        update_permissions(mindspore_dir)\n\n\nsetup(\n    name=package_name,\n    version=version,\n    author='The MindSpore Authors',\n    author_email='contact@mindspore.cn',\n    url='https://www.mindspore.cn',\n    download_url='https://gitee.com/mindspore/serving/tags',\n    project_urls={\n        'Sources': 'https://gitee.com/mindspore/serving',\n        'Issue Tracker': 'https://gitee.com/mindspore/serving/issues',\n    },\n    description='MindSpore is a new open source deep learning training/inference '\n                'framework that could be used for mobile, edge and cloud scenarios.',\n    # long_description=\"\\n\\n\".join([readme, release]),\n    long_description=\"\\n\\n\".join([readme]),\n    long_description_content_type=\"text/markdown\",\n    
data_files=bin_files(),\n    packages=find_packages(),\n    package_data=package_data,\n    include_package_data=True,\n    cmdclass={\n        'egg_info': EggInfo,\n        'build_py': BuildPy,\n    },\n    python_requires='>=3.7',\n    install_requires=required_package,\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'Environment :: Console',\n        'Intended Audience :: Science/Research',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: Apache Software License',\n        'Programming Language :: Python :: 3 :: Only',\n        'Programming Language :: Python :: 3.7',\n        'Programming Language :: Python :: 3.8',\n        'Programming Language :: Python :: 3.9',\n        'Programming Language :: C++',\n        'Topic :: Scientific/Engineering',\n        'Topic :: Scientific/Engineering :: Artificial Intelligence',\n        'Topic :: Software Development',\n        'Topic :: Software Development :: Libraries',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n    ],\n    license='Apache 2.0',\n    keywords='mindspore machine learning',\n)\n"
  },
  {
    "path": "tests/CMakeLists.txt",
    "content": "#add flags\nmessage(\"================START BUILD TESTCASES=================\")\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -Wno-sign-compare\")\n\nadd_subdirectory(\"ut\")\n"
  },
  {
    "path": "tests/st/add/__init__.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n"
  },
  {
    "path": "tests/st/add/add.sh",
    "content": "#!/bin/bash\n\nexport GLOG_v=1\ncd \"$(dirname $0)\" || exit;\nCURRPATH=$(pwd)\nCURRUSER=$(whoami)\nPROJECT_PATH=${CURRPATH}/../../../\necho \"CURRPATH:\"  ${CURRPATH}\necho \"CURRUSER:\"  ${CURRUSER}\necho \"PROJECT_PATH:\"  ${PROJECT_PATH}\n\necho \"LD_LIBRARY_PATH: \" ${LD_LIBRARY_PATH}\necho \"PYTHONPATH: \" ${PYTHONPATH}\n\nrm -rf serving *.log *.mindir *.dat kernel_meta\nrm -rf unix_socket_files serving_logs\nrm -rf add serving_client.py serving_client_with_check.py export_model serving_server.py\ncp -r ../../../example/tensor_add/* .\n\nclean_pid()\n{\n  ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  if [ $? -ne 0 ]\n  then\n    echo \"clean pip failed\"\n  fi\n  sleep 6\n}\n\nprepare_model()\n{\n  echo \"### begin to generate mode for serving test ###\"\n  cd export_model\n  python3 add_model.py &> add_model.log\n  echo \"### end to generate mode for serving test ###\"\n  result=`find . -name  tensor_add.mindir | wc -l`\n  if [ ${result} -ne 1 ]\n  then\n    cat add_model.log\n    echo \"### generate model for serving test failed ###\" && exit 1\n    clean_pid\n    cd -\n  fi\n  cd -\n}\n\nstart_service()\n{\n  echo \"### start serving service ###\"\n  unset http_proxy https_proxy\n  python3 serving_server.py > serving_server.log 2>&1 &\n  if [ $? 
-ne 0 ]\n  then\n    echo \"server failed to start.\"\n  fi\n\n  result=`grep -E 'Serving gRPC server start success, listening on 127.0.0.1:5500' serving_server.log | wc -l`\n  count=0\n  while [[ ${result} -eq 0 && ${count} -lt 150 ]]\n  do\n    sleep 1\n    count=$(($count+1))\n    result=`grep -E 'Serving gRPC server start success, listening on 127.0.0.1:5500' serving_server.log | wc -l`\n  done\n\n  if [ ${count} -eq 150 ]\n  then\n    clean_pid\n    cat serving_server.log\n    echo \"worker log begin----------------------------------\"\n    cat serving_logs/*.log\n    echo \"worker log end----------------------------------\"\n    echo \"start serving service failed!\" && exit 1\n  fi\n  echo \"### start serving service end ###\"\n}\n\npytest_serving()\n{\n  unset http_proxy https_proxy\n  echo \"###  client start ###\"\n  python3  serving_client_with_check.py > client.log 2>&1\n  if [ $? -ne 0 ]\n  then\n    clean_pid\n    cat client.log\n    echo \"client failed to start.\" && exit 1\n  fi\n  echo \"### client end ###\"\n}\n\ntest_add_model()\n{\n  start_service\n  pytest_serving\n  cat client.log\n  clean_pid\n}\n\necho \"-----serving start-----\"\nprepare_model\ntest_add_model\n"
  },
  {
    "path": "tests/st/add/test_serving.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport os\nimport pytest\nimport numpy as np\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_serving_add():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/add.sh\")\n    assert np.allclose(ret, 0)\n\n\nif __name__ == '__main__':\n    test_serving_add()\n"
  },
  {
    "path": "tests/st/add_sub_pipeline/__init__.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n"
  },
  {
    "path": "tests/st/add_sub_pipeline/add_sub.sh",
    "content": "#!/bin/bash\n\nexport GLOG_v=1\ncd \"$(dirname $0)\" || exit;\nCURRPATH=$(pwd)\nCURRUSER=$(whoami)\nPROJECT_PATH=${CURRPATH}/../../../\necho \"CURRPATH:\"  ${CURRPATH}\necho \"CURRUSER:\"  ${CURRUSER}\necho \"PROJECT_PATH:\"  ${PROJECT_PATH}\n\necho \"LD_LIBRARY_PATH: \" ${LD_LIBRARY_PATH}\necho \"PYTHONPATH: \" ${PYTHONPATH}\n\nrm -rf serving *.log *.mindir *.dat kernel_meta\nrm -rf unix_socket_files serving_logs\nrm -rf add serving_client.py export_model serving_server.py add_sub\ncp -r ../../../example/add_sub_pipeline/* .\n\nclean_pid()\n{\n  ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  if [ $? -ne 0 ]\n  then\n    echo \"clean pip failed\"\n  fi\n  sleep 6\n}\n\nprepare_model()\n{\n  echo \"### begin to generate mode for serving test ###\"\n  cd export_model\n  python3 add_sub_model.py &> add_sub_model.log\n  echo \"### end to generate mode for serving test ###\"\n  result=`find . -name  tensor_add.mindir | wc -l`\n  if [ ${result} -ne 1 ]\n  then\n    cat add_sub_model.log\n    echo \"### generate model for serving test failed ###\" && exit 1\n    clean_pid\n    cd -\n  fi\n  cd -\n}\n\nstart_service()\n{\n  echo \"### start serving service ###\"\n  unset http_proxy https_proxy\n  python3 serving_server.py > serving_server.log 2>&1 &\n  if [ $? 
-ne 0 ]\n  then\n    echo \"server failed to start.\"\n  fi\n\n  result=`grep -E 'Serving gRPC server start success, listening on 127.0.0.1:5500' serving_server.log | wc -l`\n  count=0\n  while [[ ${result} -eq 0 && ${count} -lt 150 ]]\n  do\n    sleep 1\n    count=$(($count+1))\n    result=`grep -E 'Serving gRPC server start success, listening on 127.0.0.1:5500' serving_server.log | wc -l`\n  done\n\n  if [ ${count} -eq 150 ]\n  then\n    clean_pid\n    cat serving_server.log\n    echo \"worker log begin----------------------------------\"\n    cat serving_logs/*.log\n    echo \"worker log end----------------------------------\"\n    echo \"start serving service failed!\" && exit 1\n  fi\n  echo \"### start serving service end ###\"\n}\n\npytest_serving()\n{\n  unset http_proxy https_proxy\n  echo \"###  client start ###\"\n  python3  serving_client.py > client.log 2>&1\n  if [ $? -ne 0 ]\n  then\n    clean_pid\n    cat client.log\n    echo \"client failed to start.\" && exit 1\n  fi\n  echo \"### client end ###\"\n}\n\ntest_add_sub_pipeline()\n{\n  start_service\n  pytest_serving\n  cat client.log\n  clean_pid\n}\n\necho \"-----serving start-----\"\nprepare_model\ntest_add_sub_pipeline\n"
  },
  {
    "path": "tests/st/add_sub_pipeline/test_serving.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport os\nimport pytest\nimport numpy as np\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_serving_add():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/add_sub.sh\")\n    assert np.allclose(ret, 0)\n\n\nif __name__ == '__main__':\n    test_serving_add()\n"
  },
  {
    "path": "tests/st/distributed_server_fault/__init__.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n"
  },
  {
    "path": "tests/st/distributed_server_fault/common.sh",
    "content": "#!/bin/bash\n\nexport GLOG_v=1\n\ncd \"$(dirname $0)\" || exit;\nCURRPATH=$(pwd)\nCURRUSER=$(whoami)\nPROJECT_PATH=${CURRPATH}/../../../\necho \"CURRPATH:\"  ${CURRPATH}\necho \"CURRUSER:\"  ${CURRUSER}\necho \"PROJECT_PATH:\"  ${PROJECT_PATH}\n\necho \"LD_LIBRARY_PATH: \" ${LD_LIBRARY_PATH}\necho \"PYTHONPATH: \" ${PYTHONPATH}\n\nget_serving_server_count()\n{\n  num=`ps -ef | grep serving_server.py | grep -v grep | wc -l`\n  return ${num}\n}\n\nget_serving_agent_count()\n{\n  num=`ps -ef | grep serving_agent.py | grep -v grep | wc -l`\n  return ${num}\n}\n\nclean_pid()\n{\n  get_serving_server_count\n  if [ $? -ne 0 ]\n  then\n    ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -15\n  fi\n\n  count=0\n  get_serving_server_count\n  while [[ $? -ne 0 && ${count} -lt 5 ]]\n  do\n    sleep 1\n    get_serving_server_count\n  done\n\n  get_serving_server_count\n  if [ $? -ne 0 ]\n  then\n    ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  fi\n  get_serving_agent_count\n  if [ $? 
-ne 0 ]\n  then\n    ps aux | grep 'serving_agent.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  fi\n}\n\nprepare_model()\n{\n  model_path=${CURRPATH}/../model\n  if [ -d $model_path ]\n  then\n    echo \"copy model path\"\n    cp -r ../model .\n  else\n    echo \"### begin to generate mode for serving test ###\"\n    cd export_model || exit\n    sh export_model.sh &> model.log\n    echo \"### end to generate mode for serving test ###\"\n    result=`find ../ -name  model | wc -l`\n    if [ ${result} -ne 1 ]\n    then\n      cat model.log\n      clean_pid\n      echo \"### generate model for serving test failed ###\" && exit 1\n    fi\n    cd - || exit\n    cp -r model ../\n  fi\n}\n\nstart_serving_server()\n{\n  echo \"### start serving server ###\"\n  unset http_proxy https_proxy\n  python3 serving_server.py > serving_server.log 2>&1 &\n  if [ $? -ne 0 ]\n  then\n    echo \"serving server failed to start.\"\n  fi\n\n  result=`grep -E 'Master server start success, listening on' serving_server.log | wc -l`\n  count=0\n  while [[ ${result} -eq 0 && ${count} -lt 150 ]]\n  do\n    sleep 1\n    get_serving_server_count\n    if [ $? 
-eq 0 ]\n    then\n      clean_pid\n\n      echo \"serving server log begin-------------------\"\n      cat serving_server.log\n      echo \"serving server log end-------------------\"\n\n      echo \"serving worker log begin-------------------\"\n      cat serving_logs/*.log\n      echo \"serving worker log end-------------------\"\n\n      echo \"start serving server failed!\" && exit 1\n    fi\n    count=$(($count+1))\n    result=`grep -E 'Master server start success, listening on' serving_server.log | wc -l`\n  done\n\n  if [ ${count} -eq 150 ]\n  then\n    clean_pid\n\n    echo \"serving server log begin-------------------\"\n    cat serving_server.log\n    echo \"serving server log end-------------------\"\n\n    echo \"serving worker log begin-------------------\"\n    cat serving_logs/*.log\n    echo \"serving worker log end-------------------\"\n\n    echo \"start serving server failed!\" && exit 1\n  fi\n  echo \"### start serving server end ###\"\n}\n\nstart_serving_agent()\n{\n  echo \"### start serving agent ###\"\n  unset http_proxy https_proxy\n  python3 serving_agent.py > serving_agent.log 2>&1 &\n  if [ $? -ne 0 ]\n  then\n    echo \"server agent failed to start.\"\n  fi\n\n  result=`grep -E 'Child 0: Receive success' serving_agent.log | wc -l`\n  count=0\n  while [[ ${result} -ne 1 && ${count} -lt 150 ]]\n  do\n    sleep 1\n    get_serving_agent_count\n    if [ $? -eq 0 ]\n    then\n      clean_pid\n      cat serving_agent.log\n      echo \"start serving agent failed!\" && exit 1\n    fi\n    count=$(($count+1))\n    result=`grep -E 'Child 0: Receive success' serving_agent.log | wc -l`\n  done\n\n  if [ ${count} -eq 150 ]\n  then\n    clean_pid\n    cat serving_agent.log\n    echo \"start serving agent failed!\" && exit 1\n  fi\n  echo \"### start serving agent end ###\"\n}\n\nwait_server_exit()\n{\n  get_serving_server_count\n  count=0\n  while [[ $? 
-ne 0 && ${count} -lt 15 ]]\n  do\n    sleep 1\n    count=$(($count+1))\n    get_serving_server_count\n  done\n\n  if [ ${count} -eq 15 ]\n  then\n    echo \"serving server exit failed\"\n    ps -ef | grep serving_server.py | grep -v grep\n    echo \"------------------------------ serving server failed log begin: \"\n    cat serving_server.log\n    echo \"------------------------------ serving server failed log end\"\n    clean_pid && exit 1\n  fi\n}\n\nwait_agent_exit()\n{\n  get_serving_agent_count\n  count=0\n  while [[ $? -ne 0 && ${count} -lt 15 ]]\n  do\n    sleep 1\n    count=$(($count+1))\n    get_serving_agent_count\n  done\n\n  if [ ${count} -eq 15 ]\n  then\n    echo \"serving agent exit failed\"\n    ps -ef | grep serving_agent.py | grep -v grep\n    echo \"------------------------------ serving agent failed log begin: \"\n    cat serving_agent.log\n    echo \"------------------------------ serving agent failed log end\"\n    clean_pid && exit 1\n  fi\n}\n\ninit()\n{\n  rm -rf serving *.log *.mindir *.dat matmul kernel_meta\n  rm -rf unix_socket_files serving_logs\n  rm -rf *.json export_model  serving_server.py serving_agent.py serving_client.py\n  cp -r ../../../example/matmul_distributed/* .\n  prepare_model\n}\n\n"
  },
  {
    "path": "tests/st/distributed_server_fault/kill_15_agent.sh",
    "content": "#!/bin/bash\n\nCURRPATH=$(cd \"$(dirname $0)\" || exit; pwd)\nsource ${CURRPATH}/common.sh\n\nkill_serving_agent()\n{\n  get_serving_server_count\n  if [ $? -ne 1 ]\n  then\n    echo \"serving server start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  get_serving_agent_count\n  if [ $? -ne 9 ]\n  then\n    echo \"serving agent start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  ps aux | grep 'serving_agent.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -15\n  if [ $? -ne 0 ]\n  then\n    echo \"kill agent failed\"\n  fi\n\n  wait_agent_exit\n  wait_server_exit\n}\n\ntest_kill_serving_agent()\n{\n  start_serving_server\n  start_serving_agent\n  kill_serving_agent\n  clean_pid\n}\n\necho \"-----serving start-----\"\ninit\ntest_kill_serving_agent\necho \"### end to serving test ###\"\n"
  },
  {
    "path": "tests/st/distributed_server_fault/kill_15_server.sh",
    "content": "#!/bin/bash\n\nCURRPATH=$(cd \"$(dirname $0)\" || exit; pwd)\nsource ${CURRPATH}/common.sh\n\nkill_serving_server()\n{\n  get_serving_server_count\n  if [ $? -ne 1 ]\n  then\n    echo \"master_with_worker start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  get_serving_agent_count\n  if [ $? -ne 9 ]\n  then\n    echo \"agent start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -15\n  if [ $? -ne 0 ]\n  then\n    echo \"kill master_with_worker failed\"\n  fi\n\n  wait_agent_exit\n  wait_server_exit\n}\n\ntest_kill_serving_server()\n{\n  start_serving_server\n  start_serving_agent\n  kill_serving_server\n  clean_pid\n}\n\necho \"-----serving start-----\"\ninit\ntest_kill_serving_server\necho \"### end to serving test ###\"\n"
  },
  {
    "path": "tests/st/distributed_server_fault/kill_9_agent.sh",
    "content": "#!/bin/bash\n\nCURRPATH=$(cd \"$(dirname $0)\" || exit; pwd)\nsource ${CURRPATH}/common.sh\n\nkill_serving_agent()\n{\n  get_serving_server_count\n  if [ $? -ne 1 ]\n  then\n    echo \"serving server start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  get_serving_agent_count\n  if [ $? -ne 9 ]\n  then\n    echo \"serving agent start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  num=`grep -E 'Recv Pong Time Out from' serving_logs/log_matmul*.log | wc -l`\n  if [ $num -ne 0 ]\n  then\n    echo \"serving agent has exited\"\n    echo $num\n    clean_pid && exit 1\n  fi\n  ps aux | grep 'serving_agent.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  if [ $? -ne 0 ]\n  then\n    echo \"kill serving agent failed\"\n  fi\n  sleep 25\n  get_serving_agent_count\n  if [ $? -ne 0 ]\n  then\n    echo \"agent exit failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  get_serving_server_count\n  if [ $? -ne 1 ]\n  then\n    echo \"serving server start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  num=`grep -E 'Recv Pong Time Out from' serving_logs/log_matmul*.log | wc -l`\n  if [ $num -ne 8 ]\n  then\n    echo \"catch agent exit failed\"\n    echo $num\n    clean_pid && exit 1\n  fi\n}\n\ntest_kill_serving_agent()\n{\n  start_serving_server\n  start_serving_agent\n  kill_serving_agent\n  clean_pid\n}\n\necho \"-----serving start-----\"\ninit\ntest_kill_serving_agent\necho \"### end to serving test ###\"\n"
  },
  {
    "path": "tests/st/distributed_server_fault/kill_9_server.sh",
    "content": "#!/bin/bash\n\nCURRPATH=$(cd \"$(dirname $0)\" || exit; pwd)\nsource ${CURRPATH}/common.sh\n\nkill_serving_server()\n{\n  get_serving_server_count\n  if [ $? -ne 1 ]\n  then\n    echo \"serving server start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  num=`ps -ef | grep start_distributed_worker.py | grep -v grep | wc -l`\n  if [ ${num} -ne 1 ]\n  then\n    echo \"serving worker start failed\"\n    echo ${num}\n    clean_pid && exit 1\n  fi\n  get_serving_agent_count\n  if [ $? -ne 9 ]\n  then\n    echo \"serving agent start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  num=`grep -E 'Recv Ping Time Out from' serving_server.log | wc -l`\n  if [ $num -ne 0 ]\n  then\n    echo \"serving agent has exited\"\n    echo $num\n    clean_pid && exit 1\n  fi\n  ps aux | grep 'start_distributed_worker.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  if [ $? -ne 0 ]\n  then\n    echo \"kill serving worker failed\"\n  fi\n  sleep 25\n  num=`grep -E 'Recv Ping Time Out from' serving_agent.log | wc -l`\n  if [ $num -ne 8 ]\n  then\n    echo \"catch serving server exit failed\"\n    echo $num\n    clean_pid && exit 1\n  fi\n}\n\ntest_kill_serving_server()\n{\n  start_serving_server\n  start_serving_agent\n  kill_serving_server\n  clean_pid\n}\n\necho \"-----serving start-----\"\ninit\ntest_kill_serving_server\necho \"### end to serving test ###\"\n"
  },
  {
    "path": "tests/st/distributed_server_fault/test_distributed_fault.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport os\nimport pytest\nimport numpy as np\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_distribute_fault_kill_15_agent():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/kill_15_agent.sh\")\n    assert np.allclose(ret, 0)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_distribute_fault_kill_9_agent():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/kill_9_agent.sh\")\n    assert np.allclose(ret, 0)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_distribute_fault_kill_15_server():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/kill_15_server.sh\")\n    assert np.allclose(ret, 0)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_distribute_fault_kill_9_server():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/kill_9_server.sh\")\n    assert 
np.allclose(ret, 0)\n\n\nif __name__ == '__main__':\n    test_distribute_fault_kill_9_server()\n    test_distribute_fault_kill_15_server()\n    test_distribute_fault_kill_9_agent()\n    test_distribute_fault_kill_15_agent()\n"
  },
  {
    "path": "tests/st/matmul_distributed/__init__.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n"
  },
  {
    "path": "tests/st/matmul_distributed/matmul_distribute.sh",
    "content": "#!/bin/bash\n\nexport GLOG_v=1\n\ncd \"$(dirname $0)\" || exit\nCURRPATH=$(pwd)\nCURRUSER=$(whoami)\nPROJECT_PATH=${CURRPATH}/../../../\necho \"CURRPATH:\"  ${CURRPATH}\necho \"CURRUSER:\"  ${CURRUSER}\necho \"PROJECT_PATH:\"  ${PROJECT_PATH}\n\necho \"LD_LIBRARY_PATH: \" ${LD_LIBRARY_PATH}\necho \"PYTHONPATH: \" ${PYTHONPATH}\n\nclean_server_pid()\n{\n  num=`ps -ef | grep serving_server.py | grep -v grep | wc -l`\n  if [ ${num} -ne 0 ]\n  then\n    ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -15\n    if [ $? -ne 0 ]\n    then\n      echo \"clean master pid failed\"\n    fi\n  fi\n\n  num=`ps -ef | grep start_distributed_worker.py | grep -v grep | wc -l`\n  count=0\n  while [[ ${num} -ne 0 && ${count} -lt 10 ]]\n  do\n    sleep 1\n    count=$(($count+1))\n    num=`ps -ef | grep start_distributed_worker.py | grep -v grep | wc -l`\n  done\n\n  if [ ${count} -eq 10 ]\n  then\n    echo \"worker exit failed\"\n    echo $num\n    ps -ef | grep start_distributed_worker.py | grep -v grep\n\n    echo \"------------------------------ worker failed master log begin: \"\n    cat serving_server.log\n    echo \"------------------------------ worker failed master log end\"\n\n    echo \"------------------------------ worker failed log begin: \"\n    cat serving_logs/*.log\n    echo \"------------------------------ worker failed log end\"\n    clean_pid && exit 1\n  fi\n\n  num=`ps -ef | grep serving_agent.py | grep -v grep | wc -l`\n  count=0\n  while [[ ${num} -ne 0 && ${count} -lt 10 ]]\n  do\n    sleep 1\n    count=$(($count+1))\n    num=`ps -ef | grep serving_agent.py | grep -v grep | wc -l`\n  done\n\n  if [ ${count} -eq 10 ]\n  then\n    echo \"agent exit failed\"\n    echo $num\n    ps -ef | grep serving_agent.py | grep -v grep\n    echo \"------------------------------ agent failed log begin: \"\n    cat serving_agent.log\n    echo \"------------------------------ agent failed log end\"\n    clean_pid 
&& exit 1\n  fi\n}\n\nclean_pid()\n{\n  ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep\n  if [ $? -eq 0 ]\n  then\n    ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n    echo \"### master pid exist, clean master pip failed ###\"\n  fi\n  ps aux | grep 'start_distributed_worker.py' | grep ${CURRUSER} | grep -v grep\n  if [ $? -eq 0 ]\n  then\n    ps aux | grep 'start_distributed_worker.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n    echo \"### master pid is killed but worker pid exist ###\"\n  fi\n  ps aux | grep 'serving_agent.py' | grep ${CURRUSER} | grep -v grep\n  if [ $? -eq 0 ]\n  then\n    ps aux | grep 'serving_agent.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n    echo \"### worker pid is killed but agent pid exist ###\"\n  fi\n}\n\nprepare_model()\n{\n  model_path=${CURRPATH}/../model\n  if [ -d $model_path ]\n  then\n    echo \"copy model path\"\n    cp -r ../model .\n  else\n    echo \"### begin to generate mode for serving test ###\"\n    cd export_model || exit\n    sh export_model.sh &> model.log\n    echo \"### end to generate mode for serving test ###\"\n    result=`find ../ -name  model | wc -l`\n    if [ ${result} -ne 1 ]\n    then\n      echo \"### begin model generation log ###\"\n      cat model.log\n      echo \"### end model generation log ###\"\n      clean_pid\n      echo \"### generate model for serving test failed ###\" && exit 1\n    fi\n    cd - || exit\n    cp -r model ../\n  fi\n}\n\nstart_serving_server()\n{\n  echo \"### start serving server ###\"\n  unset http_proxy https_proxy\n  python3 serving_server.py > serving_server.log 2>&1 &\n  if [ $? 
-ne 0 ]\n  then\n    echo \"serving server failed to start.\"\n  fi\n\n  result=`grep -E 'Master server start success, listening on' serving_server.log | wc -l`\n  count=0\n  while [[ ${result} -eq 0 && ${count} -lt 150 ]]\n  do\n    sleep 1\n    num=`ps -ef | grep serving_server.py | grep -v grep | wc -l`\n    if [ ${num} -eq 0 ]\n    then\n      echo \"serving server log begin-------------------\"\n      cat serving_server.log\n      echo \"serving server log end-------------------\"\n\n      echo \"serving worker log begin-------------------\"\n      cat serving_logs/*.log\n      echo \"serving worker log end-------------------\"\n      clean_pid\n      echo \"start serving server failed!\" && exit 1\n    fi\n    count=$(($count+1))\n    result=`grep -E 'Master server start success, listening on' serving_server.log | wc -l`\n  done\n\n  if [ ${count} -eq 150 ]\n  then\n    echo \"serving server log begin-------------------\"\n    cat serving_server.log\n    echo \"serving server log end-------------------\"\n\n    echo \"serving worker log begin-------------------\"\n    cat serving_logs/*.log\n    echo \"serving worker log end-------------------\"\n    clean_pid\n    echo \"start serving server failed!\" && exit 1\n  fi\n  echo \"### start serving server end ###\"\n}\n\nstart_serving_agent()\n{\n  echo \"### start serving agent ###\"\n  unset http_proxy https_proxy\n  python3 serving_agent.py > serving_agent.log 2>&1 &\n  if [ $? 
-ne 0 ]\n  then\n    echo \"server agent failed to start.\"\n  fi\n\n  result=`grep -E 'Child 0: Receive success' serving_agent.log | wc -l`\n  count=0\n  while [[ ${result} -ne 1 && ${count} -lt 150 ]]\n  do\n    sleep 1\n    num=`ps -ef | grep serving_agent.py | grep -v grep | wc -l`\n    if [ ${num} -eq 0 ]\n    then\n      clean_pid\n      cat serving_agent.log\n      echo \"start serving agent failed!\" && exit 1\n    fi\n    count=$(($count+1))\n    result=`grep -E 'Child 0: Receive success' serving_agent.log | wc -l`\n  done\n\n  if [ ${count} -eq 150 ]\n  then\n    clean_pid\n    cat serving_agent.log\n    echo \"start serving agent failed!\" && exit 1\n  fi\n  echo \"### start serving agent end ###\"\n}\n\npytest_serving()\n{\n  unset http_proxy https_proxy\n  echo \"###  client start ###\"\n  python3  serving_client.py > serving_client.log 2>&1\n  if [ $? -ne 0 ]\n  then\n    cat serving_client.log\n    clean_server_pid\n    clean_pid\n    echo \"client failed to start.\" && exit 1\n  fi\n  echo \"### client end ###\"\n}\n\ntest_matmul_distribute()\n{\n  start_serving_server\n  start_serving_agent\n  pytest_serving\n  cat serving_client.log\n  clean_server_pid\n  clean_pid\n}\n\necho \"-----serving start-----\"\nrm -rf serving *.log *.dat matmul model kernel_meta somas_meta\nrm -rf unix_socket_files serving_logs\nrm -rf serving_client.py  export_model temp_rank_table serving_server.py serving_agent.py rank_table_8pcs.json\ncp -r ../../../example/matmul_distributed/* .\nprepare_model\ntest_matmul_distribute\n"
  },
  {
    "path": "tests/st/matmul_distributed/test_matmul_distribute.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport os\nimport pytest\nimport numpy as np\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_serving_matmul_distributed():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/matmul_distribute.sh\")\n    assert np.allclose(ret, 0)\n\n\nif __name__ == '__main__':\n    test_serving_matmul_distributed()\n"
  },
  {
    "path": "tests/st/matmul_multi_subgraphs/__init__.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n"
  },
  {
    "path": "tests/st/matmul_multi_subgraphs/matmul_multi_subgraphs.sh",
    "content": "#!/bin/bash\n\nexport GLOG_v=1\ncd \"$(dirname $0)\" || exit;\nCURRPATH=$(pwd)\nCURRUSER=$(whoami)\nPROJECT_PATH=${CURRPATH}/../../../\necho \"CURRPATH:\"  ${CURRPATH}\necho \"CURRUSER:\"  ${CURRUSER}\necho \"PROJECT_PATH:\"  ${PROJECT_PATH}\n\necho \"LD_LIBRARY_PATH: \" ${LD_LIBRARY_PATH}\necho \"PYTHONPATH: \" ${PYTHONPATH}\n\nrm -rf serving *.log *.mindir *.dat kernel_meta\nrm -rf unix_socket_files serving_logs\nrm -rf add serving_client.py export_model serving_server.py\ncp -r ../../../example/matmul_multi_subgraphs/* .\n\nclean_pid()\n{\n  ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  if [ $? -ne 0 ]\n  then\n    echo \"clean pip failed\"\n  fi\n  sleep 6\n}\n\nprepare_model()\n{\n  echo \"### begin to generate mode for serving test ###\"\n  cd export_model\n  python3 export_matmul.py &> export_matmul.log\n  echo \"### end to generate mode for serving test ###\"\n  result=`find . -name  matmul_0.mindir | wc -l`\n  if [ ${result} -ne 1 ]\n  then\n    cat export_matmul.log\n    echo \"### generate model for serving test failed ###\" && exit 1\n    clean_pid\n    cd -\n  fi\n  cd -\n}\n\nstart_service()\n{\n  echo \"### start serving service ###\"\n  unset http_proxy https_proxy\n  python3 serving_server.py > serving_server.log 2>&1 &\n  if [ $? 
-ne 0 ]\n  then\n    echo \"server failed to start.\"\n  fi\n\n  result=`grep -E 'Serving gRPC server start success, listening on 127.0.0.1:5500' serving_server.log | wc -l`\n  count=0\n  while [[ ${result} -eq 0 && ${count} -lt 150 ]]\n  do\n    sleep 1\n    count=$(($count+1))\n    result=`grep -E 'Serving gRPC server start success, listening on 127.0.0.1:5500' serving_server.log | wc -l`\n  done\n\n  if [ ${count} -eq 150 ]\n  then\n    clean_pid\n    cat serving_server.log\n    echo \"worker log begin----------------------------------\"\n    cat serving_logs/*.log\n    echo \"worker log end----------------------------------\"\n    echo \"start serving service failed!\" && exit 1\n  fi\n  echo \"### start serving service end ###\"\n}\n\npytest_serving()\n{\n  unset http_proxy https_proxy\n  echo \"###  client start ###\"\n  python3  serving_client.py > client.log 2>&1\n  if [ $? -ne 0 ]\n  then\n    clean_pid\n    cat client.log\n    echo \"client failed to start.\" && exit 1\n  fi\n  echo \"### client end ###\"\n}\n\ntest_matmul_model()\n{\n  start_service\n  pytest_serving\n  cat client.log\n  clean_pid\n}\n\necho \"-----serving start-----\"\nprepare_model\ntest_matmul_model\n"
  },
  {
    "path": "tests/st/matmul_multi_subgraphs/test_matmul_multi_subgraphs.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport os\nimport pytest\nimport numpy as np\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_serving_matmul_multi_subgraphs():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/matmul_multi_subgraphs.sh\")\n    assert np.allclose(ret, 0)\n\n\nif __name__ == '__main__':\n    test_serving_matmul_multi_subgraphs()\n"
  },
  {
    "path": "tests/st/resnet/__init__.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n"
  },
  {
    "path": "tests/st/resnet/resnet.sh",
    "content": "#!/bin/bash\n\nexport GLOG_v=1\ncd \"$(dirname $0)\" || exit\nCURRPATH=$(pwd)\nCURRUSER=$(whoami)\nPROJECT_PATH=${CURRPATH}/../../../\necho \"CURRPATH:\"  ${CURRPATH}\necho \"CURRUSER:\"  ${CURRUSER}\necho \"PROJECT_PATH:\"  ${PROJECT_PATH}\n\necho \"LD_LIBRARY_PATH: \" ${LD_LIBRARY_PATH}\necho \"PYTHONPATH: \" ${PYTHONPATH}\n\nclean_pid()\n{\n  ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  if [ $? -ne 0 ]\n  then\n    echo \"clean pip failed\"\n  fi\n  sleep 6\n}\n\nprepare_model()\n{\n  echo \"### begin to generate mode for serving test ###\"\n  cd export_model\n  python3 export_resnet.py False &> export_resnet.log\n  echo \"### end to generate mode for serving test ###\"\n  result=`find . -name resnet50_1b_cifar10.mindir | wc -l`\n  if [ ${result} -ne 1 ]\n  then\n    cat export_resnet.log\n    echo \"### generate model for serving test failed ###\" && exit 1\n    clean_pid\n    cd -\n  fi\n  cd -\n}\n\nstart_service()\n{\n  echo \"### start serving service ###\"\n  unset http_proxy https_proxy\n  python3 serving_server.py > serving_server.log 2>&1 &\n  if [ $? -ne 0 ]\n  then\n    echo \"server failed to start.\"\n  fi\n\n  result=`grep -E 'Serving gRPC server start success, listening on 127.0.0.1:5500' serving_server.log | wc -l`\n  count=0\n  while [[ ${result} -eq 0 && ${count} -lt 150 ]]\n  do\n    sleep 1\n    count=$(($count+1))\n    result=`grep -E 'Serving gRPC server start success, listening on 127.0.0.1:5500' serving_server.log | wc -l`\n  done\n\n  if [ ${count} -eq 150 ]\n  then\n    clean_pid\n    cat serving_server.log\n    echo \"start serving service failed!\" && exit 1\n  fi\n  echo \"### start serving service end ###\"\n}\n\npytest_serving()\n{\n  unset http_proxy https_proxy\n  echo \"###  client start ###\"\n  python3  serving_client.py > serving_client.log 2>&1\n  if [ $? 
-ne 0 ]\n  then\n    clean_pid\n    cat serving_client.log\n    echo \"client failed to start.\" && exit 1\n  fi\n  echo \"### client end ###\"\n}\n\ntest_renet_model()\n{\n  start_service\n  pytest_serving\n  cat serving_client.log\n  clean_pid\n}\n\necho \"-----serving start-----\"\nrm -rf serving *.log *.mindir *.dat kernel_meta\nrm -rf unix_socket_files serving_logs\nrm -rf serving_client.py  export_model  serving_server.py  resnet50  test_image\ncp -r ../../../example/resnet/* .\nprepare_model\ntest_renet_model\necho \"### end to serving test ###\"\n"
  },
  {
    "path": "tests/st/resnet/test_resnet.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport os\nimport pytest\nimport numpy as np\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_resnet():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/resnet.sh\")\n    assert np.allclose(ret, 0)\n\n\nif __name__ == '__main__':\n    test_resnet()\n"
  },
  {
    "path": "tests/st/serving_fault/__init__.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n"
  },
  {
    "path": "tests/st/serving_fault/common.sh",
    "content": "#!/bin/bash\n\nexport GLOG_v=1\n\ncd \"$(dirname $0)\" || exit\nCURRPATH=$(pwd)\nCURRUSER=$(whoami)\nPROJECT_PATH=${CURRPATH}/../../../\necho \"CURRPATH:\"  ${CURRPATH}\necho \"CURRUSER:\"  ${CURRUSER}\necho \"PROJECT_PATH:\"  ${PROJECT_PATH}\n\necho \"LD_LIBRARY_PATH: \" ${LD_LIBRARY_PATH}\necho \"PYTHONPATH: \" ${PYTHONPATH}\n\nclean_pid()\n{\n  get_master_count\n  if [ $? -ne 0 ]\n  then\n    ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -15\n  fi\n\n  count=0\n  get_master_count\n  while [[ $? -ne 0 && ${count} -lt 5 ]]\n  do\n    sleep 1\n    get_master_count\n  done\n\n  get_master_count\n  if [ $? -ne 0 ]\n  then\n    ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  fi\n  get_worker_count\n  if [ $? -ne 0 ]\n  then\n    ps aux | grep 'start_worker.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  fi\n}\n\nprepare_model()\n{\n  echo \"### begin to generate mode for serving test ###\"\n  cd export_model\n  python3 add_model.py &> add_model.log\n  echo \"### end to generate mode for serving test ###\"\n  result=`find . -name  tensor_add.mindir | wc -l`\n  if [ ${result} -ne 1 ]\n  then\n    cat add_model.log\n    echo \"### generate model for serving test failed ###\" && exit 1\n    clean_pid\n    cd -\n  fi\n  cd -\n}\n\nstart_serving_server()\n{\n  echo \"### start serving server ###\"\n  unset http_proxy https_proxy\n  python3 serving_server.py > serving_server.log 2>&1 &\n  if [ $? -ne 0 ]\n  then\n    echo \"server server failed to start.\"\n  fi\n\n  result=`grep -E 'Serving gRPC server start success, listening on 127.0.0.1:5500' serving_server.log | wc -l`\n  count=0\n  while [[ ${result} -eq 0 && ${count} -lt 150 ]]\n  do\n    sleep 1\n    get_master_count\n    if [ $? 
-eq 0 ]\n    then\n      echo \"---------------------------------- server server log begin\"\n      cat serving_server.log\n      echo \"---------------------------------- server server log end\"\n\n      echo \"---------------------------------- server worker log begin\"\n      cat serving_logs/*.log\n      echo \"---------------------------------- server worker log end\"\n      echo \"start serving server failed!\" && exit 1\n    fi\n    count=$(($count+1))\n    result=`grep -E 'Serving gRPC server start success, listening on 127.0.0.1:5500' serving_server.log | wc -l`\n  done\n\n  if [ ${count} -eq 150 ]\n  then\n    clean_pid\n    echo \"---------------------------------- server server log begin\"\n    cat serving_server.log\n    echo \"---------------------------------- server server log end\"\n\n    echo \"---------------------------------- server worker log begin\"\n    cat serving_logs/*.log\n    echo \"---------------------------------- server worker log end\"\n    echo \"start serving server failed!\" && exit 1\n  fi\n  echo \"### start serving server end ###\"\n}\n\nget_master_count()\n{\n  num=`ps -ef | grep serving_server.py | grep -v grep | wc -l`\n  return ${num}\n}\n\nget_worker_count()\n{\n  num=`ps -ef | grep start_worker.py | grep -v grep | wc -l`\n  return ${num}\n}\n\nwait_master_exit()\n{\n    get_master_count\n    count=0\n    while [[ $? 
-ne 0 && ${count} -lt 15 ]]\n    do\n      sleep 1\n      count=$(($count+1))\n      get_master_count\n    done\n\n    if [ ${count} -eq 15 ]\n    then\n      echo \"serving master exit failed\"\n      ps -ef | grep serving_server.py | grep -v grep\n      echo \"---------------------------------- server server log begin\"\n      cat serving_server.log\n      echo \"---------------------------------- server server log end\"\n\n      echo \"---------------------------------- server worker log begin\"\n      cat serving_logs/*.log\n      echo \"---------------------------------- server worker log end\"\n      clean_pid && exit 1\n    fi\n}\n\nwait_worker_exit()\n{\n    get_worker_count\n    count=0\n    while [[ $? -ne 0 && ${count} -lt 15 ]]\n    do\n      sleep 1\n      count=$(($count+1))\n      get_worker_count\n    done\n\n    if [ ${count} -eq 15 ]\n    then\n      echo \"serving worker exit failed\"\n      ps -ef | grep start_worker.py | grep -v grep\n      echo \"---------------------------------- server server log begin\"\n      cat serving_server.log\n      echo \"---------------------------------- server server log end\"\n\n      echo \"---------------------------------- server worker log begin\"\n      cat serving_logs/*.log\n      echo \"---------------------------------- server worker log end\"\n      clean_pid && exit 1\n    fi\n}\n\ninit()\n{\n  rm -rf serving *.log *.mindir *.dat kernel_meta\n  rm -rf unix_socket_files serving_logs\n  rm -rf add export_model  serving_server.py serving_client.py serving_client_with_check.py\n  cp -r ../../../example/tensor_add/* .\n  prepare_model\n  clean_pid\n}\n\n"
  },
  {
    "path": "tests/st/serving_fault/kill_15_master.sh",
    "content": "#!/bin/bash\n\nCURRPATH=$(cd \"$(dirname $0)\" || exit; pwd)\nsource ${CURRPATH}/common.sh\n\nkill_master()\n{\n  get_master_count\n  if [ $? -ne 1 ]\n  then\n    echo \"serving server start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  get_worker_count\n  if [ $? -eq 0 ]\n  then\n    echo \"worker start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -15\n  if [ $? -ne 0 ]\n  then\n    echo \"kill master failed\"\n  fi\n  wait_master_exit\n  wait_worker_exit\n}\n\ntest_master_fault_model()\n{\n  start_serving_server\n  kill_master\n  clean_pid\n}\n\necho \"-----serving start-----\"\ninit\ntest_master_fault_model\necho \"### end to serving test ###\"\n"
  },
  {
    "path": "tests/st/serving_fault/kill_15_worker.sh",
    "content": "#!/bin/bash\n\nCURRPATH=$(cd \"$(dirname $0)\" || exit; pwd)\nsource ${CURRPATH}/common.sh\n\nkill_worker()\n{\n  get_master_count\n  if [ $? -ne 1 ]\n  then\n    echo \"serving server start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  get_worker_count\n  if [ $? -eq 0 ]\n  then\n    echo \"worker start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n\n  ps aux | grep 'start_worker.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -15\n  if [ $? -ne 0 ]\n  then\n    echo \"kill worker failed\"\n  fi\n  wait_master_exit\n  wait_worker_exit\n}\n\ntest_worker_fault_model()\n{\n  start_serving_server\n  kill_worker\n  clean_pid\n}\n\necho \"-----serving start-----\"\ninit\ntest_worker_fault_model\necho \"### end to serving test ###\"\n"
  },
  {
    "path": "tests/st/serving_fault/kill_9_master.sh",
    "content": "#!/bin/bash\n\nCURRPATH=$(cd \"$(dirname $0)\" || exit; pwd)\nsource ${CURRPATH}/common.sh\n\nkill_master()\n{\n  get_master_count\n  if [ $? -ne 1 ]\n  then\n    echo \"serving server start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  get_worker_count\n  if [ $? -eq 0 ]\n  then\n    echo \"worker start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  ps aux | grep 'serving_server.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  if [ $? -ne 0 ]\n  then\n    echo \"kill master failed\"\n  fi\n  wait_worker_exit\n}\n\ntest_master_fault_model()\n{\n  start_serving_server\n  kill_master\n  clean_pid\n}\n\necho \"-----serving start-----\"\ninit\ntest_master_fault_model\necho \"### end to serving test ###\"\n"
  },
  {
    "path": "tests/st/serving_fault/kill_9_worker.sh",
    "content": "#!/bin/bash\n\nCURRPATH=$(cd \"$(dirname $0)\" || exit; pwd)\nsource ${CURRPATH}/common.sh\n\nkill_worker()\n{\n  get_master_count\n  if [ $? -ne 1 ]\n  then\n    echo \"serving server start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n  get_worker_count\n  if [ $? -eq 0 ]\n  then\n    echo \"worker start failed\"\n    echo $?\n    clean_pid && exit 1\n  fi\n\n  ps aux | grep 'start_worker.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}' | xargs kill -9\n  if [ $? -ne 0 ]\n  then\n    echo \"kill worker failed\"\n  fi\n  wait_master_exit\n}\n\ntest_worker_fault_model()\n{\n  start_serving_server\n  kill_worker\n  clean_pid\n}\n\necho \"-----serving start-----\"\ninit\ntest_worker_fault_model\necho \"### end to serving test ###\"\n"
  },
  {
    "path": "tests/st/serving_fault/restart.sh",
    "content": "#!/bin/bash\n\nCURRPATH=$(cd \"$(dirname $0)\" || exit; pwd)\nsource ${CURRPATH}/common.sh\n\nunset http_proxy https_proxy\n\nrun_client()\n{\n  echo \"###  client start ###\"\n  python3  serving_client_with_check.py > client.log 2>&1\n  if [ $? -ne 0 ]\n  then\n    clean_pid\n    cat client.log\n    echo \"client failed to start.\" && exit 1\n  fi\n  cat client.log\n  echo \"### client end ###\"\n}\n\nlistening_worker_restart()\n{\n  start_count=$1\n  echo \"### serving server worker restart begin ###\"\n  result=`grep -E 'Register success: worker address' serving_server.log | wc -l`\n  count=0\n  while [[ ${result} -le $start_count && ${count} -lt 30 ]]\n  do\n    sleep 1\n    get_master_count\n    if [ $? -eq 0 ]\n    then\n      echo \"---------------------------------- server server log begin\"\n      cat serving_server.log\n      echo \"---------------------------------- server server log end\"\n\n      echo \"---------------------------------- server worker log begin\"\n      cat serving_logs/*.log\n      echo \"---------------------------------- server worker log end\"\n      echo \"serving server worker restart failed! start count $start_count\" && exit 1\n    fi\n    count=$(($count+1))\n    result=`grep -E 'Register success: worker address' serving_server.log | wc -l`\n  done\n\n  if [ ${count} -eq 30 ]\n  then\n    clean_pid\n    echo \"---------------------------------- server server log begin\"\n    cat serving_server.log\n    echo \"---------------------------------- server server log end\"\n\n    echo \"---------------------------------- server worker log begin\"\n    cat serving_logs/*.log\n    echo \"---------------------------------- server worker log end\"\n    echo \"serving server worker restart failed! 
start count $start_count\" && exit 1\n  fi\n  echo \"### serving server worker restart end ###\"\n}\n\ntest_restart()\n{\n  start_serving_server\n  # shellcheck disable=SC2207\n  worker_pids=($(ps aux | grep 'start_worker.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}'))\n  if [ ${#worker_pids[*]} -ne 2 ]; then\n    echo \"worker process number is not 2, real count \" ${#worker_pids[*]}\n    ps -ef | grep start_worker.py\n    clean_pid && exit 1\n  fi\n\n  echo \"before restart\"\n  ps -ef | grep 'start_worker.py'\n\n  # test kill -9 and restart\n  run_client\n\n  echo \"kill first worker \" ${worker_pids[0]}\n  kill -s 9 ${worker_pids[0]}\n  echo \"after first kill\"\n  ps -ef | grep 'start_worker.py'\n\n  run_client\n  listening_worker_restart 2  # current has 2 Register success log\n  run_client\n\n  echo \"kill second worker \" ${worker_pids[1]}\n  kill -s 9 ${worker_pids[1]}\n  echo \"after second kill\"\n  ps -ef | grep 'start_worker.py'\n\n  listening_worker_restart 3  # current has 3 Register success log\n  # test kill -15\n  run_client\n  # shellcheck disable=SC2207\n  worker_pids=($(ps aux | grep 'start_worker.py' | grep ${CURRUSER} | grep -v grep | awk '{print $2}'))\n  if [ ${#worker_pids[*]} -ne 2 ]; then\n    echo \"restarted worker process number is not 2, real count \" ${#worker_pids[*]}\n    ps -ef | grep start_worker.py\n    clean_pid && exit 1\n  fi\n\n  echo \"end restart\"\n  ps -ef | grep 'start_worker.py'\n\n  kill -s 15 ${worker_pids[0]}\n  kill -s 15 ${worker_pids[1]}\n  wait_master_exit\n  clean_pid\n}\n\necho \"-----serving start-----\"\ninit\ntest_restart\necho \"-----serving end-----\"\n"
  },
  {
    "path": "tests/st/serving_fault/test_serving_fault.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport os\nimport pytest\nimport numpy as np\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_serving_fault_kill_15_master():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/kill_15_master.sh\")\n    assert np.allclose(ret, 0)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_serving_fault_kill_9_master():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/kill_9_master.sh\")\n    assert np.allclose(ret, 0)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_serving_fault_kill_15_worker():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/kill_15_worker.sh\")\n    assert np.allclose(ret, 0)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef test_serving_fault_kill_9_worker():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/kill_9_worker.sh\")\n    assert 
np.allclose(ret, 0)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.env_single\ndef serving_fault_restart():\n    \"\"\"test_serving\"\"\"\n    sh_path = os.path.split(os.path.realpath(__file__))[0]\n    ret = os.system(f\"sh {sh_path}/restart.sh\")\n    assert np.allclose(ret, 0)\n\n\nif __name__ == '__main__':\n    test_serving_fault_kill_9_master()\n    test_serving_fault_kill_15_master()\n    test_serving_fault_kill_9_worker()\n    test_serving_fault_kill_15_worker()\n"
  },
  {
    "path": "tests/ut/CMakeLists.txt",
    "content": "\nadd_subdirectory(python)\nadd_subdirectory(cpp)\n"
  },
  {
    "path": "tests/ut/coverage/cov_config",
    "content": "[run]\nomit = */__init__.py,*/*_pb2.py,*/*_pb2_grpc.py,*/tests/*\n"
  },
  {
    "path": "tests/ut/coverage/run_coverage.sh",
    "content": "#!/bin/bash\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nset -e\nBASEPATH=$(\n  cd \"$(dirname \"$0\")\"\n  pwd\n)\n\nPROJECT_PATH=$(\n  cd ${BASEPATH}/../../..\n  pwd\n)\n\nBUILD_PKG=${PROJECT_PATH}/build/package\n\nexport PYTHONPATH=${BUILD_PKG}:${PROJECT_PATH}/tests/ut/python:$PYTHONPATH\nexport LD_LIBRARY_PATH=${BUILD_PKG}/tests/mindspore/lib:${LD_LIBRARY_PATH}\n\necho \"PYTHONPATH=$PYTHONPATH\"\necho \"LD_LIBRARY_PATH=$LD_LIBRARY_PATH\"\nexport GLOG_v=1\n\nunset http_proxy\nunset https_proxy\n\nrm -rf cov_output htmlcov .coverage\n\n# run python ut\npytest -v ${PROJECT_PATH}/tests/ut/python/tests/ --cov=${BUILD_PKG}/mindspore_serving --cov-config=${BASEPATH}/cov_config --cov-report=html --cov-branch\n# run cpp ut\nbash ../cpp/runtest.sh\n\nmkdir cov_output && cd cov_output\nlcov --capture --directory ${PROJECT_PATH}/build/mindspore_serving/ --output-file coverage.info;\nlcov --extract coverage.info '*/ccsrc/*' -o coverage.info;\ngenhtml coverage.info --output-directory ./ --sort --legend\n"
  },
  {
    "path": "tests/ut/cpp/CMakeLists.txt",
    "content": "# This branch assumes that gRPC and all its dependencies are already installed\n# on this system, so they can be located by find_package().\n\n# Find Protobuf installation\n# Looks for protobuf-config.cmake file installed by Protobuf's cmake installation.\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -Wl,-rpath,$ORIGIN:$ORIGIN/lib\")\n\n\n# serving_common for c++ server and python interface\nfile(GLOB_RECURSE UT_SERVING_CORE_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}\n        \"../../../mindspore_serving/ccsrc/common/*.cc\"\n        \"../../../mindspore_serving/ccsrc/master/*.cc\"\n        \"../../../mindspore_serving/ccsrc/worker/*.cc\")\n\nfile(GLOB_RECURSE UT_SERVING_RMV_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}\n        \"../../../mindspore_serving/ccsrc/worker/inference/inference.cc\")\nlist(REMOVE_ITEM UT_SERVING_CORE_SRC ${UT_SERVING_RMV_SRC})\n\nfile(GLOB_RECURSE UT_SERVING_STUB RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} \"../stub/*.cc\")\nset(UT_SERVING_COMMON ${UT_SERVING_CORE_SRC} ${UT_SERVING_STUB})\n\ninclude_directories(\"${CMAKE_BINARY_DIR}/mindspore_serving\" ${CMAKE_BINARY_DIR}) # for proto header file\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\ninclude_directories(../)\ninclude_directories(../stub)\ninclude_directories(../stub/include)\ninclude_directories(${CMAKE_SOURCE_DIR})\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party)\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../../mindspore_serving/ccsrc)\n\nlink_directories(${CMKAE_BINARY_DIR}/securec/src)\n# copy libevent lib\nfile(GLOB_RECURSE LIBEVENT_LIB_LIST ${libevent_LIBPATH}/libevent* libevent_LIBPATH}/libevent_pthreads*)\nfile(COPY ${LIBEVENT_LIB_LIST} DESTINATION ${CMAKE_CURRENT_BINARY_DIR})\n# copy glog lib\nfile(GLOB_RECURSE GLOG_LIB_LIST ${glog_LIBPATH}/libmindspore_serving_glog*)\nfile(COPY ${GLOG_LIB_LIST} DESTINATION ${CMAKE_CURRENT_BINARY_DIR})\n# copy grpc lib\nfile(GLOB_RECURSE GPRC_LIB_LIST ${grpc_LIBPATH}/lib*)\nfile(COPY ${GPRC_LIB_LIST} 
DESTINATION ${CMAKE_CURRENT_BINARY_DIR})\n\n# for cpp/serving_ut\nset(CPP_UT_SERVING_CORE ${UT_SERVING_COMMON} ${UT_SERVING_ASCEND})\nadd_library(cpp_serving_common STATIC ${CPP_UT_SERVING_CORE})\n\ntarget_link_libraries(cpp_serving_common PRIVATE PROTO_SRC_LIB)\ntarget_link_libraries(cpp_serving_common PRIVATE mindspore_serving::ssl mindspore_serving::crypto)\ntarget_link_libraries(cpp_serving_common PRIVATE mindspore_serving::grpc++)\ntarget_link_libraries(cpp_serving_common PRIVATE mindspore_serving::protobuf pthread rt dl)\ntarget_link_libraries(cpp_serving_common PRIVATE mindspore_serving::event mindspore_serving::event_pthreads)\ntarget_link_libraries(cpp_serving_common PRIVATE mindspore_serving::event_openssl)\ntarget_link_libraries(cpp_serving_common PRIVATE pthread mindspore_serving::glog)\ntarget_link_libraries(cpp_serving_common PRIVATE mindspore_serving::eigen)\ntarget_link_libraries(cpp_serving_common PRIVATE ${SECUREC_LIBRARY})\n\n# for test\nlink_directories(${CMAKE_BINARY_DIR}/googletest/googlemock/gtest)\nfile(GLOB_RECURSE UT_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} \"common/*.cc\" \"tests/*.cc\")\n\nadd_executable(serving_ut ${UT_LIST})\ntarget_link_libraries(serving_ut PRIVATE mindspore_serving::gtest)\ntarget_link_libraries(serving_ut PRIVATE -Wl,--whole-archive cpp_serving_common -Wl,--no-whole-archive)\n\n# disable auto rpath\nset_target_properties(serving_ut PROPERTIES SKIP_BUILD_RPATH TRUE)\n# copy gtest lib\nfile(GLOB_RECURSE GTEST_LIB_LIST ${gtest_LIBPATH}/libgtest*)\nfile(COPY ${GTEST_LIB_LIST} DESTINATION ${CMAKE_CURRENT_BINARY_DIR})\n"
  },
  {
    "path": "tests/ut/cpp/common/common_test.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"common/common_test.h\"\n\n#define private public\n#include \"mindspore_serving/ccsrc/common/servable.h\"\n#undef private\n#include \"mindspore_serving/ccsrc/worker/servable_register.h\"\n\n#ifdef __cplusplus\n#if __cplusplus\nextern \"C\" {\n#endif\n#endif\n\nnamespace UT {\n\nvoid Common::SetUpTestCase() {}\n\nvoid Common::TearDownTestCase() {}\n\nvoid Common::SetUp() {}\n\nvoid Common::TearDown() {\n  mindspore::serving::ServableRegister::Instance() = mindspore::serving::ServableRegister();\n}\n\n}  // namespace UT\n\n#ifdef __cplusplus\n#if __cplusplus\n}\n#endif\n#endif\n"
  },
  {
    "path": "tests/ut/cpp/common/common_test.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef TESTS_UT_COMMON_UT_COMMON_H_\n#define TESTS_UT_COMMON_UT_COMMON_H_\n\n#include <cmath>\n#include <fstream>\n#include <iostream>\n#include \"gtest/gtest.h\"\nnamespace UT {\nclass Common : public testing::Test {\n public:\n  // TestCase only enter once\n  static void SetUpTestCase();\n  static void TearDownTestCase();\n\n  // every TEST_F macro will enter one\n  virtual void SetUp();\n  virtual void TearDown();\n};\n}  // namespace UT\n#endif  // TESTS_UT_COMMON_UT_COMMON_H_\n"
  },
  {
    "path": "tests/ut/cpp/common/test_main.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"gtest/gtest.h\"\n\nGTEST_API_ int main(int argc, char **argv) {\n  testing::InitGoogleTest(&argc, argv);\n  int ret = RUN_ALL_TESTS();\n  return ret;\n}\n"
  },
  {
    "path": "tests/ut/cpp/common/test_servable_common.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_TEST_SERVABLE_COMMON_H\n#define MINDSPORE_SERVING_TEST_SERVABLE_COMMON_H\n\n#include <unistd.h>\n#include <fstream>\n#include <memory>\n#include <set>\n#include <string>\n#include <vector>\n#include <map>\n#include <utility>\n#include \"common/common_test.h\"\n#include \"master/server.h\"\n\n#define private public\n#include \"worker/worker.h\"\n#undef private\n#include \"worker/notfiy_master/base_notify.h\"\n#include \"worker/context.h\"\n#include \"worker/local_servable/local_model_loader.h\"\n#include \"master/grpc/grpc_process.h\"\n#include \"mindspore_serving/proto/ms_service.pb.h\"\n#include \"mindspore_serving/ccsrc/worker/servable_register.h\"\n\nnamespace mindspore {\nnamespace serving {\n\n#define ExpectContainMsg(error_msg, expected_msg)                                                     \\\n  {                                                                                                   \\\n    std::string error_msg_str = error_msg;                                                                   \\\n    EXPECT_TRUE(error_msg_str.find(expected_msg) != std::string::npos);                               \\\n    if (error_msg_str.find(expected_msg) == std::string::npos) {                                      \\\n      std::cout << \"error_msg: \" << error_msg_str << \", expected_msg: \" << 
expected_msg << std::endl; \\\n    }                                                                                                 \\\n  }\n\nclass FakeNotifyMaster : public BaseNotifyMaster {\n public:\n  Status Register(const WorkerRegSpec &worker_spec) override { return SUCCESS; }\n  Status Unregister() override { return SUCCESS; }\n};\n\nclass TestMasterWorker : public UT::Common {\n public:\n  TestMasterWorker() = default;\n  void Init(std::string servable_dir, std::string servable_name, int version_number, std::string model_file) {\n    servable_dir_ = servable_dir;\n    servable_name_ = servable_name;\n    version_number_ = version_number;\n    model_file_ = model_file;\n\n    servable_name_path_ = servable_dir_ + \"/\" + servable_name_;\n    version_number_path_ = servable_name_path_ + \"/\" + std::to_string(version_number_);\n    model_name_path_ = version_number_path_ + \"/\" + model_file_;\n\n    __mode_t access_mode = S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH;\n    mkdir(servable_dir_.c_str(), access_mode);\n    mkdir(servable_name_path_.c_str(), access_mode);\n    mkdir(version_number_path_.c_str(), access_mode);\n    std::ofstream fp(model_name_path_);\n    fp << \"model content\";\n    fp.close();\n    model_name_path_list_.emplace(model_name_path_);\n    version_number_path_list_.emplace(version_number_path_);\n    servable_name_path_list_.emplace(servable_name_path_);\n    servable_dir_list_.emplace(servable_dir_);\n  }\n\n  virtual void SetUp() {}\n  virtual void TearDown() {\n    for (auto &item : model_name_path_list_) {\n      remove(item.c_str());\n    }\n    for (auto &item : version_number_path_list_) {\n      rmdir(item.c_str());\n    }\n    for (auto &item : servable_name_path_list_) {\n      rmdir(item.c_str());\n    }\n    for (auto &item : servable_dir_list_) {\n      rmdir(item.c_str());\n    }\n    Worker::GetInstance().Clear();\n    Server::Instance().Clear();\n    UT::Common::TearDown();\n  }\n\n  void StartAddServable() {\n    auto 
status = StartServable(servable_dir_, servable_name_, 1);\n    ASSERT_TRUE(status.IsSuccess());\n  }\n\n  void RegisterAddServable(bool with_batch_dim = false) {\n    DeclareServable(servable_name_, model_file_, \"mindir\", with_batch_dim);\n\n    // register_method\n    RegisterMethod(servable_name_, model_file_, \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n  }\n\n  static Status StartServable(const std::string &servable_dir, const std::string &servable_name, int version_number) {\n    char path[PATH_MAX];\n    std::string current_path = getcwd(path, PATH_MAX);\n    auto notify_master = std::make_shared<FakeNotifyMaster>();\n    ServableContext::Instance()->SetDeviceId(0);\n    ServableContext::Instance()->SetDeviceTypeStr(\"Ascend\");\n\n    auto servable_dir_full = current_path + \"/\" + servable_dir;\n\n    const auto &signature = ServableRegister::Instance().GetServableSignature();\n    Status status;\n    std::map<std::string, std::shared_ptr<ModelLoaderBase>> models_loader;\n    for (auto &model_meta : signature.model_metas) {\n      auto &model_key = model_meta.common_meta.model_key;\n      auto local_models_loader = std::make_shared<LocalModelLoader>();\n      status =\n        local_models_loader->LoadModel(servable_dir_full, servable_name, version_number, model_meta, \"\", \"\");\n      if (status != SUCCESS) {\n        local_models_loader->Clear();\n        return status;\n      }\n      status = local_models_loader->AfterLoadModel();\n      if (status != SUCCESS) {\n        local_models_loader->Clear();\n        return status;\n      }\n      models_loader[model_key] = local_models_loader;\n    }\n    status = Worker::GetInstance().StartServableInner(servable_name, version_number, models_loader, true);\n    return status;\n  }\n  static void DeclareServable(const std::string &servable_name, const std::string &model_file,\n                              const std::string &model_type, bool with_batch_dim = false) {\n    ModelMeta servable_meta;\n    
servable_meta.common_meta.servable_name = servable_name;\n    servable_meta.common_meta.model_key = model_file;\n    servable_meta.common_meta.with_batch_dim = with_batch_dim;\n    servable_meta.local_meta.model_files = {model_file};\n    servable_meta.local_meta.SetModelFormat(model_type);\n    // declare_servable\n    ServableRegister::Instance().DeclareModel(servable_meta);\n  }\n  static Status RegisterMethod(const std::string &servable_name, const std::string &method_file,\n                               const std::string &method_name,\n                               const std::vector<std::string> &input_names,\n                               const std::vector<std::string> &output_names, size_t servable_input_count,\n                               size_t servable_output_count) {\n    auto model_key = method_file;\n    auto status =\n      ServableRegister::Instance().RegisterInputOutputInfo(model_key, servable_input_count, servable_output_count);\n    if (status != SUCCESS) {\n      return status;\n    }\n\n    MethodSignature method_signature;\n    method_signature.servable_name = servable_name;\n    method_signature.method_name = method_name;\n    method_signature.inputs = input_names;\n    method_signature.outputs = output_names;\n    // method input 0 and input 1 as servable input\n    std::vector<std::pair<size_t, uint64_t>> model_input = {{0, 0}, {0, 1}};\n    method_signature.AddStageModel(model_key, model_input, 0, \"\");\n    // servable output as method output\n    std::vector<std::pair<size_t, uint64_t>> return_output = {{1, 0}};\n    method_signature.SetReturn(return_output);\n    ServableRegister::Instance().RegisterMethod(method_signature);\n    return SUCCESS;\n  }\n  std::string servable_dir_;\n  std::string servable_name_;\n  int version_number_ = 0;\n  std::string model_file_;\n  std::string model_name_path_;\n  std::string version_number_path_;\n  std::string servable_name_path_;\n  std::set<std::string> servable_dir_list_;\n  
std::set<std::string> model_name_path_list_;\n  std::set<std::string> version_number_path_list_;\n  std::set<std::string> servable_name_path_list_;\n};\n\nclass TestMasterWorkerClient : public TestMasterWorker {\n public:\n  TestMasterWorkerClient() = default;\n\n  static void InitTensor(proto::Tensor *tensor, const std::vector<int64_t> &shape, proto::DataType data_type,\n                         const void *data, size_t data_size) {\n    MSI_EXCEPTION_IF_NULL(tensor);\n    tensor->set_dtype(data_type);\n    auto proto_shape = tensor->mutable_shape();\n    for (auto item : shape) {\n      proto_shape->add_dims(item);\n    }\n    tensor->set_data(data, data_size);\n  }\n\n  static std::vector<float> InitOneInstanceRequest(proto::PredictRequest *request, const std::string &servable_name,\n                                                   const std::string &method_name, int version_number) {\n    MSI_EXCEPTION_IF_NULL(request);\n    auto request_servable_spec = request->mutable_servable_spec();\n    request_servable_spec->set_name(servable_name);\n    request_servable_spec->set_method_name(method_name);\n    request_servable_spec->set_version_number(version_number);\n\n    std::vector<float> x1_data = {1.1, 2.2, 3.3, 4.4};\n    std::vector<float> x2_data = {1.2, 2.3, 3.4, 4.5};\n    std::vector<float> y_data;\n    for (size_t i = 0; i < x1_data.size(); i++) {\n      y_data.push_back(x1_data[i] + x2_data[i]);\n    }\n    auto instance = request->add_instances();\n    auto &input_map = (*instance->mutable_items());\n    // input x1\n    InitTensor(&input_map[\"x1\"], {2, 2}, proto::MS_FLOAT32, x1_data.data(), x1_data.size() * sizeof(float));\n    // input x2\n    InitTensor(&input_map[\"x2\"], {2, 2}, proto::MS_FLOAT32, x2_data.data(), x2_data.size() * sizeof(float));\n    return y_data;\n  }\n  template <class IN_DT = float, class OUT_DT = float>\n  static std::vector<std::vector<OUT_DT>> InitMultiInstancesRequest(proto::PredictRequest *request,\n                      
                                              const std::string &servable_name,\n                                                                    const std::string &method_name, int version_number,\n                                                                    size_t instances_count) {\n    MSI_EXCEPTION_IF_NULL(request);\n    auto request_servable_spec = request->mutable_servable_spec();\n    request_servable_spec->set_name(servable_name);\n    request_servable_spec->set_method_name(method_name);\n    request_servable_spec->set_version_number(version_number);\n\n    auto data_type = proto::MS_FLOAT32;\n    if (std::string(typeid(IN_DT).name()) == std::string(typeid(int32_t).name())) {\n      data_type = proto::MS_INT32;\n    }\n\n    std::vector<std::vector<OUT_DT>> y_data_list;\n    for (size_t k = 0; k < instances_count; k++) {\n      std::vector<float> x1_data_org = {1.1, 2.2, 3.3, 4.4};\n      std::vector<float> x2_data_org = {6.6, 7.7, 8.8, 9.9};\n\n      std::vector<IN_DT> x1_data;\n      std::vector<IN_DT> x2_data;\n\n      std::vector<OUT_DT> y_data;\n      for (size_t i = 0; i < x1_data_org.size(); i++) {\n        x1_data.push_back(static_cast<IN_DT>(x1_data_org[i] * (k + 1)));\n        x2_data.push_back(static_cast<IN_DT>(x2_data_org[i] * (k + 1)));\n        y_data.push_back(static_cast<OUT_DT>(x1_data[i] + x2_data[i]));\n      }\n      y_data_list.push_back(y_data);\n\n      auto instance = request->add_instances();\n      auto &input_map = (*instance->mutable_items());\n      // input x1\n      InitTensor(&input_map[\"x1\"], {2, 2}, data_type, x1_data.data(), x1_data.size() * sizeof(IN_DT));\n      // input x2\n      InitTensor(&input_map[\"x2\"], {2, 2}, data_type, x2_data.data(), x2_data.size() * sizeof(IN_DT));\n    }\n    return y_data_list;\n  }\n\n  template <class IN_DT = float, class OUT_DT = float>\n  static std::vector<std::vector<OUT_DT>> InitMultiInstancesShape2Request(proto::PredictRequest *request,\n                               
                                           const std::string &servable_name,\n                                                                          const std::string &method_name,\n                                                                          int version_number, size_t instances_count) {\n    MSI_EXCEPTION_IF_NULL(request);\n    auto request_servable_spec = request->mutable_servable_spec();\n    request_servable_spec->set_name(servable_name);\n    request_servable_spec->set_method_name(method_name);\n    request_servable_spec->set_version_number(version_number);\n\n    auto data_type = proto::MS_FLOAT32;\n    if (std::string(typeid(IN_DT).name()) == std::string(typeid(int32_t).name())) {\n      data_type = proto::MS_INT32;\n    }\n\n    std::vector<std::vector<OUT_DT>> y_data_list;\n    for (size_t k = 0; k < instances_count; k++) {\n      std::vector<float> x1_data_org = {1.1, 2.2};\n      std::vector<float> x2_data_org = {8.8, 9.9};\n\n      std::vector<IN_DT> x1_data;\n      std::vector<IN_DT> x2_data;\n\n      std::vector<OUT_DT> y_data;\n      for (size_t i = 0; i < x1_data_org.size(); i++) {\n        x1_data.push_back(static_cast<IN_DT>(x1_data_org[i] * (k + 1)));\n        x2_data.push_back(static_cast<IN_DT>(x2_data_org[i] * (k + 1)));\n        y_data.push_back(x1_data[i] + x2_data[i]);\n      }\n      y_data_list.push_back(y_data);\n\n      auto instance = request->add_instances();\n      auto &input_map = (*instance->mutable_items());\n      // input x1\n      InitTensor(&input_map[\"x1\"], {2}, data_type, x1_data.data(), x1_data.size() * sizeof(IN_DT));\n      // input x2\n      InitTensor(&input_map[\"x2\"], {2}, data_type, x2_data.data(), x2_data.size() * sizeof(IN_DT));\n    }\n    return y_data_list;\n  }\n\n  template <class OUT_DT>\n  static void CheckMultiInstanceResult(const proto::PredictReply &reply,\n                                       const std::vector<std::vector<OUT_DT>> &y_data_list,\n                                      
 size_t instances_count) {  // check output\n    ASSERT_EQ(reply.instances_size(), instances_count);\n    ASSERT_EQ(reply.error_msg_size(), 0);\n    auto data_type = proto::MS_FLOAT32;\n    if (std::string(typeid(OUT_DT).name()) == std::string(typeid(int32_t).name())) {\n      data_type = proto::MS_INT32;\n    }\n    std::vector<int64_t> shape;\n    if (y_data_list[0].size() == 4) {\n      shape = {2, 2};\n    } else {\n      shape = {2};\n    }\n    for (size_t k = 0; k < instances_count; k++) {\n      auto &output_instance = reply.instances(k);\n      ASSERT_EQ(output_instance.items_size(), 1);\n      auto &output_items = output_instance.items();\n      ASSERT_EQ(output_items.begin()->first, \"y\");\n      auto &output_tensor = output_items.begin()->second;\n\n      CheckTensor(output_tensor, shape, data_type, y_data_list[k].data(), y_data_list[k].size() * sizeof(OUT_DT));\n    }\n  }\n\n  template <class OUT_DT>\n  static void CheckInstanceResult(const proto::PredictReply &reply, const std::vector<OUT_DT> &y_data) {\n    // check output\n    ASSERT_EQ(reply.instances_size(), 1);\n    ASSERT_EQ(reply.error_msg_size(), 0);\n    auto data_type = proto::MS_FLOAT32;\n    if (std::string(typeid(OUT_DT).name()) == std::string(typeid(int32_t).name())) {\n      data_type = proto::MS_INT32;\n    }\n    std::vector<int64_t> shape;\n    if (y_data.size() == 4) {\n      shape = {2, 2};\n    } else {\n      shape = {2};\n    }\n    auto &output_instance = reply.instances(0);\n    ASSERT_EQ(output_instance.items_size(), 1);\n    auto &output_items = output_instance.items();\n    ASSERT_EQ(output_items.begin()->first, \"y\");\n    auto &output_tensor = output_items.begin()->second;\n\n    CheckTensor(output_tensor, shape, data_type, y_data.data(), y_data.size() * sizeof(OUT_DT));\n  }\n\n  static void CheckTensor(const proto::Tensor &output_tensor, const std::vector<int64_t> &shape,\n                          proto::DataType data_type, const void *data, size_t data_size) 
{\n    EXPECT_EQ(output_tensor.dtype(), data_type);\n    // check shape [2,2]\n    auto &output_tensor_shape = output_tensor.shape();\n    ASSERT_EQ(output_tensor_shape.dims_size(), shape.size());\n    std::vector<int64_t> proto_shape;\n    for (size_t i = 0; i < output_tensor_shape.dims_size(); i++) {\n      proto_shape.push_back(output_tensor_shape.dims(i));\n    }\n    EXPECT_EQ(proto_shape, shape);\n\n    // check data\n    ASSERT_EQ(output_tensor.data().size(), data_size);\n    switch (data_type) {\n      case proto::MS_FLOAT32: {\n        auto data_len = data_size / sizeof(float);\n        auto real_data = reinterpret_cast<const float *>(output_tensor.data().data());\n        auto expect_data = reinterpret_cast<const float *>(data);\n        for (size_t i = 0; i < data_len; i++) {\n          EXPECT_EQ(real_data[i], expect_data[i]);\n          if (real_data[i] != expect_data[i]) {\n            break;\n          }\n        }\n        break;\n      }\n      case proto::MS_INT32: {\n        auto data_len = data_size / sizeof(int32_t);\n        auto real_data = reinterpret_cast<const int32_t *>(output_tensor.data().data());\n        auto expect_data = reinterpret_cast<const int32_t *>(data);\n        for (size_t i = 0; i < data_len; i++) {\n          EXPECT_EQ(real_data[i], expect_data[i]);\n          if (real_data[i] != expect_data[i]) {\n            break;\n          }\n        }\n        break;\n      }\n      default:\n        FAIL();\n    }\n  }\n  static grpc::Status Dispatch(const proto::PredictRequest &request, proto::PredictReply *reply) {\n    MSWorkerImpl impl;\n    auto promise = std::make_shared<std::promise<void>>();\n    auto future = promise->get_future();\n    PredictOnFinish callback = [promise]() { promise->set_value(); };\n    impl.PredictAsync(&request, reply, callback);\n    future.get();\n    return grpc::Status::OK;\n  }\n};\n\n}  // namespace serving\n}  // namespace mindspore\n#endif  // MINDSPORE_SERVING_TEST_SERVABLE_COMMON_H\n"
  },
  {
    "path": "tests/ut/cpp/runtest.sh",
    "content": "#!/bin/bash\n# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nset -e\nBASEPATH=$(\n  cd \"$(dirname \"$0\")\"\n  pwd\n)\nPROJECT_PATH=${BASEPATH}/../../..\nif [ $BUILD_PATH ]; then\n  echo \"BUILD_PATH = $BUILD_PATH\"\nelse\n  BUILD_PATH=${PROJECT_PATH}/build\n  echo \"BUILD_PATH = $BUILD_PATH\"\nfi\ncd ${BUILD_PATH}/mindspore_serving/tests/ut/cpp\nexport LD_LIBRARY_PATH=${BUILD_PATH}/mindspore_serving/tests/ut/cpp:${LD_LIBRARY_PATH}\necho \"LD_LIBRARY_PATH = $LD_LIBRARY_PATH\"\n\nif [ $# -gt 0 ]; then\n  ./serving_ut --gtest_filter=$1\nelse\n  ./serving_ut\nfi\nRET=$?\ncd -\n\nexit ${RET}\n"
  },
  {
    "path": "tests/ut/cpp/tests/test_agent_config_acquire.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"common/common_test.h\"\n#include \"common/tensor_base.h\"\n#define private public\n#include \"worker/distributed_worker/distributed_process/distributed_process.h\"\n#include \"worker/distributed_worker/notify_distributed/notify_worker.h\"\n#undef private\n\nusing std::string;\nusing std::vector;\nnamespace mindspore {\nnamespace serving {\nclass TestAgentConfigAcquire : public UT::Common {\n public:\n  TestAgentConfigAcquire() = default;\n  virtual void SetUp() {}\n  virtual void TearDown() {\n    UT::Common::TearDown();\n  }\n};\n\nTEST_F(TestAgentConfigAcquire, test_agent_config_acquire_success) {\n  std::shared_ptr<DistributedModelLoader> servable = std::make_shared<DistributedModelLoader>();\n  std::string rank_table_content = \"rank table content\";\n  CommonModelMeta commonServableMeta;\n  commonServableMeta.servable_name = \"servable_name\";\n  commonServableMeta.model_key = \"model_key\";\n  commonServableMeta.outputs_count[0] = 1;\n  commonServableMeta.inputs_count[0] = 1;\n  commonServableMeta.with_batch_dim = false;\n  commonServableMeta.without_batch_dim_inputs.push_back(8);\n  DistributedModelMeta distributedServableMeta;\n  distributedServableMeta.stage_size = 8;\n  distributedServableMeta.rank_size = 8;\n  OneRankConfig oneRankConfig;\n  oneRankConfig.ip = \"1.1.1.1\";\n  oneRankConfig.device_id = 0;\n  
servable->config_.rank_table_content = rank_table_content;\n  servable->config_.common_meta = commonServableMeta;\n  servable->config_.distributed_meta = distributedServableMeta;\n  servable->config_.rank_list.push_back(oneRankConfig);\n  servable->config_loaded_ = true;\n  const std::string server_address = \"any_addr\";\n  MSDistributedImpl mSDistributedImpl(servable, server_address);\n  grpc::ServerContext context;\n  const proto::AgentConfigAcquireRequest request;\n  proto::AgentConfigAcquireReply reply;\n  grpc::Status status = mSDistributedImpl.AgentConfigAcquire(&context, &request, &reply);\n  ASSERT_EQ(status.error_code(), 0);\n\n  DistributedServableConfig config;\n  GrpcNotifyDistributeWorker::ParseAgentConfigAcquireReply(reply, &config);\n  ASSERT_EQ(config.rank_table_content, rank_table_content);\n  ASSERT_EQ(config.common_meta.servable_name, \"servable_name\");\n  ASSERT_EQ(config.common_meta.model_key, \"model_key\");\n  ASSERT_EQ(config.common_meta.inputs_count.at(0), 1);\n  ASSERT_EQ(config.common_meta.outputs_count.at(0), 1);\n  ASSERT_EQ(config.common_meta.with_batch_dim, false);\n  ASSERT_EQ(config.common_meta.without_batch_dim_inputs.size(), 1);\n  ASSERT_EQ(config.common_meta.without_batch_dim_inputs.at(0), 8);\n  ASSERT_EQ(config.distributed_meta.rank_size, 8);\n  ASSERT_EQ(config.distributed_meta.stage_size, 8);\n  ASSERT_EQ(config.rank_list.size(), 1);\n  OneRankConfig tempRankConfig = config.rank_list.at(0);\n  ASSERT_EQ(tempRankConfig.device_id, 0);\n  ASSERT_EQ(tempRankConfig.ip, \"1.1.1.1\");\n}\n\nTEST_F(TestAgentConfigAcquire, test_agent_config_acquire_not_load_config_failed) {\n  std::shared_ptr<DistributedModelLoader> servable = std::make_shared<DistributedModelLoader>();\n  servable->config_loaded_ = false;\n  const std::string server_address = \"any_addr\";\n  MSDistributedImpl mSDistributedImpl(servable, server_address);\n  grpc::ServerContext context;\n  const proto::AgentConfigAcquireRequest request;\n  
proto::AgentConfigAcquireReply reply;\n  const grpc::Status status = mSDistributedImpl.AgentConfigAcquire(&context, &request, &reply);\n  ASSERT_EQ(status.error_code(), 1);\n}\n\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/cpp/tests/test_context.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"../common/common_test.h\"\n#define private public\n#include \"worker/inference/inference.h\"\n#include \"worker/inference/mindspore_model_wrap.h\"\n#undef private\n\nusing std::string;\nusing std::vector;\nnamespace mindspore {\nnamespace serving {\nclass TestModelContext : public UT::Common {\n public:\n  TestModelContext() = default;\n  void Init(std::string file_name) {\n    char *dir;\n    dir = get_current_dir_name();\n    std::string file_path(dir);\n    file_path += file_name;\n    std::ofstream fp(file_path);\n    fp << \"model content\";\n    fp.close();\n    model_file = file_path;\n    free(dir);\n  }\n  virtual void SetUp() {\n    setenv(\"SERVING_ENABLE_CPU_DEVICE\", \"1\", 1);\n    setenv(\"SERVING_ENABLE_GPU_DEVICE\", \"1\", 1);\n  }\n  virtual void TearDown() {\n    remove(model_file.c_str());\n    setenv(\"SERVING_ENABLE_CPU_DEVICE\", \"0\", 1);\n    setenv(\"SERVING_ENABLE_GPU_DEVICE\", \"0\", 1);\n  }\n  std::string model_file;\n};\n\n/// Feature: model context\n/// Description: ascend910 device with mindspore\n/// Expectation: the context has ascend910 and load success\nTEST_F(TestModelContext, test_ms_set_ascend910) {\n  setenv(\"SERVING_ENABLE_CPU_DEVICE\", \"0\", 1);\n  setenv(\"SERVING_ENABLE_GPU_DEVICE\", \"0\", 1);\n\n  Init(\"tensor_add.mindir@ms_ascend\");\n  ModelContext model_context;\n  auto 
mindspore_wrap = InferenceLoader::Instance().CreateMindSporeInfer();\n  auto status = mindspore_wrap->LoadModelFromFile(serving::DeviceType::kDeviceTypeAscend, 0, {model_file},\n                                                  serving::kMindIR, false, {}, model_context, {}, {}, {}, false);\n  ASSERT_TRUE(status.IsSuccess());\n}\n\n/// Feature: model context\n/// Description: gpu device with lite\n/// Expectation: the context has gpu and load success\nTEST_F(TestModelContext, test_lite_set_gpu) {\n  Init(\"tensor_add.mindir@lite_gpu_cpu\");\n  ModelContext model_context;\n  auto mindspore_wrap = InferenceLoader::Instance().CreateMindSporeInfer();\n  auto status = mindspore_wrap->LoadModelFromFile(serving::DeviceType::kDeviceTypeGpu, 0, {model_file},\n                                                  serving::kMindIR, false, {}, model_context, {}, {}, {}, true);\n  ASSERT_TRUE(status.IsSuccess());\n}\n\n/// Feature: Model context\n/// Description: gpu cpu device with lite\n/// Expectation: the context has gpu and cpu and load success\nTEST_F(TestModelContext, test_lite_set_gpu_cpu) {\n  Init(\"tensor_add.mindir@lite_gpu_cpu\");\n  ModelContext model_context;\n  DeviceInfo cpu_device_info{{\"device_type\", \"cpu\"}};\n  model_context.device_list.push_back(cpu_device_info);\n  auto mindspore_wrap = InferenceLoader::Instance().CreateMindSporeInfer();\n  auto status = mindspore_wrap->LoadModelFromFile(serving::DeviceType::kDeviceTypeGpu, 0, {model_file},\n                                                  serving::kMindIR, false, {}, model_context, {}, {}, {}, true);\n  ASSERT_TRUE(status.IsSuccess());\n}\n\n/// Feature: Model context\n/// Description: gpu cpu device with mindspore\n/// Expectation: the context only has gpu and load success\nTEST_F(TestModelContext, test_ms_set_gpu) {\n  Init(\"tensor_add.mindir@ms_gpu\");\n  ModelContext model_context;\n  DeviceInfo cpu_device_info{{\"device_type\", \"cpu\"}};\n  model_context.device_list.push_back(cpu_device_info);\n  
auto mindspore_wrap = InferenceLoader::Instance().CreateMindSporeInfer();\n  auto status = mindspore_wrap->LoadModelFromFile(serving::DeviceType::kDeviceTypeGpu, 0, {model_file},\n                                                  serving::kMindIR, false, {}, model_context, {}, {}, {}, false);\n  ASSERT_TRUE(status.IsSuccess());\n}\n\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/cpp/tests/test_distributed_inference.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include <thread>\n#include <chrono>\n#include <vector>\n#include \"gtest/gtest.h\"\n#include \"common/status.h\"\n#include \"proto/ms_agent.pb.h\"\n#include \"tests/ut/cpp/common/common_test.h\"\n#include \"common/grpc_client.h\"\n#include \"worker/distributed_worker/notify_agent/base_notify_agent.h\"\n#define private public\n#include \"common/exit_handle.h\"\n#include \"worker/distributed_worker/distributed_model_loader.h\"\n#undef private\n\nnamespace mindspore {\nnamespace serving {\n\nstruct AgentInferResult {\n  int64_t prediction_time = 0;  // milliseconds\n  Status status = SUCCESS;\n  int64_t error_code = 0;\n  std::string error_msg = \"\";\n};\n\nclass FakeNotifyAgent : public BaseNotifyAgent {\n public:\n  explicit FakeNotifyAgent(int64_t prediction_time = 0, Status status = SUCCESS, int64_t error_code = 0,\n                           std::string error_msg = \"\")\n      : prediction_time_(prediction_time), status_(status), error_code_(error_code), error_msg_(error_msg) {}\n  ~FakeNotifyAgent() = default;\n  Status Exit() override { return SUCCESS; }\n  Status DispatchAsync(const proto::DistributedPredictRequest &request, proto::DistributedPredictReply *reply,\n                       AsyncPredictCallback callback) override {\n    auto error_msg = reply->mutable_error_msg();\n    error_msg->set_error_code(error_code_);\n    
if (!error_msg_.empty()) {\n      error_msg->set_error_msg(error_msg_);\n    }\n\n    auto prediction_time = prediction_time_;\n    auto status = status_;\n    auto predict = [prediction_time, status, callback]() {\n      std::chrono::milliseconds dura(prediction_time);\n      std::this_thread::sleep_for(dura);\n      callback(status);\n    };\n    std::thread t1(predict);\n    t1.detach();\n    return SUCCESS;\n  }\n\n private:\n  int64_t prediction_time_;  // milliseconds\n  Status status_;\n  int64_t error_code_;\n  std::string error_msg_;\n};\n\nclass TestDistributedInference : public UT::Common {\n public:\n  TestDistributedInference() = default;\n  ~TestDistributedInference() = default;\n\n  void InitDistributedServable(std::shared_ptr<DistributedModelLoader> servable, size_t rank_size, size_t stage_size,\n                               bool is_running, bool is_loaded) {\n    ExitSignalHandle::Instance().is_running_ = is_running;\n    servable->model_loaded_ = is_loaded;\n    servable->config_.distributed_meta.rank_size = rank_size;\n    servable->config_.distributed_meta.stage_size = stage_size;\n  }\n\n  void InitAgentSpecMap(std::shared_ptr<DistributedModelLoader> servable,\n                        const std::vector<AgentInferResult> &result_list) {\n    for (size_t rank_id = 0; rank_id < result_list.size(); ++rank_id) {\n      const auto &result = result_list[rank_id];\n      DistributedAgentContext agent_context;\n      agent_context.notify_agent_ =\n        std::make_shared<FakeNotifyAgent>(result.prediction_time, result.status, result.error_code, result.error_msg);\n      servable->agent_spec_map_.insert({rank_id, agent_context});\n    }\n  }\n};\n\nTEST_F(TestDistributedInference, test_agent_8_stage_1) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 8, 1, true, true);\n\n  std::vector<AgentInferResult> result_list(8);\n  
InitAgentSpecMap(servable, result_list);\n\n  std::vector<TensorBasePtr> input, output;\n  auto status = servable->Predict(input, &output);\n\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestDistributedInference, test_agent_4) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 4, 1, true, true);\n\n  std::vector<AgentInferResult> result_list(4);\n  InitAgentSpecMap(servable, result_list);\n\n  std::vector<TensorBasePtr> input, output;\n  auto status = servable->Predict(input, &output);\n\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestDistributedInference, test_agent_32_stage_1) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 32, 1, true, true);\n\n  std::vector<AgentInferResult> result_list(32);\n  InitAgentSpecMap(servable, result_list);\n\n  std::vector<TensorBasePtr> input, output;\n  auto status = servable->Predict(input, &output);\n\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestDistributedInference, test_agent_32_stage_2) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 32, 2, true, true);\n\n  std::vector<AgentInferResult> result_list(32);\n  InitAgentSpecMap(servable, result_list);\n\n  std::vector<TensorBasePtr> input, output;\n  auto status = servable->Predict(input, &output);\n\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestDistributedInference, test_agent_32_stage_4) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 32, 4, true, true);\n\n  std::vector<AgentInferResult> result_list(32);\n  InitAgentSpecMap(servable, result_list);\n\n  std::vector<TensorBasePtr> 
input, output;\n  auto status = servable->Predict(input, &output);\n\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestDistributedInference, test_agent_64_stage_8) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 64, 8, true, true);\n\n  std::vector<AgentInferResult> result_list(64);\n  InitAgentSpecMap(servable, result_list);\n\n  std::vector<TensorBasePtr> input, output;\n  auto status = servable->Predict(input, &output);\n\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestDistributedInference, test_output_nullptr) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 32, 4, true, true);\n\n  std::vector<AgentInferResult> result_list(32);\n  InitAgentSpecMap(servable, result_list);\n\n  Status status;\n  std::vector<TensorBasePtr> input, output;\n  ASSERT_ANY_THROW({ status = servable->Predict(input, nullptr); });\n  ASSERT_EQ(status.StatusCode(), FAILED);\n}\n\nTEST_F(TestDistributedInference, test_agent_infer_more_than_10s) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 32, 4, true, true);\n\n  std::vector<AgentInferResult> result_list(32);\n  result_list[20].prediction_time = 11000;\n  InitAgentSpecMap(servable, result_list);\n\n  std::vector<TensorBasePtr> input, output;\n  auto status = servable->Predict(input, &output);\n\n  ASSERT_EQ(status.StatusCode(), FAILED);\n}\n\nTEST_F(TestDistributedInference, test_agent_exit) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 32, 4, false, true);\n\n  std::vector<AgentInferResult> result_list(32);\n  InitAgentSpecMap(servable, result_list);\n\n  
std::vector<TensorBasePtr> input, output;\n  auto status = servable->Predict(input, &output);\n\n  ASSERT_EQ(status.StatusCode(), FAILED);\n}\n\nTEST_F(TestDistributedInference, test_rank_size_not_equal_agent_num) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 32, 4, true, true);\n\n  std::vector<AgentInferResult> result_list(12);\n  InitAgentSpecMap(servable, result_list);\n\n  Status status;\n  std::vector<TensorBasePtr> input, output;\n  ASSERT_ANY_THROW({ status = servable->Predict(input, &output); });\n  ASSERT_EQ(status.StatusCode(), FAILED);\n}\n\nTEST_F(TestDistributedInference, test_agent_reply_with_error_msg) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 32, 4, true, true);\n\n  std::vector<AgentInferResult> result_list(32);\n  result_list[10].error_msg = \"failed\";\n  result_list[10].error_code = 1;\n  InitAgentSpecMap(servable, result_list);\n\n  std::vector<TensorBasePtr> input, output;\n  auto status = servable->Predict(input, &output);\n  ASSERT_EQ(status.StatusCode(), FAILED);\n}\n\nTEST_F(TestDistributedInference, test_model_not_loaded) {\n  auto servable = std::make_shared<DistributedModelLoader>();\n  servable->model_key_ = \"test_distributed_model_key\";\n  InitDistributedServable(servable, 32, 4, true, false);\n\n  std::vector<AgentInferResult> result_list(32);\n  InitAgentSpecMap(servable, result_list);\n\n  Status status;\n  std::vector<TensorBasePtr> input, output;\n  ASSERT_ANY_THROW({ status = servable->Predict(input, &output); });\n  ASSERT_EQ(status.StatusCode(), FAILED);\n}\n\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/cpp/tests/test_init_config_on_start_up.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"common/common_test.h\"\n#include \"common/tensor_base.h\"\n#define private public\n#include \"worker/distributed_worker/distributed_model_loader.h\"\n#undef private\n\nusing std::string;\nusing std::vector;\nnamespace mindspore {\nnamespace serving {\nclass TestParseRankTableFile : public UT::Common {\n public:\n  TestParseRankTableFile() = default;\n  virtual void SetUp() {}\n  virtual void TearDown() {\n    for (auto &item : config_file_list_) {\n      remove(item.c_str());\n    }\n    UT::Common::TearDown();\n  }\n  std::set<std::string> config_file_list_;\n};\n\nTEST_F(TestParseRankTableFile, test_init_config_on_startup_empty_file_failed) {\n  std::string empty_rank_table_file = \"empty_rank_table_file\";\n  std::ofstream fp(empty_rank_table_file);\n  fp << \"empty rank table file\";\n  fp.close();\n  config_file_list_.emplace(empty_rank_table_file);\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->InitConfigOnStartup(empty_rank_table_file);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_init_config_on_startup_success) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"server_list\": [\n          {\n              \"server_id\": \"10.155.111.140\",\n              \"device\": [\n                  {\"device_id\": 
\"0\",\"device_ip\": \"192.1.27.6\",\"rank_id\": \"0\"},\n                  {\"device_id\": \"1\",\"device_ip\": \"192.2.27.6\",\"rank_id\": \"1\"}],\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  std::string rank_table_file = \"rank_table_file\";\n  std::ofstream fp(rank_table_file);\n  fp << rank_table_server_list;\n  fp.close();\n  config_file_list_.emplace(rank_table_file);\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->InitConfigOnStartup(rank_table_file);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_server_list_success) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"server_id\": \"10.155.111.140\",\n              \"device\": [\n                  {\"device_id\": \"0\",\"device_ip\": \"192.1.27.6\",\"rank_id\": \"0\"},\n                  {\"device_id\": \"1\",\"device_ip\": \"192.2.27.6\",\"rank_id\": \"1\"},\n                  {\"device_id\": \"2\",\"device_ip\": \"192.3.27.6\",\"rank_id\": \"2\"},\n                  {\"device_id\": \"3\",\"device_ip\": \"192.4.27.6\",\"rank_id\": \"3\"},\n                  {\"device_id\": \"4\",\"device_ip\": \"192.1.27.7\",\"rank_id\": \"4\"},\n                  {\"device_id\": \"5\",\"device_ip\": \"192.2.27.7\",\"rank_id\": \"5\"},\n                  {\"device_id\": \"6\",\"device_ip\": \"192.3.27.7\",\"rank_id\": \"6\"},\n                  {\"device_id\": \"7\",\"device_ip\": \"192.4.27.7\",\"rank_id\": \"7\"}],\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  
ASSERT_EQ(status.StatusCode(), SUCCESS);\n  ASSERT_EQ(servable->config_.rank_list.size(), 8);\n  uint32_t expect_device_id = 0;\n  for (auto &one_rank_config : servable->config_.rank_list) {\n    std::string server_ip = one_rank_config.ip;\n    uint32_t device_id = one_rank_config.device_id;\n    ASSERT_EQ(server_ip, \"10.155.111.140\");\n    ASSERT_EQ(device_id, expect_device_id);\n    expect_device_id++;\n  }\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_not_server_list_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_invalid_server_list_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": \"0\",\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_empty_server_list_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_not_server_id_failed) {\n  
nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"device\": [\n                  {\"device_id\": \"0\",\"device_ip\": \"192.1.27.6\",\"rank_id\": \"0\"},\n                  {\"device_id\": \"1\",\"device_ip\": \"192.2.27.6\",\"rank_id\": \"1\"}],\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_invalid_server_id_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"server_id\": [],\n              \"device\": [\n                  {\"device_id\": \"0\",\"device_ip\": \"192.1.27.6\",\"rank_id\": \"0\"},\n                  {\"device_id\": \"1\",\"device_ip\": \"192.2.27.6\",\"rank_id\": \"1\"}],\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_empty_server_id_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"server_id\": \"\",\n              \"device\": [\n                  {\"device_id\": \"0\",\"device_ip\": \"192.1.27.6\",\"rank_id\": \"0\"},\n                  {\"device_id\": \"1\",\"device_ip\": 
\"192.2.27.6\",\"rank_id\": \"1\"}],\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_not_device_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"server_id\": \"10.155.111.140\",\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_invalid_device_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"server_id\": \"10.155.111.140\",\n              \"device\": \"dsfds\",\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_empty_device_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"server_id\": \"10.155.111.140\",\n              \"device\": 
[],\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_not_device_id_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"server_id\": \"10.155.111.140\",\n              \"device\": [\n                  {\"device_ip\": \"192.1.27.6\",\"rank_id\": \"0\"}],\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_invalid_device_id_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"server_id\": \"\",\n              \"device\": [\n                  {\"device_id\": \"1wdb\",\"device_ip\": \"192.1.27.6\",\"rank_id\": \"0\"}],\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_not_rank_id_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      
\"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"server_id\": \"10.155.111.140\",\n              \"device\": [\n                  {\"device_id\": \"0\",\"device_ip\": \"192.1.27.6\"}],\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_invalid_rank_id_failed1) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"server_id\": \"\",\n              \"device\": [\n                  {\"device_id\": \"0\",\"device_ip\": \"192.1.27.6\",\"rank_id\": \"0wer\"}],\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_invalid_rank_id_failed2) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"version\": \"1.0\",\n      \"server_count\": \"1\",\n      \"server_list\": [\n          {\n              \"server_id\": \"\",\n              \"device\": [\n                  {\"device_id\": \"0\",\"device_ip\": \"192.1.27.6\",\"rank_id\": \"5\"}],\n               \"host_nic_ip\": \"reserve\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithServerList(\"rank_table_file\", rank_table_server_list);\n  
ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_success) {\n  nlohmann::json rank_table_group_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"chip_info\": \"910\",\n      \"deploy_mode\": \"lab\",\n      \"group_count\": \"1\",\n      \"group_list\": [\n          {\n              \"device_num\": \"2\",\n              \"server_num\": \"1\",\n              \"group_name\": \"\",\n              \"instance_count\": \"2\",\n              \"instance_list\": [\n                  {\n                      \"devices\": [{\"device_id\": \"0\",\"device_ip\": \"192.1.27.6\"}],\n                      \"rank_id\": \"0\",\n                      \"server_id\": \"10.155.111.140\"\n                  },\n                  {\n                      \"devices\": [{\"device_id\": \"1\",\"device_ip\": \"192.2.27.6\"}],\n                      \"rank_id\": \"1\",\n                      \"server_id\": \"10.155.111.140\"\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_group_list);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n  ASSERT_EQ(servable->config_.rank_list.size(), 2);\n  uint32_t expect_device_id = 0;\n  for (auto &one_rank_config : servable->config_.rank_list) {\n    std::string server_ip = one_rank_config.ip;\n    uint32_t device_id = one_rank_config.device_id;\n    ASSERT_EQ(server_ip, \"10.155.111.140\");\n    ASSERT_EQ(device_id, expect_device_id);\n    expect_device_id++;\n  }\n}\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_not_group_list_failed) {\n  nlohmann::json rank_table_group_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"chip_info\": \"910\",\n      \"deploy_mode\": \"lab\",\n      \"group_count\": \"1\",\n      \"status\": \"completed\"\n  }\n  
)\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_group_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_invalid_group_list_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"chip_info\": \"910\",\n      \"group_count\": \"1\",\n      \"group_list\": \"0\",\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_empty_group_list_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"chip_info\": \"910\",\n      \"group_count\": \"1\",\n      \"group_list\": [],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_not_instance_list_failed) {\n  nlohmann::json rank_table_group_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"chip_info\": \"910\",\n      \"deploy_mode\": \"lab\",\n      \"group_count\": \"1\",\n      \"group_list\": [\n          {\n              \"server_num\": \"1\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_group_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, 
test_parse_rank_table_file_invalid_instance_list_failed) {\n  nlohmann::json rank_table_group_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"chip_info\": \"910\",\n      \"deploy_mode\": \"lab\",\n      \"group_count\": \"1\",\n      \"group_list\": [\n          {\n              \"server_num\": \"1\",\n              \"instance_list\": \"0\"\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_group_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_empty_instance_list_failed) {\n  nlohmann::json rank_table_group_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"chip_info\": \"910\",\n      \"deploy_mode\": \"lab\",\n      \"group_count\": \"1\",\n      \"group_list\": [\n          {\n              \"server_num\": \"1\",\n              \"instance_list\": []\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_group_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_not_server_id_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"group_list\": [\n          {\n              \"instance_count\": \"1\",\n              \"instance_list\": [\n                  {\n                      \"devices\": [{\"device_id\": \"0\",\"device_ip\": \"192.1.27.6\"}],\n                      \"rank_id\": \"0\"\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = 
servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_invalid_server_id_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"group_list\": [\n          {\n              \"instance_count\": \"1\",\n              \"instance_list\": [\n                  {\n                      \"devices\": [{\"device_id\": \"0\",\"device_ip\": \"192.1.27.6\"}],\n                      \"rank_id\": \"0\",\n                      \"server_id\": []\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_empty_server_id_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"group_list\": [\n          {\n              \"instance_count\": \"1\",\n              \"instance_list\": [\n                  {\n                      \"devices\": [{\"device_id\": \"0\",\"device_ip\": \"192.1.27.6\"}],\n                      \"rank_id\": \"0\",\n                      \"server_id\": \"\"\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_not_devices_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n     
 \"group_list\": [\n          {\n              \"instance_count\": \"1\",\n              \"instance_list\": [\n                  {\n                      \"rank_id\": \"0\",\n                      \"server_id\": \"10.155.111.140\"\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_invalid_devices_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"group_list\": [\n          {\n              \"instance_count\": \"1\",\n              \"instance_list\": [\n                  {\n                      \"devices\": \"rtrt\",\n                      \"rank_id\": \"0\",\n                      \"server_id\": \"10.155.111.140\"\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_empty_devices_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"group_list\": [\n          {\n              \"instance_count\": \"1\",\n              \"instance_list\": [\n                  {\n                      \"devices\": [],\n                      \"rank_id\": \"0\",\n                      \"server_id\": \"10.155.111.140\"\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = 
std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_not_device_id_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"group_list\": [\n          {\n              \"instance_count\": \"1\",\n              \"instance_list\": [\n                  {\n                      \"devices\": [{\"device_ip\": \"192.1.27.6\"}],\n                      \"rank_id\": \"0\",\n                      \"server_id\": \"10.155.111.140\"\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_invalid_device_id_failed) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"group_list\": [\n          {\n              \"instance_count\": \"1\",\n              \"instance_list\": [\n                  {\n                      \"devices\": [{\"device_id\": \"wd1gt2\", \"device_ip\": \"192.1.27.6\"}],\n                      \"rank_id\": \"0\",\n                      \"server_id\": \"10.155.111.140\"\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_not_rank_id_failed) {\n  nlohmann::json 
rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"group_list\": [\n          {\n              \"instance_count\": \"1\",\n              \"instance_list\": [\n                  {\n                      \"devices\": [{\"device_id\": \"0\", \"device_ip\": \"192.1.27.6\"}],\n                      \"server_id\": \"10.155.111.140\"\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_invalid_rank_id_failed1) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"group_list\": [\n          {\n              \"instance_count\": \"1\",\n              \"instance_list\": [\n                  {\n                      \"devices\": [{\"device_id\": \"0\", \"device_ip\": \"192.1.27.6\"}],\n                      \"rank_id\": \"tfdg5\",\n                      \"server_id\": \"10.155.111.140\"\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\nTEST_F(TestParseRankTableFile, test_parse_rank_table_file_with_group_list_invalid_rank_id_failed2) {\n  nlohmann::json rank_table_server_list = R\"(\n  {\n      \"board_id\": \"0x0000\",\n      \"group_list\": [\n          {\n              \"instance_count\": \"1\",\n              \"instance_list\": [\n                  {\n                      \"devices\": [{\"device_id\": \"0\", \"device_ip\": \"192.1.27.6\"}],\n                      \"rank_id\": 
\"7\",\n                      \"server_id\": \"10.155.111.140\"\n                  }\n              ]\n          }\n      ],\n      \"status\": \"completed\"\n  }\n  )\"_json;\n  auto servable = std::make_shared<DistributedModelLoader>();\n  auto status = servable->ParserRankTableWithGroupList(\"rank_table_file\", rank_table_server_list);\n  ASSERT_EQ(status.StatusCode(), INVALID_INPUTS);\n}\n\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/cpp/tests/test_master_worker.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"tests/ut/cpp/common/test_servable_common.h\"\n\n#define private public\n#undef private\n\nusing std::string;\nusing std::vector;\nnamespace mindspore {\nnamespace serving {\n\nTEST_F(TestMasterWorkerClient, test_master_worker_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  auto y_data = InitOneInstanceRequest(&request, servable_name_, \"add_common\", 0);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.instances_size(), 1);\n  ASSERT_EQ(reply.error_msg_size(), 0);\n  auto &output_instance = reply.instances(0);\n  ASSERT_EQ(output_instance.items_size(), 1);\n  auto &output_items = output_instance.items();\n  ASSERT_EQ(output_items.begin()->first, \"y\");\n  auto &output_tensor = output_items.begin()->second;\n\n  CheckTensor(output_tensor, {2, 2}, proto::MS_FLOAT32, y_data.data(), y_data.size() * sizeof(float));\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_success_version_number_1_request_version_1) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n\n  // start_servable\n  
StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  auto y_data = InitOneInstanceRequest(&request, servable_name_, \"add_common\", 1);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.instances_size(), 1);\n  ASSERT_EQ(reply.error_msg_size(), 0);\n  auto &output_instance = reply.instances(0);\n  ASSERT_EQ(output_instance.items_size(), 1);\n  auto &output_items = output_instance.items();\n  ASSERT_EQ(output_items.begin()->first, \"y\");\n  auto &output_tensor = output_items.begin()->second;\n\n  CheckTensor(output_tensor, {2, 2}, proto::MS_FLOAT32, y_data.data(), y_data.size() * sizeof(float));\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_success_version_number_2_request_version_2) {\n  Init(\"test_servable_dir\", \"test_servable\", 2, \"test_add.mindir\");\n  RegisterAddServable();\n\n  // start_servable\n  auto status = StartServable(servable_dir_, servable_name_, 2);\n  ASSERT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  auto y_data = InitOneInstanceRequest(&request, servable_name_, \"add_common\", 2);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckInstanceResult(reply, y_data);\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_success_version_number_2_request_lastest) {\n  Init(\"test_servable_dir\", \"test_servable\", 2, \"test_add.mindir\");\n  RegisterAddServable();\n\n  // start_servable\n  auto status = StartServable(servable_dir_, servable_name_, 2);\n  ASSERT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  auto y_data = InitOneInstanceRequest(&request, servable_name_, \"add_common\", 0);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  
CheckInstanceResult(reply, y_data);\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_success_multi_version_number_1_2_request_lastest) {\n  auto servable_dir = std::string(test_info_->test_case_name()) + \"_test_servable_dir\";\n  Init(servable_dir, \"test_servable\", 1, \"test_add.mindir\");\n  Init(servable_dir, \"test_servable\", 2, \"test_add.mindir\");\n\n  RegisterAddServable();\n\n  // start_servable\n  auto status = StartServable(servable_dir_, servable_name_, 2);\n  ASSERT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  auto y_data = InitOneInstanceRequest(&request, servable_name_, \"add_common\", 0);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckInstanceResult(reply, y_data);\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_success_version_number_1_2_request_2) {\n  auto servable_dir = std::string(test_info_->test_case_name()) + \"_test_servable_dir\";\n  Init(servable_dir, \"test_servable\", 1, \"test_add.mindir\");\n  Init(servable_dir, \"test_servable\", 2, \"test_add.mindir\");\n\n  RegisterAddServable();\n\n  // start_servable\n  auto status = StartServable(servable_dir_, servable_name_, 2);\n  ASSERT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  auto y_data = InitOneInstanceRequest(&request, servable_name_, \"add_common\", 2);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckInstanceResult(reply, y_data);\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_success_version_number_1_2_request_1_failed) {\n  auto servable_dir = std::string(test_info_->test_case_name()) + \"_test_servable_dir\";\n  Init(servable_dir, \"test_servable\", 1, \"test_add.mindir\");\n  Init(servable_dir, \"test_servable\", 2, \"test_add.mindir\");\n\n  RegisterAddServable();\n\n  // 
start_servable\n  auto status = StartServable(servable_dir_, servable_name_, 2);\n  ASSERT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  auto y_data = InitOneInstanceRequest(&request, servable_name_, \"add_common\", 1);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), 1);\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Cannot find servable match servable\");\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_three_instance_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // input float32 --> servable float32-float32, shape [2, 2]\n  auto y_data_list = InitMultiInstancesRequest(&request, servable_name_, \"add_common\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckMultiInstanceResult(reply, y_data_list, instances_count);\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_input_size_not_match_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  auto request_servable_spec = request.mutable_servable_spec();\n  request_servable_spec->set_name(servable_name_);\n  request_servable_spec->set_method_name(\"add_common\");\n  request_servable_spec->set_version_number(0);\n\n  size_t instances_count = 3;\n  std::vector<std::vector<float>> y_data_list;\n  for (size_t k = 0; k < instances_count; k++) {\n    std::vector<float> x1_data = {1.1, 2.2};\n    std::vector<float> x2_data = {1.2, 2.3};\n    std::vector<float> y_data;\n    for 
(size_t i = 0; i < x1_data.size(); i++) {\n      x1_data[i] *= (k + 1);\n      x2_data[i] *= (k + 1);\n      y_data.push_back(x1_data[i] + x2_data[i]);\n    }\n    y_data_list.push_back(y_data);\n\n    auto instance = request.add_instances();\n    auto &input_map = (*instance->mutable_items());\n    // input x1\n    InitTensor(&input_map[\"x1\"], {2}, proto::MS_FLOAT32, x1_data.data(), x1_data.size() * sizeof(float));\n    // input x2\n    InitTensor(&input_map[\"x2\"], {2}, proto::MS_FLOAT32, x2_data.data(), x2_data.size() * sizeof(float));\n  }\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), instances_count);\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_with_batch_dim_true_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable(true);  // with_batch_dim = true\n\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // input float32 --> servable float32-float32, shape [2]\n  auto y_data_list = InitMultiInstancesShape2Request(&request, servable_name_, \"add_common\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckMultiInstanceResult(reply, y_data_list, instances_count);\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_with_batch_dim_true_input_size_not_match_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable(true);  // with_batch_dim = true\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // shape [2,2] not match required shape [2] as with_batch_dim = true\n  auto y_data = InitMultiInstancesRequest(&request, servable_name_, \"add_common\", 
0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), instances_count);\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_error_servable_name) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // invalid servable name\n  auto y_data = InitMultiInstancesRequest(&request, servable_name_ + \"_error\", \"add_common\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), 1);\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Servable test_servable_error is not declared\");\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_error_method_name) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // invalid method name\n  auto y_data = InitMultiInstancesRequest(&request, servable_name_, \"add_common_error\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), 1);\n  ExpectContainMsg(reply.error_msg(0).error_msg(),\n                   \"Method add_common_error is not registered for servable test_servable\");\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_error_version_number) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest 
request;\n  size_t instances_count = 3;\n  // invalid version_number\n  auto y_data = InitMultiInstancesRequest(&request, servable_name_, \"add_common\", 2, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), 1);\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Cannot find servable match servable\");\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_invalid_input_name) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // invalid version_number\n  auto request_servable_spec = request.mutable_servable_spec();\n  request_servable_spec->set_name(servable_name_);\n  request_servable_spec->set_method_name(\"add_common\");\n  request_servable_spec->set_version_number(0);\n\n  std::vector<std::vector<float>> y_data_list;\n  for (size_t k = 0; k < instances_count; k++) {\n    std::vector<float> x1_data = {1.1, 2.2, 3.3, 4.4};\n    std::vector<float> x2_data = {1.2, 2.3, 3.4, 4.5};\n    std::vector<float> y_data;\n    for (size_t i = 0; i < x1_data.size(); i++) {\n      x1_data[i] *= (k + 1);\n      x2_data[i] *= (k + 1);\n      y_data.push_back(x1_data[i] + x2_data[i]);\n    }\n    y_data_list.push_back(y_data);\n\n    auto instance = request.add_instances();\n    auto &input_map = (*instance->mutable_items());\n    // input x1\n    InitTensor(&input_map[\"x1\"], {2, 2}, proto::MS_FLOAT32, x1_data.data(), x1_data.size() * sizeof(float));\n    // input x3, expected is x2\n    InitTensor(&input_map[\"x3\"], {2, 2}, proto::MS_FLOAT32, x2_data.data(), x2_data.size() * sizeof(float));\n  }\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  
ASSERT_EQ(reply.error_msg_size(), 1);\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Cannot find input x2 in instance input\");\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_three_instance_one_input_invalid_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // input float32 --> servable float32-float32, shape [2, 2]\n  auto y_data_list = InitMultiInstancesRequest(&request, servable_name_, \"add_common\", 0, instances_count);\n  auto items = request.mutable_instances(1)->mutable_items();\n  auto it = items->find(\"x2\");\n  ASSERT_TRUE(it != items->end());\n  items->erase(it);  // erase x2 input\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), 1);\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Cannot find input x2 in instance input\");\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_extra_input_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // invalid version_number\n  auto request_servable_spec = request.mutable_servable_spec();\n  request_servable_spec->set_name(servable_name_);\n  request_servable_spec->set_method_name(\"add_common\");\n  request_servable_spec->set_version_number(0);\n\n  std::vector<std::vector<float>> y_data_list;\n  for (size_t k = 0; k < instances_count; k++) {\n    std::vector<float> x1_data = {1.1, 2.2, 3.3, 4.4};\n    std::vector<float> x2_data = {1.2, 2.3, 3.4, 4.5};\n    std::vector<float> y_data;\n    for (size_t i = 0; i < x1_data.size(); i++) {\n      x1_data[i] *= (k + 1);\n      x2_data[i] *= 
(k + 1);\n      y_data.push_back(x1_data[i] + x2_data[i]);\n    }\n    y_data_list.push_back(y_data);\n\n    auto instance = request.add_instances();\n    auto &input_map = (*instance->mutable_items());\n    // input x1\n    InitTensor(&input_map[\"x1\"], {2, 2}, proto::MS_FLOAT32, x1_data.data(), x1_data.size() * sizeof(float));\n    // input x2\n    InitTensor(&input_map[\"x2\"], {2, 2}, proto::MS_FLOAT32, x2_data.data(), x2_data.size() * sizeof(float));\n    // extra input x3\n    InitTensor(&input_map[\"x3\"], {2, 2}, proto::MS_FLOAT32, x2_data.data(), x2_data.size() * sizeof(float));\n  }\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckMultiInstanceResult(reply, y_data_list, instances_count);\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_invalid_input_datatype_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // invalid version_number\n  auto request_servable_spec = request.mutable_servable_spec();\n  request_servable_spec->set_name(servable_name_);\n  request_servable_spec->set_method_name(\"add_common\");\n  request_servable_spec->set_version_number(0);\n\n  std::vector<std::vector<float>> y_data_list;\n  for (size_t k = 0; k < instances_count; k++) {\n    std::vector<float> x1_data = {1.1, 2.2, 3.3, 4.4};\n    std::vector<float> x2_data = {1.2, 2.3, 3.4, 4.5};\n    std::vector<float> y_data;\n    for (size_t i = 0; i < x1_data.size(); i++) {\n      x1_data[i] *= (k + 1);\n      x2_data[i] *= (k + 1);\n      y_data.push_back(x1_data[i] + x2_data[i]);\n    }\n    y_data_list.push_back(y_data);\n\n    auto instance = request.add_instances();\n    auto &input_map = (*instance->mutable_items());\n    // input x1\n    InitTensor(&input_map[\"x1\"], {2, 2}, 
proto::MS_FLOAT32, x1_data.data(), x1_data.size() * sizeof(float));\n    // input x2, invalid data type\n    InitTensor(&input_map[\"x2\"], {2, 2}, proto::MS_INT32, x2_data.data(), x2_data.size() * sizeof(float));\n  }\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), 3);\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Given model input 1 data type\");\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_with_batch_dim_true_invalid_input_datatype_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable(true);  // with_batch_dim=true\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // invalid version_number\n  auto request_servable_spec = request.mutable_servable_spec();\n  request_servable_spec->set_name(servable_name_);\n  request_servable_spec->set_method_name(\"add_common\");\n  request_servable_spec->set_version_number(0);\n\n  std::vector<std::vector<float>> y_data_list;\n  for (size_t k = 0; k < instances_count; k++) {\n    std::vector<float> x1_data = {1.1, 2.2};\n    std::vector<float> x2_data = {1.2, 2.3};\n    std::vector<float> y_data;\n    for (size_t i = 0; i < x1_data.size(); i++) {\n      x1_data[i] *= (k + 1);\n      x2_data[i] *= (k + 1);\n      y_data.push_back(x1_data[i] + x2_data[i]);\n    }\n    y_data_list.push_back(y_data);\n\n    auto instance = request.add_instances();\n    auto &input_map = (*instance->mutable_items());\n    // input x1\n    InitTensor(&input_map[\"x1\"], {2}, proto::MS_FLOAT32, x1_data.data(), x1_data.size() * sizeof(float));\n    // input x2, invalid data type\n    InitTensor(&input_map[\"x2\"], {2}, proto::MS_INT32, x2_data.data(), x2_data.size() * sizeof(float));\n  }\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  
EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), 3);\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Given model input 1 data type\");\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_invalid_input_datasize_not_match_shape_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // invalid version_number\n  auto request_servable_spec = request.mutable_servable_spec();\n  request_servable_spec->set_name(servable_name_);\n  request_servable_spec->set_method_name(\"add_common\");\n  request_servable_spec->set_version_number(0);\n\n  std::vector<std::vector<float>> y_data_list;\n  for (size_t k = 0; k < instances_count; k++) {\n    std::vector<float> x1_data = {1.1, 2.2, 3.3, 4.4};\n    std::vector<float> x2_data = {1.2, 2.3, 3.4, 4.5};\n    std::vector<float> y_data;\n    for (size_t i = 0; i < x1_data.size(); i++) {\n      x1_data[i] *= (k + 1);\n      x2_data[i] *= (k + 1);\n      y_data.push_back(x1_data[i] + x2_data[i]);\n    }\n    y_data_list.push_back(y_data);\n\n    auto instance = request.add_instances();\n    auto &input_map = (*instance->mutable_items());\n    // input x1\n    InitTensor(&input_map[\"x1\"], {2, 2}, proto::MS_FLOAT32, x1_data.data(), x1_data.size() * sizeof(float));\n    // input x2, invalid data size\n    InitTensor(&input_map[\"x2\"], {2, 2}, proto::MS_FLOAT32, x2_data.data(), (x2_data.size() - 1) * sizeof(float));\n  }\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), 1);  // proto parse check failed\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Tensor check failed: input data size\");\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_invalid_input_datasize_failed) 
{\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable();\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // invalid version_number\n  auto request_servable_spec = request.mutable_servable_spec();\n  request_servable_spec->set_name(servable_name_);\n  request_servable_spec->set_method_name(\"add_common\");\n  request_servable_spec->set_version_number(0);\n\n  std::vector<std::vector<float>> y_data_list;\n  for (size_t k = 0; k < instances_count; k++) {\n    std::vector<float> x1_data = {1.1, 2.2, 3.3, 4.4};\n    std::vector<float> x2_data = {1.2, 2.3, 3.4, 4.5};\n    std::vector<float> y_data;\n    for (size_t i = 0; i < x1_data.size(); i++) {\n      x1_data[i] *= (k + 1);\n      x2_data[i] *= (k + 1);\n      y_data.push_back(x1_data[i] + x2_data[i]);\n    }\n    y_data_list.push_back(y_data);\n\n    auto instance = request.add_instances();\n    auto &input_map = (*instance->mutable_items());\n    // input x1\n    InitTensor(&input_map[\"x1\"], {2, 2}, proto::MS_FLOAT32, x1_data.data(), x1_data.size() * sizeof(float));\n    // input x2, invalid data size\n    InitTensor(&input_map[\"x2\"], {2, 1}, proto::MS_FLOAT32, x2_data.data(), (x2_data.size() - 2) * sizeof(float));\n  }\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), 3);\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Given model input 1 size 8\");\n}\n\nTEST_F(TestMasterWorkerClient, test_master_worker_with_batch_dim_true_invalid_input_datasize_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  RegisterAddServable(true);  // with_batch_dim=true\n  // start_servable\n  StartAddServable();\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // invalid version_number\n  auto 
request_servable_spec = request.mutable_servable_spec();\n  request_servable_spec->set_name(servable_name_);\n  request_servable_spec->set_method_name(\"add_common\");\n  request_servable_spec->set_version_number(0);\n\n  std::vector<std::vector<float>> y_data_list;\n  for (size_t k = 0; k < instances_count; k++) {\n    std::vector<float> x1_data = {1.1, 2.2};\n    std::vector<float> x2_data = {1.2, 2.3};\n    std::vector<float> y_data;\n    for (size_t i = 0; i < x1_data.size(); i++) {\n      x1_data[i] *= (k + 1);\n      x2_data[i] *= (k + 1);\n      y_data.push_back(x1_data[i] + x2_data[i]);\n    }\n    y_data_list.push_back(y_data);\n\n    auto instance = request.add_instances();\n    auto &input_map = (*instance->mutable_items());\n    // input x1\n    InitTensor(&input_map[\"x1\"], {2}, proto::MS_FLOAT32, x1_data.data(), x1_data.size() * sizeof(float));\n    // input x2, invalid data size\n    InitTensor(&input_map[\"x2\"], {1}, proto::MS_FLOAT32, x2_data.data(), (x2_data.size() - 1) * sizeof(float));\n  }\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), 3);\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Given model input 1 size 4\");\n}\n\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/cpp/tests/test_model_thread.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"common/common_test.h\"\n#include \"master/server.h\"\n#include \"common/tensor_base.h\"\n#define private public\n#include \"master/model_thread.h\"\n#undef private\n\nusing std::string;\nusing std::vector;\nnamespace mindspore {\nnamespace serving {\nclass TestModelThead : public UT::Common {\n public:\n  TestModelThead() = default;\n};\n\nclass MS_API TestNotify : public BaseNotifyWorker {\n public:\n  explicit TestNotify(proto::PredictReply *reply) {\n    if (reply) {\n      reply_ = *reply;\n    }\n  }\n  ~TestNotify() override = default;\n\n  Status DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                       const PredictOnFinish &on_finish) override;\n\n  proto::PredictReply reply_;\n};\n\nStatus TestNotify::DispatchAsync(const proto::PredictRequest &request, proto::PredictReply *reply,\n                                 const PredictOnFinish &on_finish) {\n  *reply = reply_;\n  on_finish();\n  return SUCCESS;\n}\n\nstd::shared_ptr<WorkerContext> InitWorkerContext(proto::PredictReply *reply = nullptr) {\n  std::shared_ptr<WorkerContext> worker_context = std::make_shared<WorkerContext>();\n  std::shared_ptr<BaseNotifyWorker> notify = std::make_shared<TestNotify>(reply);\n  WorkerRegSpec spec;\n  spec.worker_pid = 1;\n  spec.servable_spec.servable_name = \"test_servable\";\n  
spec.servable_spec.version_number = 1;\n  spec.servable_spec.batch_size = 1;\n  spec.servable_spec.methods.push_back(ServableMethodInfo{\"add_cast\", {}});\n  worker_context->OnWorkerRegRequest(spec, notify);\n  return worker_context;\n}\n\nTEST_F(TestModelThead, AddWorker) {\n  ServableMethodInfo method_info;\n  method_info.name = \"add_cast\";\n  ModelThread thread(\"test_servable\", \"add_cast\", 0, 1, method_info);\n  uint64_t pid = 1;\n  std::shared_ptr<WorkerContext> worker_context = InitWorkerContext();\n  Status status = thread.AddWorker(pid, worker_context);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n  status = thread.AddWorker(pid, worker_context);\n  ASSERT_EQ(status.StatusCode(), FAILED);\n  pid = 2;\n  status = thread.AddWorker(pid, worker_context);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\nTEST_F(TestModelThead, DelWorker) {\n  ServableMethodInfo method_info;\n  method_info.name = \"add_cast\";\n  ModelThread thread(\"test_servable\", \"add_cast\", 0, 1, method_info);\n  uint64_t pid = 1;\n  Status status = thread.DelWorker(pid);\n  ASSERT_EQ(status.StatusCode(), FAILED);\n  std::shared_ptr<WorkerContext> worker_context = InitWorkerContext();\n  status = thread.AddWorker(pid, worker_context);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n  status = thread.DelWorker(pid);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\nTEST_F(TestModelThead, Dispatch) {\n  ServableMethodInfo method_info;\n  method_info.name = \"add_cast\";\n  ModelThread thread(\"test_servable\", \"add_cast\", 0, 1, method_info);\n  uint64_t pid = 1;\n  std::shared_ptr<WorkerContext> worker_context = InitWorkerContext();\n  Status status = thread.AddWorker(pid, worker_context);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n  proto::PredictRequest request;\n  request.mutable_servable_spec()->set_name(\"test_servable\");\n  request.mutable_servable_spec()->set_version_number(0);\n  request.mutable_servable_spec()->set_method_name(\"add_cast\");\n  proto::Instance instance;\n  auto 
proto_instance = request.add_instances();\n  *proto_instance->mutable_items() = instance.items();\n  proto::PredictReply reply;\n  PredictOnFinish callback = []() {};\n  status = thread.DispatchAsync(request, &reply, callback);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n  status = thread.DelWorker(pid);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\nTEST_F(TestModelThead, Dispatch1) {\n  ServableMethodInfo method_info;\n  method_info.name = \"add_cast\";\n  ModelThread thread(\"test_servable\", \"add_cast\", 0, 1, method_info);\n  uint64_t pid = 1;\n  std::shared_ptr<WorkerContext> worker_context = InitWorkerContext();\n  proto::PredictRequest request;\n  request.mutable_servable_spec()->set_name(\"test_servable\");\n  request.mutable_servable_spec()->set_version_number(0);\n  request.mutable_servable_spec()->set_method_name(\"add_cast\");\n  proto::Instance instance;\n  auto proto_instance = request.add_instances();\n  *proto_instance->mutable_items() = instance.items();\n  proto::PredictReply reply;\n  PredictOnFinish callback = []() {};\n  Status status = thread.DispatchAsync(request, &reply, callback);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n  status = thread.AddWorker(pid, worker_context);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n  status = thread.DelWorker(pid);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestModelThead, Commit) {\n  ServableMethodInfo method_info;\n  method_info.name = \"add_cast\";\n  ModelThread thread(\"test_servable\", \"add_cast\", 0, 1, method_info);\n  uint64_t pid = 1;\n\n  proto::Instance instance;\n  proto::PredictReply reply;\n  auto proto_instance1 = reply.add_instances();\n  *proto_instance1->mutable_items() = instance.items();\n  proto::ErrorMsg msg;\n  auto proto_instance2 = reply.add_error_msg();\n  *proto_instance2 = msg;\n\n  std::shared_ptr<WorkerContext> worker_context = InitWorkerContext(&reply);\n  Status status = thread.AddWorker(pid, worker_context);\n  ASSERT_EQ(status.StatusCode(), 
SUCCESS);\n  proto::PredictRequest request;\n  auto proto_instance = request.add_instances();\n  *proto_instance->mutable_items() = instance.items();\n  request.mutable_servable_spec()->set_name(\"test_servable\");\n  request.mutable_servable_spec()->set_version_number(0);\n  request.mutable_servable_spec()->set_method_name(\"add_cast\");\n\n  bool flag = false;\n  PredictOnFinish callback = [&flag]() { flag = true; };\n  status = thread.DispatchAsync(request, &reply, callback);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n  ASSERT_EQ(flag, true);\n  status = thread.DelWorker(pid);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/cpp/tests/test_parse_restful.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"common/common_test.h\"\n#include \"master/server.h\"\n#include \"common/tensor_base.h\"\n#define private public\n#include \"master/restful/http_process.h\"\n#undef private\n\nusing std::string;\nusing std::vector;\nnamespace mindspore {\nnamespace serving {\nclass TestParseInput : public UT::Common {\n public:\n  TestParseInput() = default;\n};\n\nclass TestParseReply : public UT::Common {\n public:\n  TestParseReply() = default;\n};\n\nTEST_F(TestParseInput, test_parse_SUCCESS) {\n  nlohmann::json js = R\"(\n    {\"instances\":[\n        {\n          \"key_tag\":\"scalar\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"bytes\"}\n        },\n        {\n          \"key_tag\":\"tensor\",\n          \"key_int\": [1,2,3],\n          \"key_bool\":[[true, false], [false, true]],\n          \"key_float\":[[1.1, 2.2]],\n          \"key_str\":[\"ut_test\"],\n          \"key_bytes\":{\"b64\":\"dXRfdGVzdA==\"}\n        },\n        {\n          \"key_tag\":\"b64\",\n          \"key_str_format1\":\"ut_test\",\n          \"key_str_foramt2\":{\"b64\":\"dXRfdGVzdA==\", \"type\":\"str\"},\n          \"key_bytes_int16\":{\"b64\":\"AQACAAIAAwADAAQA\", \"type\":\"int16\", \"shape\":[3,2]},\n        
  \"key_bytes_fp16\":{\"b64\":\"ZjxmQJpCZkQ=\", \"type\":\"fp16\", \"shape\":[2,2]},\n          \"key_bytes_bool\":{\"b64\":\"AQA=\", \"type\":\"bool\", \"shape\":[1,2]}\n        }\n      ]\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n  ASSERT_EQ(predict_request.instances().size(), 3);\n  for (int32_t k = 0; k < predict_request.instances().size(); k++) {\n    auto &cur_instance = predict_request.instances(k);\n    auto &items = cur_instance.items();\n    if (k == 0) {\n      ASSERT_EQ(items.size(), 6);\n      for (const auto &item : items) {\n        ProtoTensor pb_tensor(const_cast<proto::Tensor *>(&item.second));\n        if (item.first == \"key_int\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_Int32);\n          const int32_t *data = reinterpret_cast<const int32_t *>(pb_tensor.data());\n          ASSERT_EQ(*data, 1);\n        } else if (item.first == \"key_bool\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_Bool);\n          const bool *data = reinterpret_cast<const bool *>(pb_tensor.data());\n          ASSERT_EQ(*data, false);\n        } else if (item.first == \"key_float\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_Float32);\n          const float *data = reinterpret_cast<const float *>(pb_tensor.data());\n          ASSERT_FLOAT_EQ(*data, 2.3);\n        } else if (item.first == \"key_str\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_String);\n          
auto str_nums = pb_tensor.bytes_data_size();\n          ASSERT_EQ(str_nums, 1);\n          std::string value;\n          size_t length;\n          const uint8_t *ptr = nullptr;\n          pb_tensor.get_bytes_data(0, &ptr, &length);\n          value.resize(length);\n          memcpy_s(value.data(), length, reinterpret_cast<const char *>(ptr), length);\n          ASSERT_EQ(value, \"ut_test\");\n        } else if (item.first == \"key_bytes\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_Bytes);\n          auto str_nums = pb_tensor.bytes_data_size();\n          ASSERT_EQ(str_nums, 1);\n          std::string value;\n          size_t length;\n          const uint8_t *ptr = nullptr;\n          pb_tensor.get_bytes_data(0, &ptr, &length);\n          value.resize(length);\n          memcpy_s(value.data(), length, reinterpret_cast<const char *>(ptr), length);\n          ASSERT_EQ(value, \"ut_test\");\n        }\n      }\n    } else if (k == 1) {\n      ASSERT_EQ(items.size(), 6);\n      for (const auto &item : items) {\n        ProtoTensor pb_tensor(const_cast<proto::Tensor *>(&item.second));\n        auto shape = pb_tensor.shape();\n        if (item.first == \"key_int\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_Int32);\n          ASSERT_EQ(shape.size(), 1);\n          ASSERT_EQ(shape[0], 3);\n          vector<int32_t> expected_value = {1, 2, 3};\n          for (int i = 0; i < 3; i++) {\n            const int32_t *data = reinterpret_cast<const int32_t *>(pb_tensor.data()) + i;\n            ASSERT_EQ(*data, expected_value[i]);\n          }\n        } else if (item.first == \"key_bool\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_Bool);\n          ASSERT_EQ(shape.size(), 2);\n          ASSERT_EQ(shape[0], 2);\n          ASSERT_EQ(shape[1], 2);\n          vector<vector<bool>> expected_value = {{true, false}, {false, true}};\n          for (int i = 0; i < 2; i++) {\n            for (int j = 0; j < 2; j++) {\n              
const bool *data = reinterpret_cast<const bool *>(pb_tensor.data()) + i * 2 + j;\n              ASSERT_EQ(*data, expected_value[i][j]);\n            }\n          }\n        } else if (item.first == \"key_float\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_Float32);\n          ASSERT_EQ(shape.size(), 2);\n          ASSERT_EQ(shape[0], 1);\n          ASSERT_EQ(shape[1], 2);\n          vector<vector<float>> expected_value = {{1.1, 2.2}};\n          for (int i = 0; i < 1; i++) {\n            for (int j = 0; j < 2; j++) {\n              const float *data = reinterpret_cast<const float *>(pb_tensor.data()) + i * 1 + j;\n              ASSERT_FLOAT_EQ(*data, expected_value[i][j]);\n            }\n          }\n        } else if (item.first == \"key_str\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_String);\n          ASSERT_EQ(shape.size(), 1);\n          ASSERT_EQ(shape[0], 1);\n          auto str_nums = pb_tensor.bytes_data_size();\n          ASSERT_EQ(str_nums, 1);\n          std::string value;\n          size_t length;\n          const uint8_t *ptr = nullptr;\n          pb_tensor.get_bytes_data(0, &ptr, &length);\n          value.resize(length);\n          memcpy_s(value.data(), length, reinterpret_cast<const char *>(ptr), length);\n          ASSERT_EQ(value, \"ut_test\");\n        } else if (item.first == \"key_bytes\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_Bytes);\n          auto str_nums = pb_tensor.bytes_data_size();\n          ASSERT_EQ(str_nums, 1);\n          std::string value;\n          size_t length;\n          const uint8_t *ptr = nullptr;\n          pb_tensor.get_bytes_data(0, &ptr, &length);\n          value.resize(length);\n          memcpy_s(value.data(), length, reinterpret_cast<const char *>(ptr), length);\n          ASSERT_EQ(value, \"ut_test\");\n        }\n      }\n    } else if (k == 2) {\n      ASSERT_EQ(items.size(), 6);\n      for (const auto &item : items) {\n        ProtoTensor 
pb_tensor(const_cast<proto::Tensor *>(&item.second));\n        auto shape = pb_tensor.shape();\n        if (item.first == \"key_str_format1\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_String);\n          auto str_nums = pb_tensor.bytes_data_size();\n          ASSERT_EQ(str_nums, 1);\n          std::string value;\n          size_t length;\n          const uint8_t *ptr = nullptr;\n          pb_tensor.get_bytes_data(0, &ptr, &length);\n          value.resize(length);\n          memcpy_s(value.data(), length, reinterpret_cast<const char *>(ptr), length);\n          ASSERT_EQ(value, \"ut_test\");\n        } else if (item.first == \"key_str_format2\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_String);\n          auto str_nums = pb_tensor.bytes_data_size();\n          ASSERT_EQ(str_nums, 1);\n          std::string value;\n          size_t length;\n          const uint8_t *ptr = nullptr;\n          pb_tensor.get_bytes_data(0, &ptr, &length);\n          value.resize(length);\n          memcpy_s(value.data(), length, reinterpret_cast<const char *>(ptr), length);\n          ASSERT_EQ(value, \"ut_test\");\n        } else if (item.first == \"key_bytes_int16\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_Int16);\n          ASSERT_EQ(shape.size(), 2);\n          ASSERT_EQ(shape[0], 3);\n          ASSERT_EQ(shape[1], 2);\n          vector<vector<int16_t>> expected_value = {{1, 2}, {2, 3}, {3, 4}};\n          for (int i = 0; i < 3; i++) {\n            for (int j = 0; j < 2; j++) {\n              const int16_t *data = reinterpret_cast<const int16_t *>(pb_tensor.data()) + i * 2 + j;\n              ASSERT_FLOAT_EQ(*data, expected_value[i][j]);\n            }\n          }\n        } else if (item.first == \"key_bytes_fp16\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_Float16);\n          ASSERT_EQ(shape.size(), 2);\n          ASSERT_EQ(shape[0], 2);\n          ASSERT_EQ(shape[1], 2);\n        } else if 
(item.first == \"key_bytes_bool\") {\n          ASSERT_EQ(pb_tensor.data_type(), DataType::kMSI_Bool);\n          ASSERT_EQ(shape.size(), 2);\n          ASSERT_EQ(shape[0], 1);\n          ASSERT_EQ(shape[1], 2);\n          vector<vector<bool>> expected_value = {{true, false}};\n          for (int i = 0; i < 1; i++) {\n            for (int j = 0; j < 2; j++) {\n              const bool *data = reinterpret_cast<const bool *>(pb_tensor.data()) + i * 2 + j;\n              ASSERT_FLOAT_EQ(*data, expected_value[i][j]);\n            }\n          }\n        }\n      }\n    }\n  }\n}\n\nTEST_F(TestParseInput, test_instances_empty_FAIL) {\n  nlohmann::json js = R\"(\n    {\"\":\n        {\n          \"key_tag\":\"scalar\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"bytes\"}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_instances_incorrect_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instance\":\n        {\n          \"key_tag\":\"scalar\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"bytes\"}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = 
&request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_key_empty_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n        {\n          \"\":\"scalar\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"bytes\"}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_value_empty_SUCCESS) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n        {\n          \"key_tag\":\"\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"bytes\"}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = 
std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_obj_unknown_key_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n        {\n          \"key_tag\":\"\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"bytes\", \"type1\":\"bytes\"}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_obj_nob64_key_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n        {\n          \"key_tag\":\"\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"base64\": \"dXRfdGVzdA==\", \"type\": \"bytes\"}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  
request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_obj_illegal_b64value_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n        {\n          \"key_tag\":\"\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA\", \"type\": \"bytes\"}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_obj_unknown_type_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n        {\n          \"key_tag\":\"\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"INt\"}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = 
std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_obj_error_shape_format_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n        {\n          \"key_tag\":\"\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes_int16\":{\"b64\":\"AQACAAIAAwADAAQA\", \"type\":\"int16\", \"shape\":3}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_obj_error_shape_format2_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n        {\n          \"key_tag\":\"\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes_int16\":{\"b64\":\"AQACAAIAAwADAAQA\", \"type\":\"int16\", \"shape\":[[3],[2]]}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = 
std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_obj_error_shape_value_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n        {\n          \"key_tag\":\"\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes_int16\":{\"b64\":\"AQACAAIAAwADAAQA\", \"type\":\"int16\", \"shape\":[3.0,2.0]}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_obj_error_shape_value2_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n        {\n          \"key_tag\":\"\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes_int16\":{\"b64\":\"AQACAAIAAwADAAQA\", \"type\":\"int16\", \"shape\":[3,3]}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = 
std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_obj_error_shape_value3_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n        {\n          \"key_tag\":\"\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes_int16\":{\"b64\":\"AQACAAIAAwADAAQA\", \"type\":\"int16\", \"shape\":[3,-2]}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_tensor_value_empty_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n       {\n          \"key_tag\":\"tensor\",\n          \"key_int\": [],\n          \"key_bool\":[[true, false], [false, true]],\n          \"key_float\":[[1.1, 2.2]],\n          \"key_str\":[\"ut_test\"],\n          \"key_bytes\":{\"b64\":\"dXRfdGVzdA==\"}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n 
 proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_tensor_value_diff_type_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n       {\n          \"key_tag\":\"tensor\",\n          \"key_int\": [1, 2.0],\n          \"key_bool\":[[true, false], [false, true]],\n          \"key_float\":[[1.1, 2.2]],\n          \"key_str\":[\"ut_test\"],\n          \"key_bytes\":{\"b64\":\"dXRfdGVzdA==\"}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_tensor_value_diff_dimention_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n       {\n          \"key_tag\":\"tensor\",\n          \"key_int\": [1, 2],\n          \"key_bool\":[[true, false], [false]],\n          \"key_float\":[[1.1, 2.2]],\n          \"key_str\":[\"ut_test\"],\n          \"key_bytes\":{\"b64\":\"dXRfdGVzdA==\"}\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  
RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseInput, test_tensor_multi_object_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":\n       {\n          \"key_tag\":\"tensor\",\n          \"key_int\": [1, 2],\n          \"key_bool\":[[true, false], [false, true]],\n          \"key_float\":[[1.1, 2.2]],\n          \"key_str\":[\"ut_test\"],\n          \"key_bytes\":[{\"b64\":\"dXRfdGVzdA==\"}, {\"b64\":\"dXRfdGVzdA==\"}]\n        }\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_NE(status.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseReply, test_reply_SUCCESS) {\n  nlohmann::json js = R\"(\n    {\"instances\":[\n        {\n          \"key_tag\":\"scalar\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"bytes\"}\n        },\n        {\n          \"key_tag\":\"tensor\",\n          \"key_int\": [1,2,3],\n          \"key_bool\":[[true, false], [false, true]],\n          \"key_float\":[[1.1, 2.2]],\n          \"key_str\":[\"ut_test\"]\n        }\n      ]\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, 
size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status(INVALID_INPUTS);\n  status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n\n  nlohmann::json out_js;\n  proto::PredictReply reply;\n  auto instance_ptr = reply.add_instances();\n  auto &map_item = *(instance_ptr->mutable_items());\n  // test scalar:\n  // scalar:key_int\n  proto::Tensor tensor_int;\n  ProtoTensor pb_tensor_int(&tensor_int);\n  DataType type_int = kMSI_Int32;\n  pb_tensor_int.set_data_type(type_int);\n  pb_tensor_int.set_shape({1});\n  pb_tensor_int.resize_data(pb_tensor_int.GetTypeSize(type_int));\n  auto data_int = reinterpret_cast<int32_t *>(pb_tensor_int.mutable_data());\n  *data_int = 1;\n  map_item[\"key_int\"] = tensor_int;\n\n  // scalar: key_bool\n  proto::Tensor tensor_bool;\n  ProtoTensor pb_tensor_bool(&tensor_bool);\n  DataType type_bool = kMSI_Bool;\n  pb_tensor_bool.set_data_type(type_bool);\n  pb_tensor_bool.resize_data(pb_tensor_bool.GetTypeSize(type_bool));\n  auto data_bool = reinterpret_cast<bool *>(pb_tensor_bool.mutable_data());\n  *data_bool = false;\n  map_item[\"key_bool\"] = tensor_bool;\n\n  // scalar: key_float\n  proto::Tensor tensor_float;\n  ProtoTensor pb_tensor_float(&tensor_float);\n  DataType type_float = kMSI_Float32;\n  pb_tensor_float.set_data_type(type_float);\n  pb_tensor_float.set_shape({1});\n  pb_tensor_float.resize_data(pb_tensor_float.GetTypeSize(type_float));\n  auto data_float = reinterpret_cast<float *>(pb_tensor_float.mutable_data());\n  *data_float = 2.3;\n  map_item[\"key_float\"] = tensor_float;\n\n  // scalar: key_str\n  string value = \"ut_test\";\n  proto::Tensor tensor_str;\n  ProtoTensor pb_tensor_str(&tensor_str);\n  DataType type_str = kMSI_String;\n  
pb_tensor_str.set_data_type(type_str);\n  pb_tensor_str.add_bytes_data(reinterpret_cast<uint8_t *>(value.data()), value.length());\n  map_item[\"key_str\"] = tensor_str;\n\n  // scalar: key_bytes\n  string value_bytes = \"ut_test\";\n  proto::Tensor tensor_bytes;\n  ProtoTensor pb_tensor_bytes(&tensor_bytes);\n  DataType type_bytes = kMSI_Bytes;\n  pb_tensor_bytes.set_data_type(type_bytes);\n  pb_tensor_bytes.add_bytes_data(reinterpret_cast<uint8_t *>(value_bytes.data()), value_bytes.length());\n  map_item[\"key_bytes\"] = tensor_bytes;\n\n  // test tensor:\n  auto instance_ptr2 = reply.add_instances();\n  auto &map_item2 = *(instance_ptr2->mutable_items());\n\n  // tensor int:\n  vector<int32_t> tensor_value_int = {1, 2, 3};\n  proto::Tensor tensor_int2;\n  ProtoTensor pb_tensor_int2(&tensor_int2);\n  DataType type_int2 = kMSI_Int32;\n  pb_tensor_int2.set_data_type(type_int2);\n  pb_tensor_int2.set_shape({3});\n  pb_tensor_int2.resize_data(pb_tensor_int2.GetTypeSize(type_int2) * 3);\n  for (int i = 0; i < 3; i++) {\n    auto data_int2 = reinterpret_cast<int32_t *>(pb_tensor_int2.mutable_data()) + i;\n    *data_int2 = tensor_value_int[i];\n  }\n  map_item2[\"key_int\"] = tensor_int2;\n\n  // tensor: key_bool\n  vector<vector<bool>> tensor_value_bool = {{true, false}, {false, true}};\n  proto::Tensor tensor_bool2;\n  ProtoTensor pb_tensor_bool2(&tensor_bool2);\n  DataType type_bool2 = kMSI_Bool;\n  pb_tensor_bool2.set_data_type(type_bool2);\n  pb_tensor_bool2.set_shape({2, 2});\n  pb_tensor_bool2.resize_data(pb_tensor_bool2.GetTypeSize(type_bool2) * 4);\n  for (int i = 0; i < 2; i++) {\n    for (int j = 0; j < 2; j++) {\n      auto data_bool2 = reinterpret_cast<bool *>(pb_tensor_bool2.mutable_data()) + i * 2 + j;\n      *data_bool2 = tensor_value_bool[i][j];\n    }\n  }\n  map_item2[\"key_bool\"] = tensor_bool2;\n\n  // tensor: key_float\n  vector<vector<float>> tensor_value_float = {{1.1, 2.2}};\n  proto::Tensor tensor_float2;\n  ProtoTensor 
pb_tensor_float2(&tensor_float2);\n  DataType type_float2 = kMSI_Float32;\n  pb_tensor_float2.set_data_type(type_float2);\n  pb_tensor_float2.set_shape({1, 2});\n  pb_tensor_float2.resize_data(pb_tensor_float2.GetTypeSize(type_float2) * 2);\n  for (int i = 0; i < 1; i++) {\n    for (int j = 0; j < 2; j++) {\n      auto data_float2 = reinterpret_cast<float *>(pb_tensor_float2.mutable_data()) + i * 1 + j;\n      *data_float2 = tensor_value_float[i][j];\n    }\n  }\n  map_item2[\"key_float\"] = tensor_float2;\n\n  // tensor: key_str\n  vector<string> tensor_value_str = {\"ut_test\", \"ut_test2\"};\n  proto::Tensor tensor_str2;\n  ProtoTensor pb_tensor_str2(&tensor_str2);\n  DataType type_str2 = kMSI_String;\n  pb_tensor_str2.set_data_type(type_str2);\n  pb_tensor_str2.set_shape({2});\n  for (int i = 0; i < 2; i++) {\n    pb_tensor_str2.add_bytes_data(reinterpret_cast<uint8_t *>(tensor_value_str[i].data()),\n                                  tensor_value_str[i].length());\n  }\n  map_item2[\"key_str\"] = tensor_str2;\n\n  Status status2 = restful_service.ParseReply(reply, &out_js);\n  ASSERT_EQ(status2.StatusCode(), SUCCESS);\n  string out_str = out_js.dump();\n  std::cout << \"Parse reply out:\" << out_str << std::endl;\n\n  ASSERT_TRUE(out_js.is_object());\n  for (auto &item : out_js.items()) {\n    ASSERT_EQ(item.key(), \"instances\");\n    ASSERT_TRUE(item.value().is_array());\n    ASSERT_EQ(item.value().size(), 2);\n    int sum = 0;\n    // array\n    for (auto &element : item.value()) {\n      ASSERT_TRUE(element.is_object());\n      if (element.size() == 5) {\n        int count = 0;\n        // object\n        std::cout << \"===start====\" << std::endl;\n        for (auto &it : element.items()) {\n          if (it.key() == \"key_int\") {\n            ASSERT_TRUE(it.value().is_array());\n            ASSERT_EQ(it.value().size(), 1);\n            auto array_items = it.value().items();\n            auto int_val = *(array_items.begin());\n            
ASSERT_TRUE(int_val.value().is_number_integer());\n            ASSERT_EQ(int_val.value().get<int>(), 1);\n            count++;\n          } else if (it.key() == \"key_bool\") {\n            ASSERT_TRUE(it.value().is_boolean());\n            ASSERT_EQ(it.value().get<bool>(), false);\n            count++;\n          } else if (it.key() == \"key_float\") {\n            ASSERT_TRUE(it.value().is_array());\n            ASSERT_EQ(it.value().size(), 1);\n            auto array_items = it.value().items();\n            auto float_val = *(array_items.begin());\n            ASSERT_FLOAT_EQ(float_val.value().get<float>(), 2.3);\n            count++;\n          } else if (it.key() == \"key_str\") {\n            ASSERT_TRUE(it.value().is_string());\n            ASSERT_EQ(it.value().get<std::string>(), \"ut_test\");\n            count++;\n          } else if (it.key() == \"key_bytes\") {\n            ASSERT_TRUE(it.value().is_object());\n            ASSERT_EQ(it.value()[\"b64\"].get<std::string>(), \"dXRfdGVzdA==\");\n            count++;\n          }\n        }\n        ASSERT_EQ(count, 5);\n        sum++;\n      } else if (element.size() == 4) {\n        int count = 0;\n        // object\n        for (auto &it : element.items()) {\n          if (it.key() == \"key_int\") {\n            ASSERT_TRUE(it.value().is_array());\n            ASSERT_EQ(it.value().size(), 3);\n            ASSERT_EQ(it.value()[0].get<int>(), 1);\n            ASSERT_EQ(it.value()[1].get<int>(), 2);\n            ASSERT_EQ(it.value()[2].get<int>(), 3);\n            count++;\n          } else if (it.key() == \"key_bool\") {\n            ASSERT_TRUE(it.value().is_array());\n            ASSERT_EQ(it.value().size(), 2);\n            ASSERT_TRUE(it.value()[0].is_array());\n            ASSERT_EQ(it.value()[0].size(), 2);\n            ASSERT_EQ(it.value()[0][0].get<bool>(), true);\n            ASSERT_EQ(it.value()[0][1].get<bool>(), false);\n            ASSERT_EQ(it.value()[1].size(), 2);\n            
ASSERT_EQ(it.value()[1][0].get<bool>(), false);\n            ASSERT_EQ(it.value()[1][1].get<bool>(), true);\n            count++;\n          } else if (it.key() == \"key_float\") {\n            ASSERT_TRUE(it.value().is_array());\n            ASSERT_EQ(it.value().size(), 1);\n            ASSERT_TRUE(it.value()[0].is_array());\n            ASSERT_EQ(it.value()[0].size(), 2);\n            ASSERT_FLOAT_EQ(it.value()[0][0].get<float>(), 1.1);\n            ASSERT_FLOAT_EQ(it.value()[0][1].get<float>(), 2.2);\n            count++;\n          } else if (it.key() == \"key_str\") {\n            ASSERT_TRUE(it.value().is_array());\n            ASSERT_EQ(it.value().size(), 2);\n            ASSERT_EQ(it.value()[0].get<std::string>(), \"ut_test\");\n            ASSERT_EQ(it.value()[1].get<std::string>(), \"ut_test2\");\n            count++;\n          }\n        }\n        ASSERT_EQ(count, 4);\n        sum++;\n      }\n    }\n    ASSERT_EQ(sum, 2);\n  }\n}\n\nTEST_F(TestParseReply, test_reply_instances_num_not_match_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":[\n        {\n          \"key_tag\":\"scalar\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"bytes\"}\n        }\n      ]\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status(INVALID_INPUTS);\n  status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n\n  nlohmann::json out_js;\n  
proto::PredictReply reply;\n  auto instance_ptr = reply.add_instances();\n  auto &map_item = *(instance_ptr->mutable_items());\n  // test scalar:\n  // scalar:key_int\n  proto::Tensor tensor_int;\n  ProtoTensor pb_tensor_int(&tensor_int);\n  DataType type_int = kMSI_Int32;\n  pb_tensor_int.set_data_type(type_int);\n  pb_tensor_int.set_shape({1});\n  pb_tensor_int.resize_data(pb_tensor_int.GetTypeSize(type_int));\n  auto data_int = reinterpret_cast<int32_t *>(pb_tensor_int.mutable_data());\n  *data_int = 1;\n  map_item[\"key_int\"] = tensor_int;\n\n  // scalar: key_bool\n  proto::Tensor tensor_bool;\n  ProtoTensor pb_tensor_bool(&tensor_bool);\n  DataType type_bool = kMSI_Bool;\n  pb_tensor_bool.set_data_type(type_bool);\n  pb_tensor_bool.resize_data(pb_tensor_bool.GetTypeSize(type_bool));\n  auto data_bool = reinterpret_cast<bool *>(pb_tensor_bool.mutable_data());\n  *data_bool = false;\n  map_item[\"key_bool\"] = tensor_bool;\n\n  // scalar: key_float\n  proto::Tensor tensor_float;\n  ProtoTensor pb_tensor_float(&tensor_float);\n  DataType type_float = kMSI_Float32;\n  pb_tensor_float.set_data_type(type_float);\n  pb_tensor_float.set_shape({1});\n  pb_tensor_float.resize_data(pb_tensor_float.GetTypeSize(type_float));\n  auto data_float = reinterpret_cast<float *>(pb_tensor_float.mutable_data());\n  *data_float = 2.3;\n  map_item[\"key_float\"] = tensor_float;\n\n  // scalar: key_str\n  string value = \"ut_test\";\n  proto::Tensor tensor_str;\n  ProtoTensor pb_tensor_str(&tensor_str);\n  DataType type_str = kMSI_String;\n  pb_tensor_str.set_data_type(type_str);\n  pb_tensor_str.add_bytes_data(reinterpret_cast<uint8_t *>(value.data()), value.length());\n  map_item[\"key_str\"] = tensor_str;\n\n  // scalar: key_bytes\n  string value_bytes = \"ut_test\";\n  proto::Tensor tensor_bytes;\n  ProtoTensor pb_tensor_bytes(&tensor_bytes);\n  DataType type_bytes = kMSI_Bytes;\n  pb_tensor_bytes.set_data_type(type_bytes);\n  
pb_tensor_bytes.add_bytes_data(reinterpret_cast<uint8_t *>(value_bytes.data()), value_bytes.length());\n  map_item[\"key_bytes\"] = tensor_bytes;\n\n  // test tensor:\n  auto instance_ptr2 = reply.add_instances();\n  auto &map_item2 = *(instance_ptr2->mutable_items());\n\n  // tensor int:\n  vector<int32_t> tensor_value_int = {1, 2, 3};\n  proto::Tensor tensor_int2;\n  ProtoTensor pb_tensor_int2(&tensor_int2);\n  DataType type_int2 = kMSI_Int32;\n  pb_tensor_int2.set_data_type(type_int2);\n  pb_tensor_int2.set_shape({3});\n  pb_tensor_int2.resize_data(pb_tensor_int2.GetTypeSize(type_int2) * 3);\n  for (int i = 0; i < 3; i++) {\n    auto data_int2 = reinterpret_cast<int32_t *>(pb_tensor_int2.mutable_data()) + i;\n    *data_int2 = tensor_value_int[i];\n  }\n  map_item2[\"key_int\"] = tensor_int2;\n\n  // tensor: key_bool\n  vector<vector<bool>> tensor_value_bool = {{true, false}, {false, true}};\n  proto::Tensor tensor_bool2;\n  ProtoTensor pb_tensor_bool2(&tensor_bool2);\n  DataType type_bool2 = kMSI_Bool;\n  pb_tensor_bool2.set_data_type(type_bool2);\n  pb_tensor_bool2.set_shape({2, 2});\n  pb_tensor_bool2.resize_data(pb_tensor_bool2.GetTypeSize(type_bool2) * 4);\n  for (int i = 0; i < 2; i++) {\n    for (int j = 0; j < 2; j++) {\n      auto data_bool2 = reinterpret_cast<bool *>(pb_tensor_bool2.mutable_data()) + i * 2 + j;\n      *data_bool2 = tensor_value_bool[i][j];\n    }\n  }\n  map_item2[\"key_bool\"] = tensor_bool2;\n\n  // tensor: key_float\n  vector<vector<float>> tensor_value_float = {{1.1, 2.2}};\n  proto::Tensor tensor_float2;\n  ProtoTensor pb_tensor_float2(&tensor_float2);\n  DataType type_float2 = kMSI_Float32;\n  pb_tensor_float2.set_data_type(type_float2);\n  pb_tensor_float2.set_shape({1, 2});\n  pb_tensor_float2.resize_data(pb_tensor_float2.GetTypeSize(type_float2) * 2);\n  for (int i = 0; i < 1; i++) {\n    for (int j = 0; j < 2; j++) {\n      auto data_float2 = reinterpret_cast<float *>(pb_tensor_float2.mutable_data()) + i * 1 + j;\n      
*data_float2 = tensor_value_float[i][j];\n    }\n  }\n  map_item2[\"key_float\"] = tensor_float2;\n\n  // tensor: key_str\n  vector<string> tensor_value_str = {\"ut_test\", \"ut_test2\"};\n  proto::Tensor tensor_str2;\n  ProtoTensor pb_tensor_str2(&tensor_str2);\n  DataType type_str2 = kMSI_String;\n  pb_tensor_str2.set_data_type(type_str2);\n  pb_tensor_str2.set_shape({2});\n  for (int i = 0; i < 2; i++) {\n    pb_tensor_str2.add_bytes_data(reinterpret_cast<uint8_t *>(tensor_value_str[i].data()),\n                                  tensor_value_str[i].length());\n  }\n  map_item2[\"key_str\"] = tensor_str2;\n\n  Status status2 = restful_service.ParseReply(reply, &out_js);\n  ASSERT_NE(status2.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseReply, test_reply_error_num_not_match_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":[\n        {\n          \"key_tag\":\"scalar\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"bytes\"}\n        }\n      ]\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status(INVALID_INPUTS);\n  status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n\n  nlohmann::json out_js;\n  proto::PredictReply reply;\n  auto error_msg = reply.add_error_msg();\n  error_msg->set_error_msg(\"error1\");\n\n  auto error_msg2 = reply.add_error_msg();\n  error_msg2->set_error_msg(\"error2\");\n\n  Status status2 = 
restful_service.ParseReply(reply, &out_js);\n  ASSERT_NE(status2.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseReply, test_reply_type_not_set_FAIL) {\n  nlohmann::json js = R\"(\n    {\"instances\":[\n        {\n          \"key_tag\":\"scalar\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          \"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"bytes\"}\n        }\n      ]\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n\n  RestfulService restful_service;\n  Status status(INVALID_INPUTS);\n  status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n\n  nlohmann::json out_js;\n  proto::PredictReply reply;\n  auto instance_ptr = reply.add_instances();\n  auto &map_item = *(instance_ptr->mutable_items());\n  // test scalar:\n  // scalar:key_int\n  proto::Tensor tensor_int;\n  ProtoTensor pb_tensor_int(&tensor_int);\n  pb_tensor_int.set_shape({1});\n  pb_tensor_int.resize_data(pb_tensor_int.GetTypeSize(kMSI_Int32));\n  auto data_int = reinterpret_cast<int32_t *>(pb_tensor_int.mutable_data());\n  *data_int = 1;\n  map_item[\"key_int\"] = tensor_int;\n\n  Status status2 = restful_service.ParseReply(reply, &out_js);\n  ASSERT_NE(status2.StatusCode(), SUCCESS);\n}\n\nTEST_F(TestParseReply, test_reply_type_fp16_SUCCESS) {\n  nlohmann::json js = R\"(\n    {\"instances\":[\n        {\n          \"key_tag\":\"scalar\",\n          \"key_int\": 1,\n          \"key_bool\": false,\n          \"key_float\": 2.3,\n          \"key_str\": \"ut_test\",\n          
\"key_bytes\": {\"b64\": \"dXRfdGVzdA==\", \"type\": \"bytes\"}\n        }\n      ]\n    }\n  )\"_json;\n\n  struct evhttp_request request_local = {};\n  struct evhttp_request *request = &request_local;\n  int size = 100;\n  std::shared_ptr<DecomposeEvRequest> request_msg = std::make_shared<DecomposeEvRequest>(request, size);\n  request_msg->request_message_ = js;\n  std::shared_ptr<RestfulRequest> restful_request = std::make_shared<RestfulRequest>(request_msg);\n  proto::PredictRequest predict_request;\n  RestfulService restful_service;\n  Status status(INVALID_INPUTS);\n  status = restful_service.ParseRequest(restful_request, &predict_request);\n  ASSERT_EQ(status.StatusCode(), SUCCESS);\n\n  nlohmann::json out_js;\n  proto::PredictReply reply;\n  auto instance_ptr = reply.add_instances();\n  auto &map_item = *(instance_ptr->mutable_items());\n  // test scalar:\n  // scalar: key_float\n  proto::Tensor tensor_float;\n  ProtoTensor pb_tensor_float(&tensor_float);\n  DataType type_float = kMSI_Float16;\n  pb_tensor_float.set_data_type(type_float);\n  pb_tensor_float.set_shape({1});\n  pb_tensor_float.resize_data(pb_tensor_float.GetTypeSize(type_float));\n  map_item[\"key_float16\"] = tensor_float;\n\n  Status status2 = restful_service.ParseReply(reply, &out_js);\n  ASSERT_EQ(status2.StatusCode(), SUCCESS);\n}\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/cpp/tests/test_shared_memory.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"tests/ut/cpp/common/test_servable_common.h\"\n#include \"common/shared_memory.h\"\n\n#define private public\n#undef private\n\nusing std::string;\nusing std::vector;\nnamespace mindspore {\nnamespace serving {\n\nclass TestSharedMemory : public UT::Common {\n public:\n  void SetUp() override {\n    UT::Common::SetUp();\n  }\n  void TearDown() override {\n    UT::Common::TearDown();\n  }\n};\n\nTEST_F(TestSharedMemory, test_alloc_release_shared_memory_success) {\n  SharedMemoryAllocator allocator;\n  std::string memory_key_prefix = \"test_memory_key\";\n  uint64_t item_size = 64;\n  auto status = allocator.NewMemoryBuffer(memory_key_prefix, item_size, 3);\n  ASSERT_TRUE(status == SUCCESS);\n  std::string first_memory_key;\n  std::vector<SharedMemoryItem> first_shm_list;\n  for (int i = 0; i < 3; i++) {\n    SharedMemoryItem shm_item;\n    status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n    ASSERT_TRUE(status == SUCCESS);\n    ASSERT_EQ(shm_item.memory_key_prefix, memory_key_prefix);\n    ASSERT_EQ(shm_item.size, item_size);\n    ASSERT_TRUE(shm_item.memory_key.find(memory_key_prefix) != std::string::npos);\n    if (first_memory_key.empty()) {\n      first_memory_key = shm_item.memory_key;\n    } else {\n      ASSERT_EQ(first_memory_key, shm_item.memory_key);\n    }\n    
first_shm_list.push_back(shm_item);\n  }\n  // new shared memory\n  std::string second_memory_key;\n  std::vector<SharedMemoryItem> second_shm_list;\n  for (int i = 0; i < 3; i++) {\n    SharedMemoryItem shm_item;\n    status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n    ASSERT_TRUE(status == SUCCESS);\n    ASSERT_EQ(shm_item.memory_key_prefix, memory_key_prefix);\n    ASSERT_EQ(shm_item.size, item_size);\n    ASSERT_TRUE(shm_item.memory_key.find(memory_key_prefix) != std::string::npos);\n    if (second_memory_key.empty()) {\n      second_memory_key = shm_item.memory_key;\n    } else {\n      ASSERT_EQ(second_memory_key, shm_item.memory_key);\n    }\n    ASSERT_NE(second_memory_key, first_memory_key);\n    second_shm_list.push_back(shm_item);\n  }\n  // free shared memory and alloc\n  {\n    auto &free_memory = second_shm_list[1];\n    allocator.ReleaseMemoryItem(free_memory);\n    SharedMemoryItem shm_item;\n    status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n    ASSERT_TRUE(status == SUCCESS);\n    ASSERT_EQ(shm_item.memory_key, free_memory.memory_key);\n    ASSERT_EQ(shm_item.bytes_size, free_memory.bytes_size);\n    ASSERT_EQ(shm_item.offset_address, free_memory.offset_address);\n    ASSERT_EQ(shm_item.offset, free_memory.offset);\n  }\n  {\n    auto &free_memory = first_shm_list[1];\n    allocator.ReleaseMemoryItem(free_memory);\n    SharedMemoryItem shm_item;\n    status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n    ASSERT_TRUE(status == SUCCESS);\n    ASSERT_EQ(shm_item.memory_key, free_memory.memory_key);\n    ASSERT_EQ(shm_item.bytes_size, free_memory.bytes_size);\n    ASSERT_EQ(shm_item.offset_address, free_memory.offset_address);\n    ASSERT_EQ(shm_item.offset, free_memory.offset);\n  }\n}\n\nTEST_F(TestSharedMemory, test_alloc_release_shared_memory_repeat_release_failed) {\n  SharedMemoryAllocator allocator;\n  std::string memory_key_prefix = \"test_memory_key\";\n  uint64_t item_size = 64;\n  auto 
status = allocator.NewMemoryBuffer(memory_key_prefix, item_size, 3);\n  ASSERT_TRUE(status == SUCCESS);\n  SharedMemoryItem shm_item;\n  status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n  ASSERT_TRUE(status == SUCCESS);\n  allocator.ReleaseMemoryItem(shm_item);\n  try {\n    allocator.ReleaseMemoryItem(shm_item);\n    FAIL();\n  } catch (std::runtime_error &ex) {\n    std::string error_msg = ex.what();\n    auto index = error_msg.find(\"Shared memory \" + shm_item.memory_key + \" has already been in free set, offset: \");\n    ASSERT_TRUE(index != std::string::npos);\n  }\n}\n\nTEST_F(TestSharedMemory, test_alloc_attach_shared_memory_success) {\n  SharedMemoryAllocator allocator;\n  std::string memory_key_prefix = \"test_memory_key\";\n  uint64_t item_size = 64;\n  auto status = allocator.NewMemoryBuffer(memory_key_prefix, item_size, 3);\n  ASSERT_TRUE(status == SUCCESS);\n  SharedMemoryItem shm_item;\n  status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n  ASSERT_TRUE(status == SUCCESS);\n  SharedMemoryManager attach;\n  SharedMemoryAttachItem attach_item;\n  status = attach.Attach(shm_item.memory_key, shm_item.bytes_size, shm_item.offset, shm_item.size, &attach_item);\n  ASSERT_TRUE(status == SUCCESS);\n  ASSERT_NE(shm_item.offset_address, attach_item.offset_address);\n  attach_item.offset_address[0] = 0xfe;\n  ASSERT_EQ(0xfe, shm_item.offset_address[0]);\n\n  shm_item.offset_address[1] = 0xfa;\n  ASSERT_EQ(0xfa, attach_item.offset_address[1]);\n  attach.Detach(attach_item.memory_key);\n}\n\nTEST_F(TestSharedMemory, test_alloc_twice_attach_shared_memory_success) {\n  SharedMemoryAllocator allocator;\n  std::string memory_key_prefix = \"test_memory_key\";\n  uint64_t item_size = 64;\n  auto status = allocator.NewMemoryBuffer(memory_key_prefix, item_size, 3);\n  ASSERT_TRUE(status == SUCCESS);\n\n  SharedMemoryManager attach;\n  std::string memory_key;\n  // first memory item\n  {\n    SharedMemoryItem shm_item;\n    status = 
allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n    ASSERT_TRUE(status == SUCCESS);\n    SharedMemoryAttachItem attach_item;\n    status = attach.Attach(shm_item.memory_key, shm_item.bytes_size, shm_item.offset, shm_item.size, &attach_item);\n    ASSERT_TRUE(status == SUCCESS);\n    ASSERT_NE(shm_item.offset_address, attach_item.offset_address);\n    attach_item.offset_address[0] = 0xfe;\n    ASSERT_EQ(0xfe, shm_item.offset_address[0]);\n    shm_item.offset_address[1] = 0xfa;\n    ASSERT_EQ(0xfa, attach_item.offset_address[1]);\n    memory_key = shm_item.memory_key;\n  }\n  // second memory item\n  {\n    SharedMemoryItem shm_item;\n    status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n    ASSERT_TRUE(status == SUCCESS);\n    SharedMemoryAttachItem attach_item;\n    status = attach.Attach(shm_item.memory_key, shm_item.bytes_size, shm_item.offset, shm_item.size, &attach_item);\n    ASSERT_TRUE(status == SUCCESS);\n    ASSERT_NE(shm_item.offset_address, attach_item.offset_address);\n    attach_item.offset_address[3] = 0xfe;\n    ASSERT_EQ(0xfe, shm_item.offset_address[3]);\n    shm_item.offset_address[4] = 0xfa;\n    ASSERT_EQ(0xfa, attach_item.offset_address[4]);\n  }\n  attach.Detach(memory_key);\n}\n\nTEST_F(TestSharedMemory, test_alloc_re_attach_shared_memory_success) {\n  SharedMemoryAllocator allocator;\n  std::string memory_key_prefix = \"test_memory_key\";\n  uint64_t item_size = 64;\n  auto status = allocator.NewMemoryBuffer(memory_key_prefix, item_size, 3);\n  ASSERT_TRUE(status == SUCCESS);\n\n  SharedMemoryManager attach;\n  // first memory item\n  {\n    SharedMemoryItem shm_item;\n    status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n    ASSERT_TRUE(status == SUCCESS);\n    SharedMemoryAttachItem attach_item;\n    status = attach.Attach(shm_item.memory_key, shm_item.bytes_size, shm_item.offset, shm_item.size, &attach_item);\n    ASSERT_TRUE(status == SUCCESS);\n    ASSERT_NE(shm_item.offset_address, 
attach_item.offset_address);\n    attach_item.offset_address[0] = 0xfe;\n    ASSERT_EQ(0xfe, shm_item.offset_address[0]);\n    shm_item.offset_address[1] = 0xfa;\n    ASSERT_EQ(0xfa, attach_item.offset_address[1]);\n    attach.Detach(shm_item.memory_key);\n  }\n  // second memory item\n  {\n    SharedMemoryItem shm_item;\n    status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n    ASSERT_TRUE(status == SUCCESS);\n    SharedMemoryAttachItem attach_item;\n    status = attach.Attach(shm_item.memory_key, shm_item.bytes_size, shm_item.offset, shm_item.size, &attach_item);\n    ASSERT_TRUE(status == SUCCESS);\n    ASSERT_NE(shm_item.offset_address, attach_item.offset_address);\n    attach_item.offset_address[3] = 0xfe;\n    ASSERT_EQ(0xfe, shm_item.offset_address[3]);\n    shm_item.offset_address[4] = 0xfa;\n    ASSERT_EQ(0xfa, attach_item.offset_address[4]);\n    attach.Detach(shm_item.memory_key);\n  }\n}\n\nTEST_F(TestSharedMemory, test_alloc_attach_shared_memory_attach_repeat_success) {\n  SharedMemoryAllocator allocator;\n  std::string memory_key_prefix = \"test_memory_key\";\n  uint64_t item_size = 64;\n  auto status = allocator.NewMemoryBuffer(memory_key_prefix, item_size, 3);\n  ASSERT_TRUE(status == SUCCESS);\n  SharedMemoryItem shm_item;\n  status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n  ASSERT_TRUE(status == SUCCESS);\n  SharedMemoryManager attach;\n  SharedMemoryAttachItem attach_item;\n  status = attach.Attach(shm_item.memory_key, shm_item.bytes_size, shm_item.offset, shm_item.size, &attach_item);\n  ASSERT_TRUE(status == SUCCESS);\n  SharedMemoryAttachItem attach_item2;\n  status = attach.Attach(shm_item.memory_key, shm_item.bytes_size, shm_item.offset, shm_item.size, &attach_item2);\n  ASSERT_TRUE(status == SUCCESS);\n  ASSERT_EQ(attach_item.offset_address, attach_item2.offset_address);\n}\n\n\nTEST_F(TestSharedMemory, test_alloc_attach_shared_memory_detach_repeat_failed) {\n  SharedMemoryAllocator allocator;\n  
std::string memory_key_prefix = \"test_memory_key\";\n  uint64_t item_size = 64;\n  auto status = allocator.NewMemoryBuffer(memory_key_prefix, item_size, 3);\n  ASSERT_TRUE(status == SUCCESS);\n  SharedMemoryItem shm_item;\n  status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n  ASSERT_TRUE(status == SUCCESS);\n  SharedMemoryManager attach;\n  SharedMemoryAttachItem attach_item;\n  status = attach.Attach(shm_item.memory_key, shm_item.bytes_size, shm_item.offset, shm_item.size, &attach_item);\n  ASSERT_TRUE(status == SUCCESS);\n  status = attach.Detach(shm_item.memory_key);\n  ASSERT_TRUE(status == SUCCESS);\n  status = attach.Detach(shm_item.memory_key);\n  ASSERT_TRUE(status != SUCCESS);\n}\n\nTEST_F(TestSharedMemory, test_alloc_attach_invalid_shared_memory_failed) {\n  SharedMemoryAllocator allocator;\n  std::string memory_key_prefix = \"test_memory_key\";\n  uint64_t item_size = 64;\n  auto status = allocator.NewMemoryBuffer(memory_key_prefix, item_size, 1);\n  ASSERT_TRUE(status == SUCCESS);\n  SharedMemoryItem shm_item;\n  status = allocator.AllocMemoryItem(memory_key_prefix, &shm_item);\n  ASSERT_TRUE(status == SUCCESS);\n\n  SharedMemoryManager attach;\n  SharedMemoryAttachItem attach_item;\n  // invalid memory key\n  status = attach.Attach(\"invalid memory key\", shm_item.bytes_size, shm_item.offset, shm_item.size, &attach_item);\n  ASSERT_TRUE(status != SUCCESS);\n\n  // invalid memory bytes size\n  status = attach.Attach(shm_item.memory_key, 0,  shm_item.offset, shm_item.size, &attach_item);\n  ASSERT_TRUE(status != SUCCESS);\n\n  // invalid memory data offset\n  status = attach.Attach(shm_item.memory_key, shm_item.bytes_size, shm_item.bytes_size, shm_item.size, &attach_item);\n  ASSERT_TRUE(status != SUCCESS);\n\n  // invalid memory data size\n  status = attach.Attach(shm_item.memory_key, shm_item.bytes_size, 0, shm_item.bytes_size + 1, &attach_item);\n  ASSERT_TRUE(status != SUCCESS);\n\n  // success\n  status = 
attach.Attach(shm_item.memory_key, shm_item.bytes_size,  shm_item.offset, shm_item.size, &attach_item);\n  ASSERT_TRUE(status == SUCCESS);\n}\n\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/cpp/tests/test_start_preprocess_postprocess.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"tests/ut/cpp/common/test_servable_common.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass TestPreprocessPostprocess : public TestMasterWorkerClient {\n public:\n  TestPreprocessPostprocess() = default;\n  ~TestPreprocessPostprocess() = default;\n  virtual void SetUp() {}\n  virtual void TearDown() { TestMasterWorkerClient::TearDown(); }\n  MethodSignature InitDefaultMethod() {\n    MethodSignature method_signature = InitMethodSig();\n    // preprocess\n    method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n    // method input 0 and input 1 as servable input\n    method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n    // postprocess\n    method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n    // servable output as method output\n    method_signature.SetReturn({{3, 0}});\n    return method_signature;\n  }\n  MethodSignature InitMethodSig() {\n    MethodSignature method_signature;\n    method_signature.servable_name = \"test_servable\";\n    method_signature.method_name = \"add_cast\";\n    method_signature.inputs = {\"x1\", \"x2\"};\n    method_signature.outputs = {\"y\"};\n    return method_signature;\n  }\n  const std::string model_file_ = \"test_add.mindir\";\n};\n\nTEST_F(TestPreprocessPostprocess, 
test_master_worker_with_preproces_and_postprocess_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  // declare_servable\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  // register method\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitDefaultMethod();\n  ServableRegister::Instance().RegisterMethod(method_signature);\n\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_TRUE(status.IsSuccess());\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // input int32 --> preprocess int32-float32 --> servable float32-float32 --> postprocess int32-int32, shape [2,2]\n  auto y_data_list =\n    InitMultiInstancesRequest<int32_t, int32_t>(&request, servable_name_, \"add_cast\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckMultiInstanceResult(reply, y_data_list, instances_count);\n}\n\nTEST_F(TestPreprocessPostprocess, test_master_worker_with_preproces_and_postprocess_batching_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  // declare_servable\n  // with_batch_dim = true\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  // register method\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitDefaultMethod();\n  ServableRegister::Instance().RegisterMethod(method_signature);\n\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // input int32 --> preprocess int32-float32 --> servable float32-float32 --> 
postprocess int32-int32, shape [2]\n  auto y_data_list =\n    InitMultiInstancesShape2Request<int32_t, int32_t>(&request, servable_name_, \"add_cast\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckMultiInstanceResult(reply, y_data_list, instances_count);\n}\n\nTEST_F(TestPreprocessPostprocess, test_master_worker_with_only_preproces_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  // declare_servable\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  // register method\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // servable output as method output\n  method_signature.SetReturn({{2, 0}});\n\n  ServableRegister::Instance().RegisterMethod(method_signature);\n\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // input int32 --> preprocess int32-float32 --> servable float32-float32, shape [2,2]\n  auto y_data_list =\n    InitMultiInstancesRequest<int32_t, float>(&request, servable_name_, \"add_cast\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckMultiInstanceResult(reply, y_data_list, instances_count);\n}\n\nTEST_F(TestPreprocessPostprocess, test_master_worker_with_only_preproces_batching_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, 
\"test_add.mindir\");\n  // declare_servable\n  // with_batch_dim=true\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  // register method\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  method_signature.SetReturn({{2, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // input int32 --> preprocess int32-float32 --> servable float32-float32, shape [2]\n  auto y_data_list =\n    InitMultiInstancesShape2Request<int32_t, float>(&request, servable_name_, \"add_cast\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  ASSERT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckMultiInstanceResult(reply, y_data_list, instances_count);\n}\n\nTEST_F(TestPreprocessPostprocess, test_master_worker_with_only_postprocess_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  // declare_servable\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  // register method\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n  MethodSignature method_signature = InitMethodSig();\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{0, 0}, {0, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{1, 0}});\n  // servable output as method 
output\n  method_signature.SetReturn({{2, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // input float32 --> servable float32-float32 --> postprocess float32-int32, shape [2,2]\n  auto y_data_list =\n    InitMultiInstancesRequest<float, int32_t>(&request, servable_name_, \"add_cast\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckMultiInstanceResult(reply, y_data_list, instances_count);\n}\n\nTEST_F(TestPreprocessPostprocess, test_master_worker_with_only_postprocess_batching_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  // declare_servable\n  // with_batch_dim=true\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  // register method\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n  MethodSignature method_signature = InitMethodSig();\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{0, 0}, {0, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{1, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{2, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // input float32 --> servable float32-float32 --> postprocess float32-int32, shape [2]\n  auto y_data_list =\n    InitMultiInstancesShape2Request<float, 
int32_t>(&request, servable_name_, \"add_cast\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  CheckMultiInstanceResult(reply, y_data_list, instances_count);\n}\n\n// Test data flow in input\\preprocess\\predict\\postprocess\nTEST_F(TestPreprocessPostprocess, test_worker_start_preprocess_not_found) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  try {\n    MethodSignature method_signature = InitMethodSig();\n    // preprocess\n    method_signature.AddStageFunction(\"preprocess_fake_fun\", {{0, 0}, {0, 1}});\n\n    // method input 0 and input 1 as servable input\n    method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n    // postprocess\n    method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n    // servable output as method output\n    method_signature.SetReturn({{3, 0}});\n    FAIL();\n  } catch (std::runtime_error &ex) {\n    ExpectContainMsg(ex.what(), \"Function 'preprocess_fake_fun' is not defined\")\n  }\n}\n\nTEST_F(TestPreprocessPostprocess, test_worker_start_postprocess_not_found) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  try {\n    MethodSignature method_signature = InitMethodSig();\n    // preprocess\n    method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n    // method input 0 and input 1 as servable input\n    method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n    // postprocess\n    
method_signature.AddStageFunction(\"postprocess_fake_fun\", {{2, 0}});\n    // servable output as method output\n    method_signature.SetReturn({{3, 0}});\n    FAIL();\n  } catch (std::runtime_error &ex) {\n    ExpectContainMsg(ex.what(), \"Function 'postprocess_fake_fun' is not defined\")\n  }\n}\n\nTEST_F(TestPreprocessPostprocess, test_preproces_process_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitDefaultMethod();\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // input float32, invalid for preprocess, which required int32\n  auto y_data_list = InitMultiInstancesRequest<float, float>(&request, servable_name_, \"add_cast\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), instances_count);\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Call failed: Input data type invalid\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_postproces_process_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  
method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\",\n                                    {{0, 0}});  // use method input as postprocess input\n  // servable output as method output\n  method_signature.SetReturn({{2, 0}});\n\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_TRUE(status.IsSuccess());\n\n  // run servable\n  proto::PredictRequest request;\n  size_t instances_count = 3;\n  // input int32, invalid for postprocess\n  auto y_data_list =\n    InitMultiInstancesRequest<int32_t, int32_t>(&request, servable_name_, \"add_cast\", 0, instances_count);\n\n  proto::PredictReply reply;\n  auto grpc_status = Dispatch(request, &reply);\n  EXPECT_TRUE(grpc_status.ok());\n  // checkout output\n  ASSERT_EQ(reply.error_msg_size(), instances_count);\n  ExpectContainMsg(reply.error_msg(0).error_msg(), \"Postprocess failed: Input data type invalid\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_preproces_input_invalid1_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{1, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = 
StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n\n  ExpectContainMsg(status.StatusMessage(), \"The 0th input data of stage 1 cannot not come from stage 1\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_preproces_input_invalid2_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {2, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"The 1th input data of stage 1 cannot not come from stage 2\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_preproces_input_invalid3_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {3, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  
method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"The 1th input data of stage 1 cannot not come from stage 3\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_preproces_input_invalid4_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 2}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(),\n                   \"The stage 1 1th input uses method 2th input, that is greater than the method inputs size 2\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_predict_input_invalid1_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature 
method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{2, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"The 0th input data of stage 2 cannot not come from stage 2\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_predict_input_invalid2_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {3, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"The 1th input data of stage 2 cannot not come from stage 3\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_predict_input_invalid3_failed) {\n  Init(\"test_servable_dir\", 
\"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 2}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(),\n                   \"The stage(begin with 1) 2 1th input uses c++ function stub_preprocess_cast_int32_to_fp32_cpp \"\n                   \"2th output, that is greater than the function output size 2\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_predict_input_invalid4_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{0, 2}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  
ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(),\n                   \"The stage 2 0th input uses method 2th input, that is greater than the method inputs size 2\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_postprocess_input_invalid1_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{3, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"The 0th input data of stage 3 cannot not come from stage 3\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_postprocess_input_invalid2_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and 
input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{0, 2}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(),\n                   \"The stage 3 0th input uses method 2th input, that is greater than the method inputs size 2\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_postprocess_input_invalid3_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{1, 2}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(),\n                   \"The stage(begin with 1) 3 0th input uses c++ function stub_preprocess_cast_int32_to_fp32_cpp\"\n                   \" 2th output, that is greater than the function output size 2\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_postprocess_input_invalid4_failed) {\n  
Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 1}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"The stage(begin with 1) 3 0th input uses model \"\n                   \"test_add.mindir subgraph 0 1th output, that is greater than the model output size 1\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_return_invalid1_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{0, 2}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // 
start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(),\n                   \"The stage 4 0th input uses method 2th input, \"\n                   \"that is greater than the method inputs size 2\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_return_invalid2_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{1, 2}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(),\n                   \"The stage(begin with 1) 4 0th input uses c++ function stub_preprocess_cast_int32_to_fp32_cpp\"\n                   \" 2th output, that is greater than the function output size 2\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_return_invalid3_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  
method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(model_file_, {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{2, 1}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"The stage(begin with 1) 4 0th input uses model \"\n                   \"test_add.mindir subgraph 0 1th output, that is greater than the model output size 1\");\n}\n\nTEST_F(TestPreprocessPostprocess, test_return_invalid4_failed) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", false);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature = InitMethodSig();\n  // preprocess\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 1}});\n\n  // method input 0 and input 1 as servable input\n  method_signature.AddStageModel(\"test_add.mindir\", {{1, 0}, {1, 1}});\n  // postprocess\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // servable output as method output\n  method_signature.SetReturn({{3, 1}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(),\n                   \"The stage(begin with 1) 4 0th input uses c++ function stub_postprocess_cast_fp32_to_int32_cpp\"\n                 
               \" 1th output, that is greater than the function output size 1\");\n}\n\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/cpp/tests/test_start_worker.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"tests/ut/cpp/common/test_servable_common.h\"\n\nnamespace mindspore {\nnamespace serving {\nclass TestStartWorker : public TestMasterWorker {\n public:\n  TestStartWorker() = default;\n  ~TestStartWorker() = default;\n  virtual void SetUp() {}\n  virtual void TearDown() { TestMasterWorker::TearDown(); }\n};\n\nTEST_F(TestStartWorker, test_worker_start_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_TRUE(status.IsSuccess());\n}\n\nTEST_F(TestStartWorker, test_worker_start_error_model_file_name) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add_error.mindir\", \"mindir\", true);\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n\n  // start_servable\n  auto status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"Load model failed, servable directory: 
\");\n}\n\nTEST_F(TestStartWorker, test_worker_start_error_version_number) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n\n  // start_servable\n  int error_version_number = 2;\n  auto status = StartServable(\"test_servable_dir\", \"test_servable\", error_version_number);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(\n    status.StatusMessage(),\n    \"Start servable failed: There is no specified version directory of models, specified version number: 2\");\n}\n\nTEST_F(TestStartWorker, test_worker_start_multi_version_number) {\n  auto servable_dir = std::string(test_info_->test_case_name()) + \"_test_servable_dir\";\n  Init(servable_dir, \"test_servable\", 1, \"test_add.mindir\");\n  Init(servable_dir, \"test_servable\", 2, \"test_add.mindir\");\n\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n\n  // start_servable\n  int version_number = 2;\n  Status status = StartServable(servable_dir, \"test_servable\", version_number);\n  EXPECT_TRUE(status.IsSuccess());\n}\n\nTEST_F(TestStartWorker, test_worker_start_version_number_no_valid) {\n  auto servable_dir = std::string(test_info_->test_case_name()) + \"_test_servable_dir\";\n\n  Init(servable_dir, \"test_servable\", 0, \"test_add.mindir\");\n  Init(servable_dir, \"test_servable\", -2, \"test_add.mindir\");\n\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n\n  // start_servable\n  Status status = StartServable(servable_dir, \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(\n    
status.StatusMessage(),\n    \"Start servable failed: There is no specified version directory of models, specified version number: 1\");\n}\n\nTEST_F(TestStartWorker, test_worker_start_error_servable_dir) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n\n  // start_servable\n  std::string error_servable_dir = \"test_servable_dir_error\";\n  Status status = StartServable(error_servable_dir, \"test_servable\", 0);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(\n    status.StatusMessage(),\n    \"Start servable failed: There is no specified version directory of models, specified version number: 0\");\n}\n\nTEST_F(TestStartWorker, test_worker_start_error_servable_name) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n\n  // start_servable\n  std::string error_servable_name = \"test_servable_error\";\n  Status status = StartServable(\"test_servable_dir\", error_servable_name, 0);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"'test_servable_error' has not been registered\");\n}\n\nTEST_F(TestStartWorker, test_worker_start_error_servable_format) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"om\", true);\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"Not support device 
type Ascend and model type OM. \");\n}\n\nTEST_F(TestStartWorker, test_worker_start_no_registered_method) {\n  Init(\"test_servable_dir\", \"test_servable\", 2, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  // no registered method\n  // RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 2);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"There is no method registered for servable\");\n}\n\nTEST_F(TestStartWorker, test_worker_start_no_declared_servable) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  // no declared method\n  // DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  auto status = RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(), \"RegisterInputOutputInfo failed, cannot find model test_add.mindir\");\n}\n\nTEST_F(TestStartWorker, test_worker_start_multi_method) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common2\", {\"x1\", \"x2\"}, {\"y\"}, 2, 1);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_TRUE(status.IsSuccess());\n}\n\nTEST_F(TestStartWorker, test_worker_start_method_servable_input_count_not_match) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  size_t servable_input_count 
= 1;\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, servable_input_count, 1);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(),\n                   \"The inputs count 1 in register_method not equal to the count 2 defined in model\")\n}\n\nTEST_F(TestStartWorker, test_worker_start_method_servable_output_count_not_match) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  size_t servable_output_count = 2;\n  RegisterMethod(\"test_servable\", \"test_add.mindir\", \"add_common\", {\"x1\", \"x2\"}, {\"y\"}, 2, servable_output_count);\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_FALSE(status.IsSuccess());\n  ExpectContainMsg(status.StatusMessage(),\n                   \"The outputs count 2 in register_method not equal to the count 1 defined in model\")\n}\n\n// Test data flow in input\\preprocess\\predict\\postprocess\nTEST_F(TestStartWorker, test_worker_start_preprocess_not_found) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature;\n  method_signature.servable_name = \"test_servable\";\n  method_signature.method_name = \"add_common\";\n  method_signature.inputs = {\"x1\", \"x2\"};\n  method_signature.outputs = {\"y\"};\n  // preprocess\n  try {\n    method_signature.AddStageFunction(\"preprocess_fake_fun\", {{0, 0}, {0, 0}});\n    // method input 0 and input 1 as servable input\n    method_signature.AddStageModel(\"test_add.mindir\", {{1, 0}, {0, 1}}, 0, \"\");\n    // servable output as method 
output\n    method_signature.SetReturn({{2, 0}});\n    ServableRegister::Instance().RegisterMethod(method_signature);\n  } catch (std::runtime_error &ex) {\n    ExpectContainMsg(ex.what(), \"Function 'preprocess_fake_fun' is not defined\")\n  }\n}\n\nTEST_F(TestStartWorker, test_worker_start_with_preproces_and_postprocess_success) {\n  Init(\"test_servable_dir\", \"test_servable\", 1, \"test_add.mindir\");\n  DeclareServable(\"test_servable\", \"test_add.mindir\", \"mindir\", true);\n  ServableRegister::Instance().RegisterInputOutputInfo(\"test_add.mindir\", 2, 1);\n\n  MethodSignature method_signature;\n  method_signature.servable_name = \"test_servable\";\n  method_signature.method_name = \"add_cast\";\n  method_signature.inputs = {\"x1\", \"x2\"};\n  method_signature.outputs = {\"y\"};\n  // preprocess, stage 1, input is input data(stage index = 0) 0 and 1\n  method_signature.AddStageFunction(\"stub_preprocess_cast_int32_to_fp32_cpp\", {{0, 0}, {0, 0}});\n  // model, stage 2, input is stage 1 output data 0 and 1\n  method_signature.AddStageModel(\"test_add.mindir\", {{1, 0}, {1, 1}}, 0);\n  // postprocess, stage 3, input is stage 2 output data 0 and 1\n  method_signature.AddStageFunction(\"stub_postprocess_cast_fp32_to_int32_cpp\", {{2, 0}});\n  // method output, stage 3 output data 0\n  method_signature.SetReturn({{3, 0}});\n  ServableRegister::Instance().RegisterMethod(method_signature);\n\n  // start_servable\n  Status status = StartServable(\"test_servable_dir\", \"test_servable\", 1);\n  EXPECT_TRUE(status.IsSuccess());\n}\n\n}  // namespace serving\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/python/CMakeLists.txt",
    "content": "set(STUB_DIR ../stub)\nset(ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../..)\n\nfile(GLOB_RECURSE UT_SERVING_STUB RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} \"${STUB_DIR}/cxx_api/*.cc\"\n        \"${STUB_DIR}/graph_impl_stub.cc\" \"${STUB_DIR}/include/utils/*.cc\")\n\nadd_library(mindspore SHARED ${UT_SERVING_STUB})\n\nset(UT_SERVING_COMMON ${UT_SERVING_CORE_SRC} ${UT_SERVING_STUB})\n\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\ninclude_directories(${STUB_DIR}/..)\ninclude_directories(${STUB_DIR})\ninclude_directories(${STUB_DIR}/include)\n\ninclude_directories(${ROOT_DIR}/third_party)\nlink_directories(${CMAKE_BINARY_DIR}/securec/src)\n\ntarget_link_libraries(mindspore PRIVATE ${SECUREC_LIBRARY} pthread)\ntarget_link_libraries(mindspore PRIVATE mindspore_serving::glog)\n\nset(LIBRARY_OUTPUT_PATH ${ROOT_DIR}/build/package/tests/mindspore/lib/)\n\n# copy mindspore include\nfile(COPY ${STUB_DIR}/include/api DESTINATION ${ROOT_DIR}/build/package/tests/mindspore/include)\n"
  },
  {
    "path": "tests/ut/python/mindspore/dataset/__init__.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n"
  },
  {
    "path": "tests/ut/python/runtest.sh",
    "content": "#!/bin/bash\n# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nset -e\nBASEPATH=$(\n  cd \"$(dirname \"$0\")\"\n  pwd\n)\nPROJECT_PATH=${BASEPATH}/../../..\n\nrm -rf ${PROJECT_PATH}/build/package/mindspore_serving/server\nrm -rf ${PROJECT_PATH}/build/package/mindspore_serving/client\ncp -r ${PROJECT_PATH}/mindspore_serving/server ${PROJECT_PATH}/build/package/mindspore_serving/\ncp -r ${PROJECT_PATH}/mindspore_serving/client ${PROJECT_PATH}/build/package/mindspore_serving/\n\nexport PYTHONPATH=${PROJECT_PATH}/build/package:${PROJECT_PATH}/tests/ut/python:$PYTHONPATH\nexport LD_LIBRARY_PATH=${PROJECT_PATH}/build/package/tests/mindspore/lib:${LD_LIBRARY_PATH}\n\necho \"PYTHONPATH=$PYTHONPATH\"\necho \"LD_LIBRARY_PATH=$LD_LIBRARY_PATH\"\nexport GLOG_v=1\n\nunset http_proxy\nunset https_proxy\n\nfunction clear_port()\n{\n  PROCESS=`netstat -nlp | grep :$1 | awk '{print $7}' | awk -F\"/\" '{print $1}'`\n  for i in $PROCESS\n     do\n     echo \"Kill the process [ $i ]\"\n     kill -9 $i\n  done\n}\n\nport_list=(5500 6200 7000 7001 7002 7003 7004 7005 7006 7007)\nfor port in ${port_list[*]}; do\n  clear_port ${port}\ndone\n\ncd ${PROJECT_PATH}/tests/ut/python/tests/\nif [ $# -gt 0 ]; then\n  pytest -s -v . -k \"$1\"\nelse\n  pytest -v .\nfi\n\nrm -f *.crt *.csr *.key *.srl\nrm -rf unix_socket_files\n\nexit $?\n"
  },
  {
    "path": "tests/ut/python/servable_config/add_servable_config.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"add model servable config\"\"\"\n\nimport numpy as np\nfrom mindspore_serving.server import register\n\n\ndef add_trans_datatype(x1, x2):\n    \"\"\"define preprocess, this example has one input and one output\"\"\"\n    return x1.astype(np.float32), x2.astype(np.float32)\n\n\n# when with_batch_dim is set to False, only 2x2 add is supported\n# when with_batch_dim is set to True(default), Nx2 add is supported, while N is viewed as batch\n# float32 inputs/outputs\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n\n# register add_common method in add\n@register.register_method(output_names=[\"y\"])\ndef add_common(x1, x2):  # only support float32 inputs\n    \"\"\"method add_common data flow definition, only call model servable\"\"\"\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n\n\n# register add_cast method in add\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    \"\"\"method add_cast data flow definition, only call preprocess and model servable\"\"\"\n    x1, x2 = register.add_stage(add_trans_datatype, x1, x2, outputs_count=2)  # cast input to float32\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n"
  },
  {
    "path": "tests/ut/python/servable_config/generate_certs.sh",
    "content": "#!/bin/bash\necho \"[req]\ndefault_bits = 2048\ndistinguished_name = req_distinguished_name\nx509_extensions = v3_req\nprompt = no\n[req_distinguished_name]\ncountryName = XX\nstateOrProvinceName = Self-signed Cert\ncommonName = Self-signed Cert\n[v3_req]\nbasicConstraints = CA:TRUE\" > ca.cnf\n\n# generate ca's cert and private key for signing server and client cert\nopenssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ca.key -out ca.crt -config ca.cnf\n\nrm ca.cnf\n\n# generate server's cert\n\nIP=$SERVING_IP\nDNS=$SERVING_HOSTNAME\nCN=$SERVING_COMMON_NAME\n\necho \"\nauthorityKeyIdentifier=keyid,issuer\nbasicConstraints=CA:FALSE\nkeyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment\nsubjectAltName = @alt_names\n[alt_names]\nIP.1 = $IP\nDNS.1 = $DNS\n\" > server.cnf\n\nopenssl genrsa -out server.key 2048\n\nopenssl req -new -key server.key -out server.csr -subj \"/C=XX/ST=MyST/L=XX/O=HW/OU=gRPC/CN=$CN\"\n\nopenssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -days 730 -sha256 -extfile server.cnf\n\nrm server.cnf\n\n# generate client's cert\n\nopenssl genrsa -out client.key 2048\n\nopenssl req -new -key client.key -out client.csr -subj \"/C=XX/ST=MyST/L=XX/O=HW/OU=gRPC/CN=client\"\n\nopenssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.crt -days 730 -sha256"
  },
  {
    "path": "tests/ut/python/tests/common.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test Serving, Common\"\"\"\n\nimport os\nfrom functools import wraps\n\nfrom mindspore_serving import server\nfrom mindspore_serving import log as logger\nfrom mindspore_serving.client import Client\n\nservable_index = 0\n\n\nclass ServingTestBase:\n    def __init__(self):\n        servable_dir = \"serving_python_ut_servables\"\n        self.servable_dir = os.path.join(os.getcwd(), servable_dir)\n        os.system(f\"rm -rf {self.servable_dir}\")\n        global servable_index\n        self.servable_name = \"add_\" + str(servable_index)\n        servable_index += 1\n\n    def init_servable(self, version_number, config_file, model_file=\"tensor_add.mindir\"):\n        cur_dir = os.path.dirname(os.path.abspath(__file__))\n        config_file_abs = os.path.join(os.path.join(cur_dir, \"../servable_config/\"), config_file)\n        try:\n            with open(config_file_abs, \"r\") as fp:\n                servable_config_content = fp.read()\n        except FileNotFoundError:\n            servable_config_content = None\n        self.init_servable_with_servable_config(version_number, servable_config_content, model_file)\n\n    def init_servable_with_servable_config(self, version_number, servable_config_content,\n                                           
model_file=\"tensor_add.mindir\", model_config_file=None):\n        if not isinstance(model_file, (tuple, list)):\n            model_file = (model_file,)\n        self.version_number = version_number\n        self.model_files = model_file\n        self.servable_name_path = os.path.join(self.servable_dir, self.servable_name)\n        self.version_number_path = os.path.join(self.servable_name_path, str(version_number))\n        self.model_files_path = [os.path.join(self.version_number_path, file) for file in model_file]\n\n        try:\n            os.mkdir(self.servable_dir)\n        except FileExistsError:\n            pass\n        try:\n            os.mkdir(self.servable_name_path)\n        except FileExistsError:\n            pass\n        if self.model_files_path and version_number is not None:\n            try:\n                os.mkdir(self.version_number_path)\n            except FileExistsError:\n                pass\n            for file in self.model_files_path:\n                with open(file, \"w\") as fp:\n                    print(\"model content\", file=fp)\n        if servable_config_content is not None:\n            config_file = os.path.join(self.servable_name_path, \"servable_config.py\")\n            with open(config_file, \"w\") as fp:\n                fp.write(servable_config_content)\n\n        if model_config_file is not None:\n            model_config_file_path = os.path.join(self.servable_name_path, model_config_file)\n            with open(model_config_file_path, \"w\") as fp:\n                print(\"model config file\", file=fp)\n\n    def init_distributed_servable(self, servable_config_content, rank_size, rank_table_content):\n        self.version_number = 1\n        self.servable_name_path = os.path.join(self.servable_dir, self.servable_name)\n        self.model_dir = os.path.join(self.servable_dir, \"model_\" + self.servable_name)\n        self.rank_table_content_path = os.path.join(self.servable_dir, self.servable_name + 
\"_hccl.json\")\n        try:\n            os.mkdir(self.servable_dir)\n        except FileExistsError:\n            pass\n        try:\n            os.mkdir(self.servable_name_path)\n        except FileExistsError:\n            pass\n        try:\n            os.mkdir(self.model_dir)\n        except FileExistsError:\n            pass\n        self.model_file_list = []\n        for i in range(rank_size):\n            model_file_path = os.path.join(self.model_dir, f\"model{i}.mindir\")\n            self.model_file_list.append(model_file_path)\n            with open(model_file_path, \"w\") as fp:\n                print(\"model content\", file=fp)\n        self.group_config_list = []\n        for i in range(rank_size):\n            group_config = os.path.join(self.model_dir, f\"group{i}.pb\")\n            self.group_config_list.append(group_config)\n            with open(group_config, \"w\") as fp:\n                print(\"group config content\", file=fp)\n\n        if servable_config_content is not None:\n            config_file = os.path.join(self.servable_name_path, \"servable_config.py\")\n            with open(config_file, \"w\") as fp:\n                fp.write(servable_config_content)\n\n        if rank_table_content is not None:\n            with open(self.rank_table_content_path, \"w\") as fp:\n                fp.write(rank_table_content)\n\n    @staticmethod\n    def add_on_exit(fun):\n        global exit_fun_list\n        exit_fun_list.append(fun)\n\n\nexit_fun_list = []\nclient_create_list = []\n\n\ndef serving_test(func):\n    @wraps(func)\n    def wrap_test(*args, **kwargs):\n        try:\n            os.environ[\"SERVING_ENABLE_CPU_DEVICE\"] = \"0\"\n            os.environ[\"SERVING_ENABLE_GPU_DEVICE\"] = \"0\"\n            func(*args, **kwargs)\n        except Exception:\n            logger.error(\"Serving test catch exception\")\n            serving_logs_dir = os.path.join(os.getcwd(), \"serving_logs\")\n            os.system(f\"ls -l 
{serving_logs_dir}/*.log && cat {serving_logs_dir}/*.log\")\n            raise\n        finally:\n            logger.info(\"Serving test begin to clear\")\n            server.master.context.set_max_enqueued_requests(10000)\n            server.stop()\n            global client_create_list\n            for client in client_create_list:\n                del client.stub\n                client.stub = None\n            client_create_list = []\n            global exit_fun_list\n            for fun in exit_fun_list:\n                fun()\n            exit_fun_list = []\n            cwd_dir = os.getcwd()\n            servable_dir = os.path.join(cwd_dir, \"serving_python_ut_servables\")\n            os.system(f\"rm -rf {servable_dir}\")\n            temp_rank_dir = os.path.join(cwd_dir, \"temp_rank_table\")\n            os.system(f\"rm -rf {temp_rank_dir}\")\n            serving_logs_dir = os.path.join(cwd_dir, \"serving_logs\")\n            os.system(f\"rm -rf {serving_logs_dir}\")\n            unix_socket_files_dir = os.path.join(cwd_dir, \"unix_socket_files\")\n            os.system(f\"rm -rf {unix_socket_files_dir}\")\n            unix_socket_files_dir = os.path.join(cwd_dir, \"device_\")\n            os.system(f\"rm -rf {unix_socket_files_dir}*\")\n            os.system(f\"rm -rf *.crt *.key *.csr *.srl\")\n            logger.info(\"Serving test end clear\")\n\n    return wrap_test\n\n\ndef create_client(address, servable_name, method_name, version_number=0, ssl_config=None):\n    client = Client(address, servable_name, method_name, version_number, ssl_config)\n    client_create_list.append(client)\n    return client\n\n\ndef generate_cert(server_ip=\"0.0.0.0\", server_host_name=\"serving\", common_name=\"serving.com\"):\n    cur_dir = os.path.dirname(os.path.abspath(__file__))\n    shell_path = os.path.join(os.path.join(cur_dir, \"../servable_config/\"), \"generate_certs.sh\")\n    os.environ[\"SERVING_IP\"] = server_ip\n    os.environ[\"SERVING_HOSTNAME\"] = 
server_host_name\n    os.environ[\"SERVING_COMMON_NAME\"] = common_name\n    with open(shell_path, 'r') as f:\n        command = f.read()\n    os.system(command)\n\n\ndef release_client(client):\n    del client.stub\n    client.stub = None\n\n\n# test servable_config.py with client\nservable_config_import = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\"\"\"\n\nservable_config_declare_servable = r\"\"\"\nregister.declare_servable(servable_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\"\"\"\n\nservable_config_preprocess_cast = r\"\"\"\ndef add_trans_datatype(x1, x2):\n    return x1.astype(np.float32), x2.astype(np.float32)\n\"\"\"\n\nservable_config_method_add_common = r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_common(x1, x2):  # only support float32 inputs\n    y = register.call_servable(x1, x2)\n    return y\n\"\"\"\n\nservable_config_method_add_cast = r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)  # cast input to float32\n    y = register.call_servable(x1, x2)\n    return y\n\"\"\"\n\n\ndef init_add_servable():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += servable_config_method_add_common\n    servable_content += servable_config_method_add_cast\n    base.init_servable_with_servable_config(1, servable_content)\n    return base\n\n\ndef init_str_servable():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += r\"\"\"\ndef preprocess(other):\n    return np.ones([2,2], np.float32), np.ones([2,2], np.float32)\n    \ndef str_concat_postprocess(text1, text2):\n    print(\"text1\", text1, \"text2\", text2)\n    
return text1 + text2\n\n@register.register_method(output_names=[\"text\"])\ndef str_concat(text1, text2):\n    text = register.add_stage(str_concat_postprocess, text1, text2, outputs_count=1)\n    return text\n    \ndef str_empty_postprocess(text1, text2):\n    if len(text1) == 0:\n        text = text2\n    else:\n        text = \"\"\n    return text\n\n@register.register_method(output_names=[\"text\"])\ndef str_empty(text1, text2):\n    text = register.add_stage(str_empty_postprocess, text1, text2, outputs_count=1)\n    return text\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    return base\n\n\ndef init_bytes_servable():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += r\"\"\"\ndef preprocess(other):\n    return np.ones([2,2], np.float32), np.ones([2,2], np.float32)\n\ndef bytes_concat_process(text1, text2):\n    text1 = bytes.decode(text1.tobytes()) # bytes decode to str\n    text2 = bytes.decode(text2.tobytes()) # bytes decode to str\n    return str.encode(text1 + text2) # str encode to bytes\n\n@register.register_method(output_names=[\"text\"])\ndef bytes_concat(text1, text2):  \n    text = register.add_stage(bytes_concat_process, text1, text2, outputs_count=1)\n    return text\n\ndef bytes_empty_process(text1, text2):   \n    text1 = bytes.decode(text1.tobytes()) # bytes decode to str\n    text2 = bytes.decode(text2.tobytes()) # bytes decode to str\n    if len(text1) == 0:\n        text = text2\n    else:\n        text = \"\"\n    return str.encode(text) # str encode to bytes\n\n@register.register_method(output_names=[\"text\"])\ndef bytes_empty(text1, text2):\n    text = register.add_stage(bytes_empty_process, text1, text2, outputs_count=1)\n    return text\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    return base\n\n\ndef init_bool_int_float_servable():\n    base = ServingTestBase()\n  
  servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += r\"\"\"\ndef bool_process(bool_val):\n    return  ~bool_val\n\n@register.register_method(output_names=[\"value\"])\ndef bool_not(bool_val):\n    value = register.add_stage(bool_process, bool_val, outputs_count=1)\n    return value\n\ndef int_process(int_val):\n    return int_val + 1\n\n@register.register_method(output_names=[\"value\"])\ndef int_plus_1(int_val): \n    value = register.add_stage(int_process, int_val, outputs_count=1)\n    return value\n    \ndef float_process(float_val):\n    value = (float_val + 1).astype(float_val.dtype) # also support float16 input and output\n    return value   \n    \n@register.register_method(output_names=[\"value\"])\ndef float_plus_1(float_val):\n    value = register.add_stage(float_process, float_val, outputs_count=1)\n    return value\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    return base\n\n\ndef start_serving_server(servable_content, model_file=\"tensor_add.mindir\", version_number=1, start_version_number=None,\n                         device_ids=0, num_parallel_workers=0, device_type=None):\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(version_number, servable_content, model_file=model_file)\n    if start_version_number is None:\n        start_version_number = version_number\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=device_ids,\n                                                      version_number=start_version_number,\n                                                      num_parallel_workers=num_parallel_workers,\n                                                      device_type=device_type))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    return base\n"
  },
  {
    "path": "tests/ut/python/tests/common_restful.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test Serving, Common\"\"\"\n\nfrom multiprocessing import Process, Pipe\nimport json\nimport requests\nimport numpy as np\n\nfrom common import init_str_servable, init_bytes_servable, init_bool_int_float_servable\nfrom mindspore_serving import server\n\n\ndef compare_float_value(result, expect):\n    if isinstance(expect, (float, int)):\n        assert isinstance(result, float)\n        assert abs(expect - result) < 0.001\n        return\n    expect = np.array(expect)\n    result = np.array(result)\n    assert (np.abs(expect - result) < 0.001).all()\n\n\ndef create_multi_instances_fp32(instance_count):\n    instances = []\n    # instance 1\n    y_data_list = []\n    for i in range(instance_count):\n        x1 = np.asarray([[1.1, 2.2], [3.3, 4.4]]).astype(np.float32) * (i + 1)\n        x2 = np.asarray([[5.5, 6.6], [7.7, 8.8]]).astype(np.float32) * (i + 1)\n        y_data_list.append(x1 + x2)\n        instances.append({\"x1\": x1.tolist(), \"x2\": x2.tolist()})\n    return instances, y_data_list\n\n\ndef create_multi_instances_with_batch_fp32(instance_count):\n    instances = []\n    # instance 1\n    y_data_list = []\n    for i in range(instance_count):\n        x1 = np.asarray([[1.1], [3.3]]).astype(np.float32) * (i + 1)\n        x2 = np.asarray([[5.5], 
[7.7]]).astype(np.float32) * (i + 1)\n        y_data_list.append(x1 + x2)\n        instances.append({\"x1\": x1.tolist(), \"x2\": x2.tolist()})\n    return instances, y_data_list\n\n\ndef check_number_result(result, y_data_list, output_name=\"y\"):\n    result = result[\"instances\"]\n    assert len(result) == len(y_data_list)\n    for result_item, expected_item in zip(result, y_data_list):\n        result_item = np.array(result_item[output_name])\n        print(\"result\", result_item)\n        print(\"expect:\", expected_item)\n        assert result_item.shape == expected_item.shape\n        assert (np.abs(result_item - expected_item) < 0.001).all()\n\n\ndef post_restful(address, servable_name, method_name, json_instances, version_number=None, verify=\"ca.crt\",\n                 cert=(\"client.crt\", \"client.key\"), https=False, post_payload=None):\n    if not post_payload:\n        instances_map = {\"instances\": json_instances}\n        post_payload = json.dumps(instances_map)\n    print(\"request:\", post_payload[:200])\n    protocol = \"http\"\n    if https:\n        protocol = \"https\"\n\n    def post_request(request_url, post_payload, send_pipe, verify=verify, cert=cert):\n        try:\n            if https:\n                result = requests.post(request_url, data=post_payload, verify=verify, cert=cert)\n            else:\n                result = requests.post(request_url, data=post_payload)\n            print(f\"result inner: {result}\")\n            result = json.loads(result.text)\n            send_pipe.send(result)\n        # pylint: disable=broad-except\n        except Exception as e:\n            print(f\"post failed: {e}\")\n            send_pipe.send(\"post failed\")\n\n    if version_number is not None:\n        request_url = f\"{protocol}://{address}/model/{servable_name}/version/{version_number}:{method_name}\"\n    else:\n        request_url = f\"{protocol}://{address}/model/{servable_name}:{method_name}\"\n    result = None\n    for _ in 
range(2):\n        send_pipe, recv_pipe = Pipe()\n        sub_process = Process(target=post_request, args=(request_url, post_payload, send_pipe))\n        sub_process.start()\n        sub_process.join()\n        if recv_pipe.poll(0.1):\n            result = recv_pipe.recv()\n            if result != \"post failed\":\n                break\n        else:\n            result = \"post failed\"\n    print(f\"result outer: {result}\")\n    return result\n\n\ndef start_str_restful_server():\n    base = init_str_servable()\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_restful_server(\"0.0.0.0:5500\")\n    return base\n\n\ndef start_bytes_restful_server():\n    base = init_bytes_servable()\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_restful_server(\"0.0.0.0:5500\")\n    return base\n\n\ndef start_bool_int_float_restful_server():\n    base = init_bool_int_float_servable()\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_restful_server(\"0.0.0.0:5500\")\n    return base\n"
  },
  {
    "path": "tests/ut/python/tests/test_distributed_worker.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test distributed worker\"\"\"\nimport logging\nimport os\nimport signal\nimport time\nfrom multiprocessing import Process, Pipe\nimport numpy as np\nimport psutil\n\nfrom common import serving_test, create_client, ServingTestBase\nfrom mindspore_serving.server import distributed\nfrom mindspore_serving import server\n\ndistributed_import = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import distributed\nfrom mindspore_serving.server import register\n\"\"\"\n\ndistributed_declare_servable = r\"\"\"\nmodel = distributed.declare_servable(rank_size=8, stage_size=1, with_batch_dim=False)\n\"\"\"\n\nrank_table_content = r\"\"\"\n{\n  \"version\": \"1.0\", \"server_count\": \"1\",\n  \"server_list\": [\n    {\n      \"server_id\": \"127.0.0.1\",\n      \"device\": [\n        { \"device_id\": \"0\", \"device_ip\": \"192.1.27.6\", \"rank_id\": \"0\" },\n        { \"device_id\": \"1\", \"device_ip\": \"192.2.27.6\", \"rank_id\": \"1\" },\n        { \"device_id\": \"2\", \"device_ip\": \"192.3.27.6\", \"rank_id\": \"2\" },\n        { \"device_id\": \"3\", \"device_ip\": \"192.4.27.6\", \"rank_id\": \"3\" },\n        { \"device_id\": \"4\", \"device_ip\": \"192.1.27.7\", \"rank_id\": \"4\" },\n        { \"device_id\": \"5\", \"device_ip\": \"192.2.27.7\", \"rank_id\": 
\"5\" },\n        { \"device_id\": \"6\", \"device_ip\": \"192.3.27.7\", \"rank_id\": \"6\" },\n        { \"device_id\": \"7\", \"device_ip\": \"192.4.27.7\", \"rank_id\": \"7\" }\n      ],\n      \"host_nic_ip\": \"reserve\"\n    }\n  ],\n  \"status\": \"completed\"\n}\n\"\"\"\n\n\ndef init_distributed_servable():\n    base = ServingTestBase()\n    servable_content = distributed_import\n    servable_content += distributed_declare_servable\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef predict(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n\"\"\"\n    base.init_distributed_servable(servable_content, 8, rank_table_content)\n    return base\n\n\ndef start_distributed_grpc_server():\n    base = init_distributed_servable()\n    return base\n\n\ndef start_distributed_worker(base):\n    send_pipe, recv_pipe = Pipe()\n\n    def worker_process(send_pipe):\n        try:\n            distributed.start_servable(base.servable_dir, base.servable_name,\n                                       rank_table_json_file=base.rank_table_content_path,\n                                       distributed_address=\"127.0.0.1:6200\")\n            server.start_grpc_server(\"0.0.0.0:5500\")\n            send_pipe.send(\"Success\")\n        # pylint: disable=broad-except\n        except Exception as e:\n            logging.exception(e)\n            send_pipe.send(e)\n\n    worker = Process(target=worker_process, args=(send_pipe,))\n    worker.start()\n    time.sleep(0.5)  # wait parse rank table ready\n    assert worker.is_alive()\n    return worker, recv_pipe\n\n\ndef wait_worker_registered_ready(worker, recv_pipe):\n    index = 0\n    while index < 100 and worker.is_alive():  # wait max 10 s\n        index += 1\n        if recv_pipe.poll(0.1):\n            msg = recv_pipe.recv()\n            print(f\"Receive worker process msg: {msg} {worker.is_alive()}\")\n            if isinstance(msg, Exception):\n                
raise msg\n            break\n\n    if recv_pipe.poll(0.1):\n        msg = recv_pipe.recv()\n        print(f\"Receive worker process msg: {msg} {worker.is_alive()}\")\n        if isinstance(msg, Exception):\n            raise msg\n    assert index < 100\n    assert worker.is_alive()\n\n\ndef start_agents(model_file_list, group_config_list, start_port, dec_key=None, dec_mode='AES-GCM'):\n    send_pipe, recv_pipe = Pipe()\n\n    def agent_process(send_pipe):\n        try:\n            distributed.startup_agents(distributed_address=\"127.0.0.1:6200\", model_files=model_file_list,\n                                       group_config_files=group_config_list, agent_start_port=start_port,\n                                       dec_key=dec_key, dec_mode=dec_mode)\n            send_pipe.send(\"Success\")\n        # pylint: disable=broad-except\n        except Exception as e:\n            logging.exception(e)\n            send_pipe.send(e)\n\n    agent = Process(target=agent_process, args=(send_pipe,))\n    agent.start()\n    index = 0\n    while index < 100 and agent.is_alive():  # wait max 10 s\n        index += 1\n        if recv_pipe.poll(0.1):\n            msg = recv_pipe.recv()\n            print(f\"Receive agent process msg: {msg} {agent.is_alive()}\")\n            if isinstance(msg, Exception):\n                raise msg\n            break\n\n    if recv_pipe.poll(0.1):\n        msg = recv_pipe.recv()\n        print(f\"Receive agent process msg: {msg} {agent.is_alive()}\")\n        if isinstance(msg, Exception):\n            raise msg\n    assert index < 100\n    assert agent.is_alive()\n    return agent\n\n\ndef send_exit(process):\n    if not process.is_alive():\n        return\n    parent_process = psutil.Process(process.pid)\n    child_processes = parent_process.children(recursive=True)\n\n    def children_alive():\n        return any([item.is_running() for item in child_processes])\n\n    os.kill(process.pid, signal.SIGINT)\n    for _ in range(50):  # 50*0.1s\n 
       if not process.is_alive() and not children_alive():\n            break\n        time.sleep(0.1)\n    for item in child_processes:\n        if item.is_running():\n            os.kill(item.pid, signal.SIGKILL)\n    if process.is_alive():\n        os.kill(process.pid, signal.SIGKILL)\n\n\ndef start_distributed_serving_server():\n    base = start_distributed_grpc_server()\n    worker_process, recv_pipe = start_distributed_worker(base)\n    base.add_on_exit(lambda: send_exit(worker_process))\n    agent_process = start_agents(base.model_file_list, base.group_config_list, 7000)\n    base.add_on_exit(lambda: send_exit(agent_process))\n    wait_worker_registered_ready(worker_process, recv_pipe)\n    return base, worker_process, agent_process\n\n\n@serving_test\ndef test_distributed_worker_worker_exit_success():\n    \"\"\"\n    Feature: distributed serving server\n    Description: Test distributed serving server exit when worker receive signal SIGINT\n    Expectation: When worker receive signal SIGINT, serving server will exit.\n    \"\"\"\n    base, worker_process, agent_process = start_distributed_serving_server()\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    instances = [{}, {}, {}]\n    y_data_list = []\n    for index, instance in enumerate(instances):\n        instance[\"x1\"] = np.array([[1.1, 1.2], [2.2, 2.3]], np.float32) * (index + 1)\n        instance[\"x2\"] = np.array([[3.3, 3.4], [4.4, 4.5]], np.float32) * (index + 1)\n        y_data_list.append((instance[\"x1\"] + instance[\"x2\"]).tolist())\n\n    result = client.infer(instances)\n    print(result)\n    assert len(result) == 3\n    assert result[0][\"y\"].dtype == np.float32\n    assert result[1][\"y\"].dtype == np.float32\n    assert result[2][\"y\"].dtype == np.float32\n    assert result[0][\"y\"].tolist() == y_data_list[0]\n    assert result[1][\"y\"].tolist() == y_data_list[1]\n    assert result[2][\"y\"].tolist() == y_data_list[2]\n\n    # send SIGINT to 
worker, expect worker and all agents exit\n    agents = psutil.Process(agent_process.pid).children()\n\n    def agents_alive():\n        return any([item.is_running() for item in agents])\n\n    os.kill(worker_process.pid, signal.SIGINT)\n    for _ in range(50):  # 50*0.1s\n        if not worker_process.is_alive() and not agent_process.is_alive() and not agents_alive():\n            break\n        time.sleep(0.1)\n    assert not worker_process.is_alive()\n    assert not agent_process.is_alive()\n    assert not agents_alive()\n\n\n@serving_test\ndef test_distributed_worker_agent_exit_success():\n    \"\"\"\n    Feature: distributed serving server\n    Description: Test distributed serving server exit when agent startup process receive signal SIGINT\n    Expectation: When agent startup process receive signal SIGINT, serving server will exit.\n    \"\"\"\n    base, worker_process, agent_process = start_distributed_serving_server()\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    instances = [{}, {}, {}]\n    y_data_list = []\n    for index, instance in enumerate(instances):\n        instance[\"x1\"] = np.array([[1.1, 1.2], [2.2, 2.3]], np.float32) * (index + 1)\n        instance[\"x2\"] = np.array([[3.3, 3.4], [4.4, 4.5]], np.float32) * (index + 1)\n        y_data_list.append((instance[\"x1\"] + instance[\"x2\"]).tolist())\n\n    result = client.infer(instances)\n    print(result)\n    assert len(result) == 3\n    assert result[0][\"y\"].tolist() == y_data_list[0]\n    assert result[1][\"y\"].tolist() == y_data_list[1]\n    assert result[2][\"y\"].tolist() == y_data_list[2]\n\n    # send SIGINT to worker, expect worker and all agents exit\n    agents = psutil.Process(agent_process.pid).children()\n\n    def agents_alive():\n        return any([item.is_running() for item in agents])\n\n    os.kill(agent_process.pid, signal.SIGINT)\n    for _ in range(50):  # 50*0.1s\n        if not worker_process.is_alive() and not 
agent_process.is_alive() and not agents_alive():\n            break\n        time.sleep(0.1)\n    assert not worker_process.is_alive()\n    assert not agent_process.is_alive()\n    assert not agents_alive()\n\n\n@serving_test\ndef test_distributed_worker_agent_startup_killed_exit_success():\n    \"\"\"\n    Feature: distributed serving server\n    Description: Test distributed serving server exit when agent start up process killed by signal SIGKILL\n    Expectation: When agent startup process receive signal SIGKILL, serving server will exit.\n    \"\"\"\n    base, worker_process, agent_process = start_distributed_serving_server()\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    instances = [{}, {}, {}]\n    y_data_list = []\n    for index, instance in enumerate(instances):\n        instance[\"x1\"] = np.array([[1.1, 1.2], [2.2, 2.3]], np.float32) * (index + 1)\n        instance[\"x2\"] = np.array([[3.3, 3.4], [4.4, 4.5]], np.float32) * (index + 1)\n        y_data_list.append((instance[\"x1\"] + instance[\"x2\"]).tolist())\n\n    result = client.infer(instances)\n    print(result)\n    assert len(result) == 3\n    assert result[0][\"y\"].tolist() == y_data_list[0]\n    assert result[1][\"y\"].tolist() == y_data_list[1]\n    assert result[2][\"y\"].tolist() == y_data_list[2]\n\n    # send SIGINT to worker, expect worker and all agents exit\n    agents = psutil.Process(agent_process.pid).children()\n\n    def agents_alive():\n        return any([item.is_running() for item in agents])\n\n    os.kill(agent_process.pid, signal.SIGKILL)  # kill msg\n    for _ in range(50):  # 50*0.1s\n        # test agent_process.is_alive() first, it will make agents(children) notify exit of their parent\n        if not agent_process.is_alive() and not worker_process.is_alive() and not agents_alive():\n            break\n        time.sleep(0.1)\n    assert not worker_process.is_alive()\n    assert not agent_process.is_alive()\n    assert not 
agents_alive()\n\n\n@serving_test\ndef test_distributed_worker_agent_killed_exit_success():\n    \"\"\"\n    Feature: distributed serving server\n    Description: Test distributed serving server exit when one of agents killed by signal SIGKILL\n    Expectation: When one of agent process receive signal SIGKILL, serving server will exit.\n    \"\"\"\n    base, worker_process, agent_process = start_distributed_serving_server()\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    instances = [{}, {}, {}]\n    y_data_list = []\n    for index, instance in enumerate(instances):\n        instance[\"x1\"] = np.array([[1.1, 1.2], [2.2, 2.3]], np.float32) * (index + 1)\n        instance[\"x2\"] = np.array([[3.3, 3.4], [4.4, 4.5]], np.float32) * (index + 1)\n        y_data_list.append((instance[\"x1\"] + instance[\"x2\"]).tolist())\n\n    result = client.infer(instances)\n    print(result)\n    assert len(result) == 3\n    assert result[0][\"y\"].tolist() == y_data_list[0]\n    assert result[1][\"y\"].tolist() == y_data_list[1]\n    assert result[2][\"y\"].tolist() == y_data_list[2]\n\n    # send SIGINT to worker, expect worker and all agents exit\n    agents = psutil.Process(agent_process.pid).children()\n    assert agents\n\n    def agents_alive():\n        return any([item.is_running() for item in agents])\n\n    os.kill(agents[0].pid, signal.SIGKILL)  # kill msg\n    for _ in range(50):  # 50*0.1s\n        if not worker_process.is_alive() and not agent_process.is_alive() and not agents_alive():\n            break\n        time.sleep(0.1)\n\n    assert not worker_process.is_alive()\n    assert not agent_process.is_alive()\n    assert not agents_alive()\n\n\n@serving_test\ndef test_distributed_worker_agent_invalid_model_files_failed():\n    \"\"\"\n    Feature: distributed serving server\n    Description: Test distributed serving server start up when model files are invalid\n    Expectation: serving server raise runtime error.\n    \"\"\"\n 
   base = start_distributed_grpc_server()\n    worker_process, _ = start_distributed_worker(base)\n    base.add_on_exit(lambda: send_exit(worker_process))\n    base.model_file_list[0] = base.model_file_list[0] + \"_error\"\n    try:\n        start_agents(base.model_file_list, base.group_config_list, 7036)\n        assert False\n    # pylint: disable=broad-except\n    except Exception as e:\n        assert \"Cannot access model file\" in str(e)\n\n\n@serving_test\ndef test_distributed_worker_dec_model_success():\n    \"\"\"\n    Feature: distributed serving server\n    Description: Test distributed serving server with dec models\n    Expectation: serving server running ok.\n    \"\"\"\n    base = start_distributed_grpc_server()\n    worker_process, recv_pipe = start_distributed_worker(base)\n    base.add_on_exit(lambda: send_exit(worker_process))\n    agent_process = start_agents(base.model_file_list, base.group_config_list, 7000, dec_key=('abcd1234' * 3).encode())\n    base.add_on_exit(lambda: send_exit(agent_process))\n    wait_worker_registered_ready(worker_process, recv_pipe)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    instances = [{}, {}, {}]\n    y_data_list = []\n    for index, instance in enumerate(instances):\n        instance[\"x1\"] = np.array([[1.1, 1.2], [2.2, 2.3]], np.float32) * (index + 1)\n        instance[\"x2\"] = np.array([[3.3, 3.4], [4.4, 4.5]], np.float32) * (index + 1)\n        y_data_list.append((instance[\"x1\"] + instance[\"x2\"]).tolist())\n\n    result = client.infer(instances)\n    print(result)\n    assert len(result) == 3\n    assert result[0][\"y\"].dtype == np.float32\n    assert result[1][\"y\"].dtype == np.float32\n    assert result[2][\"y\"].dtype == np.float32\n    assert result[0][\"y\"].tolist() == y_data_list[0]\n    assert result[1][\"y\"].tolist() == y_data_list[1]\n    assert result[2][\"y\"].tolist() == y_data_list[2]\n\n    # send SIGINT to worker, expect worker and all agents 
exit\n    agents = psutil.Process(agent_process.pid).children()\n\n    def agents_alive():\n        return any([item.is_running() for item in agents])\n\n    os.kill(worker_process.pid, signal.SIGINT)\n    for _ in range(50):  # 50*0.1s\n        if not worker_process.is_alive() and not agent_process.is_alive() and not agents_alive():\n            break\n        time.sleep(0.1)\n    assert not worker_process.is_alive()\n    assert not agent_process.is_alive()\n    assert not agents_alive()\n"
  },
  {
    "path": "tests/ut/python/tests/test_grpc_request.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test Serving with master, worker and client\"\"\"\n\nimport numpy as np\n\nfrom common import init_str_servable, init_bytes_servable, init_bool_int_float_servable\nfrom common import serving_test, create_client\nfrom mindspore_serving import server\n\n\ndef start_str_grpc_server():\n    base = init_str_servable()\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    return base\n\n\ndef start_bytes_grpc_server():\n    base = init_bytes_servable()\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    return base\n\n\ndef start_bool_int_float_grpc_server():\n    base = init_bool_int_float_servable()\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    return base\n\n\n@serving_test\ndef test_grpc_request_str_input_output_success():\n    base = start_str_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        
instance[\"text1\"] = str_a[i]\n        instance[\"text2\"] = str_b[i]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"str_concat\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert result[0][\"text\"] == str_a[0] + str_b[0]\n    assert result[1][\"text\"] == str_a[1] + str_b[1]\n    assert result[2][\"text\"] == str_a[2] + str_b[2]\n\n\n@serving_test\ndef test_grpc_request_empty_str_input_output_success():\n    base = start_str_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = str_a[i]\n        instance[\"text2\"] = str_b[i]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"str_empty\")\n    result = client.infer(instances)\n    assert result[0][\"text\"] == \"\"\n    assert result[1][\"text\"] == \"456\"\n    assert result[2][\"text\"] == \"\"\n\n\n@serving_test\ndef test_grpc_request_str_shape1_list_input_failed():\n    base = start_str_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = [str_a[i]]\n        instance[\"text2\"] = [str_b[i]]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"str_concat\")\n    try:\n        client.infer(instances)\n        assert False\n    except RuntimeError as e:\n        assert \"Not support value type <class 'list'>\" in str(e)\n\n\n@serving_test\ndef test_grpc_request_str_np_1d_array_input_failed():\n    base = start_str_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = np.array([str_a[i], str_a[i]])\n        instance[\"text2\"] = 
np.array([str_b[i], str_b[i]])\n        print(instance)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"str_concat\")\n    try:\n        client.infer(instances)\n        assert False\n    except RuntimeError as e:\n        assert \"Unknown data type\" in str(e)\n\n\n@serving_test\ndef test_grpc_request_bytes_input_output_success():\n    base = start_bytes_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = str.encode(str_a[i])\n        instance[\"text2\"] = str.encode(str_b[i])\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"bytes_concat\")\n    result = client.infer(instances)\n    assert bytes.decode(result[0][\"text\"]) == str_a[0] + str_b[0]\n    assert bytes.decode(result[1][\"text\"]) == str_a[1] + str_b[1]\n    assert bytes.decode(result[2][\"text\"]) == str_a[2] + str_b[2]\n\n\n@serving_test\ndef test_grpc_request_empty_bytes_input_output_success():\n    base = start_bytes_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = str.encode(str_a[i])\n        instance[\"text2\"] = str.encode(str_b[i])\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"bytes_empty\")\n    result = client.infer(instances)\n    assert bytes.decode(result[0][\"text\"]) == \"\"\n    assert bytes.decode(result[1][\"text\"]) == str_b[1]\n    assert bytes.decode(result[2][\"text\"]) == \"\"\n\n\n@serving_test\ndef test_grpc_request_bytes_1d_array_input_failed():\n    base = start_bytes_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = 
[str.encode(str_a[i])]\n        instance[\"text2\"] = [str.encode(str_b[i])]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"bytes_concat\")\n    try:\n        client.infer(instances)\n        assert False\n    except RuntimeError as e:\n        assert \"Not support value type <class 'list'>\" in str(e)\n\n\n@serving_test\ndef test_grpc_request_bool_scalar_input_output_success():\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        instance[\"bool_val\"] = (i % 2 == 0)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"bool_not\")\n    result = client.infer(instances)\n    assert not result[0][\"value\"]\n    assert result[1][\"value\"]\n    assert not result[2][\"value\"]\n\n\n@serving_test\ndef test_grpc_request_bool_1d_array_input_output_success():\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = (i % 2 == 0)\n        val = [val] * i\n        instance[\"bool_val\"] = np.array(val).astype(np.bool_)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"bool_not\")\n    result = client.infer(instances)\n    assert result[0][\"value\"].tolist() == []\n    assert result[1][\"value\"].tolist() == [True]\n    assert result[2][\"value\"].tolist() == [False, False]\n\n\n@serving_test\ndef test_grpc_request_bool_2d_array_input_output_success():\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = (i % 2 == 0)\n        val = [[val] * i] * i\n        if i == 0:\n            val = [[]]\n        instance[\"bool_val\"] = np.array(val).astype(np.bool_)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"bool_not\")\n    result = client.infer(instances)\n    assert result[0][\"value\"].tolist() == [[]]\n    
assert result[1][\"value\"].tolist() == [[True]]\n    assert result[2][\"value\"].tolist() == [[False, False], [False, False]]\n\n\n@serving_test\ndef test_grpc_request_bool_invalid_2d_array_input_failed():\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = (i % 2 == 0)\n        val = [[val, val], [val]]\n        instance[\"bool_val\"] = np.array(val)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"bool_not\")\n    try:\n        client.infer(instances)\n        assert False\n    except RuntimeError as e:\n        assert \"Unknown data type object\" in str(e)\n\n\n@serving_test\ndef test_grpc_request_int_scalar_input_output_success():\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = (i * 2) * (-1 if i % 2 == 0 else 1)  # 0, 2, -4\n        instance[\"int_val\"] = val\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"int_plus_1\")\n    result = client.infer(instances)\n    assert result[0][\"value\"] == 1\n    assert result[1][\"value\"] == 3\n    assert result[2][\"value\"] == -3\n\n\ndef common_test_grpc_request_np_int_type_scalar_input_output_success(dtype):\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = (i * 2) * (-1 if i % 2 == 0 else 1)  # 0, 2, -4\n        instance[\"int_val\"] = dtype(val)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"int_plus_1\")\n    result = client.infer(instances)\n    assert result[0][\"value\"] == 1\n    assert result[1][\"value\"] == 3\n    assert result[2][\"value\"] == -3\n\n\n@serving_test\ndef test_grpc_request_np_int8_type_scalar_input_output_success():\n    common_test_grpc_request_np_int_type_scalar_input_output_success(np.int8)\n\n\n@serving_test\ndef 
test_grpc_request_np_int16_type_scalar_input_output_success():\n    common_test_grpc_request_np_int_type_scalar_input_output_success(np.int16)\n\n\n@serving_test\ndef test_grpc_request_np_int32_type_scalar_input_output_success():\n    common_test_grpc_request_np_int_type_scalar_input_output_success(np.int32)\n\n\n@serving_test\ndef test_grpc_request_np_int64_type_scalar_input_output_success():\n    common_test_grpc_request_np_int_type_scalar_input_output_success(np.int64)\n\n\ndef common_test_grpc_request_np_uint_type_scalar_input_output_success(dtype):\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = (i * 2)  # 0, 2, 4\n        instance[\"int_val\"] = dtype(val)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"int_plus_1\")\n    result = client.infer(instances)\n    assert result[0][\"value\"] == 1\n    assert result[1][\"value\"] == 3\n    assert result[2][\"value\"] == 5\n\n\n@serving_test\ndef test_grpc_request_np_uint8_type_scalar_input_output_success():\n    common_test_grpc_request_np_uint_type_scalar_input_output_success(np.uint8)\n\n\n@serving_test\ndef test_grpc_request_np_uint16_type_scalar_input_output_success():\n    common_test_grpc_request_np_uint_type_scalar_input_output_success(np.uint16)\n\n\n@serving_test\ndef test_grpc_request_np_uint32_type_scalar_input_output_success():\n    common_test_grpc_request_np_uint_type_scalar_input_output_success(np.uint32)\n\n\n@serving_test\ndef test_grpc_request_np_uint64_type_scalar_input_output_success():\n    common_test_grpc_request_np_uint_type_scalar_input_output_success(np.uint64)\n\n\ndef common_test_grpc_request_np_int_type_1d_array_input_output_success(dtype):\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = (i * 2) * (-1 if i % 2 == 0 else 1)  # 0, 2, -4\n        val = [val] * 
i\n        instance[\"int_val\"] = np.array(val).astype(dtype)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"int_plus_1\")\n    result = client.infer(instances)\n    assert result[0][\"value\"].tolist() == []\n    assert result[1][\"value\"].tolist() == [3]\n    assert result[2][\"value\"].tolist() == [-3, -3]\n\n\n@serving_test\ndef test_grpc_request_np_int8_type_1d_array_input_output_success():\n    common_test_grpc_request_np_int_type_1d_array_input_output_success(np.int8)\n\n\n@serving_test\ndef test_grpc_request_np_int16_type_1d_array_input_output_success():\n    common_test_grpc_request_np_int_type_1d_array_input_output_success(np.int16)\n\n\n@serving_test\ndef test_grpc_request_np_int32_type_1d_array_input_output_success():\n    common_test_grpc_request_np_int_type_1d_array_input_output_success(np.int32)\n\n\n@serving_test\ndef test_grpc_request_np_int64_type_1d_array_input_output_success():\n    common_test_grpc_request_np_int_type_1d_array_input_output_success(np.int64)\n\n\ndef common_test_grpc_request_np_uint_type_1d_array_input_output_success(dtype):\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = (i * 2)  # 0, 2, 4\n        val = [val] * i\n        instance[\"int_val\"] = np.array(val).astype(dtype)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"int_plus_1\")\n    result = client.infer(instances)\n    assert result[0][\"value\"].tolist() == []\n    assert result[1][\"value\"].tolist() == [3]\n    assert result[2][\"value\"].tolist() == [5, 5]\n\n\n@serving_test\ndef test_grpc_request_np_uint8_type_1d_array_input_output_success():\n    common_test_grpc_request_np_uint_type_1d_array_input_output_success(np.uint8)\n\n\n@serving_test\ndef test_grpc_request_np_uint16_type_1d_array_input_output_success():\n    
common_test_grpc_request_np_uint_type_1d_array_input_output_success(np.uint16)\n\n\n@serving_test\ndef test_grpc_request_np_uint32_type_1d_array_input_output_success():\n    common_test_grpc_request_np_uint_type_1d_array_input_output_success(np.uint32)\n\n\n@serving_test\ndef test_grpc_request_np_uint64_type_1d_array_input_output_success():\n    common_test_grpc_request_np_uint_type_1d_array_input_output_success(np.uint64)\n\n\ndef common_test_grpc_request_np_int_type_2d_array_input_output_success(dtype):\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = (i * 2) * (-1 if i % 2 == 0 else 1)  # 0, 2, -4\n        val = [[val] * i] * i\n        if i == 0:\n            val = [[]]\n        instance[\"int_val\"] = np.array(val).astype(dtype)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"int_plus_1\")\n    result = client.infer(instances)\n    assert result[0][\"value\"].tolist() == [[]]\n    assert result[1][\"value\"].tolist() == [[3]]\n    assert result[2][\"value\"].tolist() == [[-3, -3], [-3, -3]]\n\n\n@serving_test\ndef test_grpc_request_np_int8_type_2d_array_input_output_success():\n    common_test_grpc_request_np_int_type_2d_array_input_output_success(np.int8)\n\n\n@serving_test\ndef test_grpc_request_np_int16_type_2d_array_input_output_success():\n    common_test_grpc_request_np_int_type_2d_array_input_output_success(np.int16)\n\n\n@serving_test\ndef test_grpc_request_np_int32_type_2d_array_input_output_success():\n    common_test_grpc_request_np_int_type_2d_array_input_output_success(np.int32)\n\n\n@serving_test\ndef test_grpc_request_np_int64_type_2d_array_input_output_success():\n    common_test_grpc_request_np_int_type_2d_array_input_output_success(np.int64)\n\n\ndef common_test_grpc_request_np_uint_type_2d_array_input_output_success(dtype):\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n   
 for i, instance in enumerate(instances):\n        val = (i * 2)  # 0, 2, 4\n        val = [[val] * i] * i\n        if i == 0:\n            val = [[]]\n        instance[\"int_val\"] = np.array(val).astype(dtype)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"int_plus_1\")\n    result = client.infer(instances)\n    assert result[0][\"value\"].tolist() == [[]]\n    assert result[1][\"value\"].tolist() == [[3]]\n    assert result[2][\"value\"].tolist() == [[5, 5], [5, 5]]\n\n\n@serving_test\ndef test_grpc_request_np_uint8_type_2d_array_input_output_success():\n    common_test_grpc_request_np_uint_type_2d_array_input_output_success(np.uint8)\n\n\n@serving_test\ndef test_grpc_request_np_uint16_type_2d_array_input_output_success():\n    common_test_grpc_request_np_uint_type_2d_array_input_output_success(np.uint16)\n\n\n@serving_test\ndef test_grpc_request_np_uint32_type_2d_array_input_output_success():\n    common_test_grpc_request_np_uint_type_2d_array_input_output_success(np.uint32)\n\n\n@serving_test\ndef test_grpc_request_np_uint64_type_2d_array_input_output_success():\n    common_test_grpc_request_np_uint_type_2d_array_input_output_success(np.uint64)\n\n\n@serving_test\ndef test_grpc_request_float_scalar_input_output_success():\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        instance[\"float_val\"] = i * 2.2\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"float_plus_1\")\n    result = client.infer(instances)\n    assert result[0][\"value\"] == 1\n    assert result[1][\"value\"] == (2.2 + 1)\n    assert result[2][\"value\"] == (4.4 + 1)\n\n\ndef common_test_grpc_request_np_float_type_scalar_input_output_success(dtype):\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = (i * 2.2) * (-1 if i % 2 == 0 
else 1)  # 0, 2.2, -4.4\n        val = np.array(val).astype(dtype)\n        y_data_list.append((val + 1).tolist())\n        instance[\"float_val\"] = val\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"float_plus_1\")\n    result = client.infer(instances)\n    assert result[0][\"value\"].dtype == dtype\n    assert result[1][\"value\"].dtype == dtype\n    assert result[2][\"value\"].dtype == dtype\n    assert result[0][\"value\"].tolist() == y_data_list[0]\n    assert result[1][\"value\"].tolist() == y_data_list[1]\n    assert result[2][\"value\"].tolist() == y_data_list[2]\n\n\n@serving_test\ndef test_grpc_request_np_float16_scalar_input_output_success():\n    common_test_grpc_request_np_float_type_scalar_input_output_success(np.float16)\n\n\n@serving_test\ndef test_grpc_request_np_float32_scalar_input_output_success():\n    common_test_grpc_request_np_float_type_scalar_input_output_success(np.float32)\n\n\n@serving_test\ndef test_grpc_request_np_float64_scalar_input_output_success():\n    common_test_grpc_request_np_float_type_scalar_input_output_success(np.float64)\n\n\ndef common_test_grpc_request_np_float_type_1d_array_input_output_success(dtype):\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = (i * 2.2) * (-1 if i % 2 == 0 else 1)  # 0, 2.2, -4.4\n        val = [val] * i\n        val = np.array(val).astype(dtype)\n        y_data_list.append((val + 1).tolist())\n        instance[\"float_val\"] = val\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"float_plus_1\")\n    result = client.infer(instances)\n    assert result[0][\"value\"].dtype == dtype\n    assert result[1][\"value\"].dtype == dtype\n    assert result[2][\"value\"].dtype == dtype\n    assert result[0][\"value\"].tolist() == y_data_list[0]\n    assert result[1][\"value\"].tolist() == y_data_list[1]\n    assert 
result[2][\"value\"].tolist() == y_data_list[2]\n\n\n@serving_test\ndef test_grpc_request_np_float16_1d_array_input_output_success():\n    common_test_grpc_request_np_float_type_1d_array_input_output_success(np.float16)\n\n\n@serving_test\ndef test_grpc_request_np_float32_1d_array_input_output_success():\n    common_test_grpc_request_np_float_type_1d_array_input_output_success(np.float32)\n\n\n@serving_test\ndef test_grpc_request_np_float64_1d_array_input_output_success():\n    common_test_grpc_request_np_float_type_1d_array_input_output_success(np.float64)\n\n\ndef common_test_grpc_request_np_float_type_2d_array_input_output_success(dtype):\n    base = start_bool_int_float_grpc_server()\n    # Client\n    instances = [{}, {}, {}]\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = (i * 2.2) * (-1 if i % 2 == 0 else 1)  # 0, 2.2, -4.4\n        val = [[val] * i] * i\n        if i == 0:\n            val = [[]]\n        val = np.array(val).astype(dtype)\n        y_data_list.append((val + 1).tolist())\n        instance[\"float_val\"] = val\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"float_plus_1\")\n    result = client.infer(instances)\n    assert result[0][\"value\"].dtype == dtype\n    assert result[1][\"value\"].dtype == dtype\n    assert result[2][\"value\"].dtype == dtype\n    assert result[0][\"value\"].tolist() == y_data_list[0]\n    assert result[1][\"value\"].tolist() == y_data_list[1]\n    assert result[2][\"value\"].tolist() == y_data_list[2]\n\n\n@serving_test\ndef test_grpc_request_np_float16_2d_array_input_output_success():\n    common_test_grpc_request_np_float_type_2d_array_input_output_success(np.float16)\n\n\n@serving_test\ndef test_grpc_request_np_float32_2d_array_input_output_success():\n    common_test_grpc_request_np_float_type_2d_array_input_output_success(np.float32)\n\n\n@serving_test\ndef test_grpc_request_np_float64_2d_array_input_output_success():\n    
common_test_grpc_request_np_float_type_2d_array_input_output_success(np.float64)\n\n\n@serving_test\ndef test_grpc_request_unix_domain_socket_success():\n    base = init_str_servable()\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server_address = \"unix:unix_socket_files/test_grpc_request_unix_domain_socket_success\"\n    server.start_grpc_server(server_address)\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = str_a[i]\n        instance[\"text2\"] = str_b[i]\n\n    client = create_client(server_address, base.servable_name, \"str_concat\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert result[0][\"text\"] == str_a[0] + str_b[0]\n    assert result[1][\"text\"] == str_a[1] + str_b[1]\n    assert result[2][\"text\"] == str_a[2] + str_b[2]\n"
  },
  {
    "path": "tests/ut/python/tests/test_model_call.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test Serving pipeline with client\"\"\"\n\nimport numpy as np\n\nfrom common import start_serving_server\nfrom common import serving_test, create_client\n\n\n@serving_test\ndef test_call_model_two_input_one_output_normal_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2):\n    y = model.call(x1, x2)\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(call_model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_two_input_one_output_multi_times_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import 
register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4):\n    y1 = model.call(x1, x2)\n    y2 = model.call(x3, x4)\n    return y1 + y2\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y = register.add_stage(call_model, x1, x2, x3, x4, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[2.1, 3.2], [4.3, 5.4]], np.float32)\n    x4 = np.array([[3.5, 4.6], [5.7, 6.8]], np.float32)\n    y = x1 + x2 + x3 + x4\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_two_input_one_output_multi_times_2success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4):\n    y1 = model.call(x1, x2)\n    y2 = model.call(x3, x4)\n    y = model.call(y1, y2)\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y = register.add_stage(call_model, x1, x2, x3, x4, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[2.1, 3.2], [4.3, 5.4]], np.float32)\n    x4 = np.array([[3.5, 4.6], [5.7, 6.8]], np.float32)\n    y = x1 + x2 + x3 + x4\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4}]\n\n    
client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_two_input_one_output_batch_call_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4):\n    instances = []\n    instances.append([x1, x2])\n    instances.append((x3, x4))\n    outputs  = model.call(instances) # return [[x1+x2], [x3+x4]]\n    y1 = outputs[0][0]\n    y2 = outputs[1][0]\n    \n    instances = []\n    instances.append((y1, y2))\n    outputs = model.call(instances)\n    y = outputs[0][0]\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y = register.add_stage(call_model, x1, x2, x3, x4, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[2.1, 3.2], [4.3, 5.4]], np.float32)\n    x4 = np.array([[3.5, 4.6], [5.7, 6.8]], np.float32)\n    y = x1 + x2 + x3 + x4\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_batch_call_one_input_one_output_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add_1_1.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4):\n    instances = []\n    instances.append([x1])\n    
instances.append([x2])\n    instances.append([x3])   \n    outputs = model.call(instances)\n    y1 = outputs[0][0]\n    y2 = outputs[1][0]\n    y3 = outputs[2][0]\n    y4 = model.call(x4)\n    return y1+y2+y3+y4\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y = register.add_stage(call_model, x1, x2, x3, x4, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add_1_1.mindir\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[2.1, 3.2], [4.3, 5.4]], np.float32)\n    x4 = np.array([[3.5, 4.6], [5.7, 6.8]], np.float32)\n    y = x1 + x2 + x3 + x4\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_batch_call_one_input_two_output_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add_1_2.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4):\n    _, y1 = model.call(x1) # one instance\n    _, y2 = model.call(x2) # one instance\n    _, y3 = model.call(x3) # one instance\n    _, y4 = model.call(x4) # one instance\n    return y1+y2+y3+y4\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y = register.add_stage(call_model, x1, x2, x3, x4, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add_1_2.mindir\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[2.1, 3.2], [4.3, 5.4]], np.float32)\n    x4 
= np.array([[3.5, 4.6], [5.7, 6.8]], np.float32)\n    y = x1 + x2 + x3 + x4 + 4\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_batch_call_one_input_two_output_batch_call_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add_1_2.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4):\n    instances = []\n    instances.append([x1]) # one input\n    outputs = model.call(instances) # batch call, one instance\n    _, y1 = outputs[0]\n\n    instances = []\n    instances.append([x2]) # one input\n    outputs = model.call(instances) # batch call, one instance\n    _, y2 = outputs[0]\n    \n    instances = []\n    instances.append([x3]) # one input\n    instances.append([x4])   \n    outputs = model.call(instances) # batch call, two instances\n    _, y3 = outputs[0]\n    _, y4 = outputs[1]\n\n    return y1+y2+y3+y4\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y = register.add_stage(call_model, x1, x2, x3, x4, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add_1_2.mindir\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[2.1, 3.2], [4.3, 5.4]], np.float32)\n    x4 = np.array([[3.5, 4.6], [5.7, 6.8]], np.float32)\n    y = x1 + x2 + x3 + x4 + 4\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert 
(result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_two_input_one_output_none_instances_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2):\n    y = model.call()\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(call_model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"Model(tensor_add.mindir).call() failed: no inputs provided\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_zero_instances_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2):\n    y = model.call([])\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(call_model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"Model(tensor_add.mindir).call() failed: Input instances count 
is 0\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_invalid_inputs_format_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2):\n    y = model.call([x1, x2]) # expect to be model.call([[x1, x2]])\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(call_model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"inputs format invalid\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_zero_inputs_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2):\n    y = model.call([[]])\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(call_model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"The inputs count 0 of instance 0 is not equal to 
the inputs count 2 of the model\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_data_size_error_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2):\n    y = model.call(x1, x2)\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(call_model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2, 3.3], [3.3, 4.4, 5.5]], np.float32)\n    x2 = np.array([[5.5, 6.6, 7.7], [7.7, 8.8, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"Given model input 0 size 24 not match the size 16 defined in model\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_data_type_error_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2):\n    y = model.call(x1, x2)\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(call_model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.int32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.int32)\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert 
\"Given model input 0 data type kMSI_Int32 not match the data type kMSI_Float32 defined in model\" in \\\n           result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_call_batch_data_size_error_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4):\n    instances = []\n    instances.append((x1, x2))\n    instances.append((x3, x4))\n    ys = model.call(instances)\n    return ys[0][0] + ys[1][0]\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y = register.add_stage(call_model, x1, x2, x3, x4, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2, 3.3], [3.3, 4.4, 5.5]], np.float32)\n    x2 = np.array([[5.5, 6.6, 7.7], [7.7, 8.8, 8.8]], np.float32)\n    x3 = np.array([[1.1, 2.2, 3.3], [3.3, 4.4, 5.5]], np.float32)\n    x4 = np.array([[5.5, 6.6, 7.7], [7.7, 8.8, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"Given model input 0 size 24 not match the size 16 defined in model\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_call_batch_data_type_error_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4):\n    instances = []\n    instances.append((x1, x2))\n    instances.append((x3, x4))\n    ys = model.call(instances)\n    return ys[0][0] + 
ys[1][0]\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y = register.add_stage(call_model, x1, x2, x3, x4, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.int32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.int32)\n    x3 = np.array([[1.1, 2.2], [3.3, 4.4]], np.int32)\n    x4 = np.array([[5.5, 6.6], [7.7, 8.8]], np.int32)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"Given model input 0 data type kMSI_Int32 not match the data type kMSI_Float32 defined in model\" in \\\n           result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_more_inputs_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3):\n    y = model.call(x1, x2, x3)\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(call_model, x1, x2, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"The inputs count 3 of instance 0 is not equal to the inputs count 2 of the model\" in result[\"error\"]\n\n\n@serving_test\ndef 
test_call_model_two_input_one_output_batch_call_more_inputs_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3):\n    y = model.call([[x1, x2, x3]])\n    return y[0][0]\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(call_model, x1, x2, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"The inputs count 3 of instance 0 is not equal to the inputs count 2 of the model\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_batch_call_more_inputs2_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3):\n    y = model.call([[x1, x2], [x1, x2, x3]])\n    return y[0][0]\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(call_model, x1, x2, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3}]\n\n    client = 
create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"The inputs count 3 of instance 1 is not equal to the inputs count 2 of the model\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_less_inputs_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3):\n    y = model.call(x1)\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(call_model, x1, x2, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"The inputs count 1 of instance 0 is not equal to the inputs count 2 of the model\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_batch_call_less_inputs_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3):\n    y = model.call([[x1]])\n    return y[0][0]\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(call_model, x1, x2, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 
2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"The inputs count 1 of instance 0 is not equal to the inputs count 2 of the model\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_batch_call_less_inputs2_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3):\n    y = model.call([[x1], [x1, x2]])\n    return y[0][0]\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(call_model, x1, x2, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"The inputs count 1 of instance 0 is not equal to the inputs count 2 of the model\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_batch_call_less_inputs3_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3):\n    y = model.call([[x1, x2], [x1]])\n    return 
y[0][0]\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(call_model, x1, x2, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"The inputs count 1 of instance 1 is not equal to the inputs count 2 of the model\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_invalid_model_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\nmodel_invalid = register.model.Model(\"tensor_add_test.mindir\")\n\ndef call_model(x1, x2):\n    y = model_invalid.call(x1, x2)\n    return y[0][0]\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(call_model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"Model(tensor_add_test.mindir).call() failed: the model is not declared\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_with_stage_model_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom 
mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2):\n    y1 = model.call(x1, x2)\n    return y1\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y1 = register.add_stage(call_model, x1, x2, outputs_count=1)\n    y2 = register.add_stage(model, y1, x3, outputs_count=1)\n    y3 = register.add_stage(call_model, y2, x4, outputs_count=1)\n    return y3\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[2.1, 3.2], [4.3, 5.4]], np.float32)\n    x4 = np.array([[3.5, 4.6], [5.7, 6.8]], np.float32)\n    y = x1 + x2 + x3 + x4\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_two_input_one_output_invalid_subgraph_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2):\n    y = model.call(x1, x2, subgraph=1)\n    return y[0][0]\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(call_model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = 
client.infer(instances)\n    print(\"result\", result)\n    assert \"The model does not have subgraph of index 1, the subgraph count of the model is 1\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_two_subgraph_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"], model_format=\"MindIR\", \n                               with_batch_dim=False)\n\ndef call_model(x1, x2, x3):\n    y = model.call(x1, x2, subgraph=0)  # x1+x2\n    y = model.call(y, x3, subgraph=1)   # y-x3\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(call_model, x1, x2, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[7.5, 8.6], [9.7, 10.8]], np.float32)\n    y = x1 + x2 - x3\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_diff_input_output_two_subgraph_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=[\"tensor_add_2_3.mindir\", \"tensor_sub_3_2.mindir\"], model_format=\"MindIR\", \n                               with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4, x5):\n    y1, y2, y3 = model.call(x1, x2, subgraph=0)  # tensor_add_2_3: 2 input, 3 output\n    y4, y5 = model.call(x3, x4, x5, subgraph=1)   # tensor_sub_3_2: 3 input, 2 output\n    return 
y1+y4\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(call_model, x1, x2, x3, x4, x5, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add_2_3.mindir\", \"tensor_sub_3_2.mindir\"])\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[7.5, 8.6], [9.7, 10.8]], np.float32)\n    x4 = np.array([[8.5, 10.6], [6.7, 12.8]], np.float32)\n    x5 = np.array([[9.5, 11.6], [8.7, 13.8]], np.float32)\n    y = (x1 + x2) + (x3 - x4 - x5)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_diff_input_output_two_subgraph2_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=[\"tensor_add_3_2.mindir\", \"tensor_sub_2_3.mindir\"], model_format=\"MindIR\", \n                               with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4, x5):\n    y1, y2 = model.call(x1, x2, x3, subgraph=0)  # tensor_add_3_2: 3 input, 2 output\n    y3, y4, y5 = model.call(x4, x5, subgraph=1)   # tensor_sub_2_3: 2 input, 3 output\n    return y1+y3\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(call_model, x1, x2, x3, x4, x5, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add_3_2.mindir\", \"tensor_sub_2_3.mindir\"])\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[7.5, 8.6], [9.7, 10.8]], 
np.float32)\n    x4 = np.array([[8.5, 10.6], [6.7, 12.8]], np.float32)\n    x5 = np.array([[9.5, 11.6], [8.7, 13.8]], np.float32)\n    y = (x1 + x2 + x3) + (x4 - x5)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_diff_input_output_two_subgraph_inputs_count_not_match_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=[\"tensor_add_3_2.mindir\", \"tensor_sub_2_3.mindir\"], model_format=\"MindIR\", \n                               with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4, x5):\n    y1, y2 = model.call(x1, x2, x3, subgraph=0)  # tensor_add_3_2: 3 input, 2 output\n    y3, y4, y5 = model.call(x4, x5, x3, subgraph=1)   # tensor_sub_2_3: 2 input, 3 output\n    return y1+y3\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(call_model, x1, x2, x3, x4, x5, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add_3_2.mindir\", \"tensor_sub_2_3.mindir\"])\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[7.5, 8.6], [9.7, 10.8]], np.float32)\n    x4 = np.array([[8.5, 10.6], [6.7, 12.8]], np.float32)\n    x5 = np.array([[9.5, 11.6], [8.7, 13.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"The inputs count 3 of instance 0 is not equal to the inputs count 2 of the model\" in 
result[\"error\"]\n\n\n@serving_test\ndef test_call_model_two_input_one_output_two_model_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ntensor_sub = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ndef call_model(x1, x2, x3):\n    y = tensor_add.call(x1, x2)  # x1+x2\n    y = tensor_sub.call(y, x3)   # y-x3\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(call_model, x1, x2, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[7.5, 8.6], [9.7, 10.8]], np.float32)\n    y = x1 + x2 - x3\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_diff_input_output_two_model_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add_2_3.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ntensor_sub = register.declare_model(model_file=\"tensor_sub_3_2.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4, x5):\n    y1, y2, y3 = tensor_add.call(x1, x2)  # tensor_add_2_3: 2 input, 3 output\n    y4, y5 = tensor_sub.call(x3, x4, x5)   # tensor_sub_3_2: 3 input, 2 output\n    return y1+y4\n\n@register.register_method(output_names=\"y\")\ndef 
predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(call_model, x1, x2, x3, x4, x5, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add_2_3.mindir\", \"tensor_sub_3_2.mindir\"])\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[7.5, 8.6], [9.7, 10.8]], np.float32)\n    x4 = np.array([[8.5, 10.6], [6.7, 12.8]], np.float32)\n    x5 = np.array([[9.5, 11.6], [8.7, 13.8]], np.float32)\n    y = (x1 + x2) + (x3 - x4 - x5)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_diff_input_output_two_model2_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add_3_2.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ntensor_sub = register.declare_model(model_file=\"tensor_sub_2_3.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4, x5):\n    y1, y2 = tensor_add.call(x1, x2, x3)  # tensor_add_3_2: 3 input, 2 output\n    y3, y4, y5 = tensor_sub.call(x4, x5)   # tensor_sub_2_3: 2 input, 3 output\n    return y1+y3\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(call_model, x1, x2, x3, x4, x5, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add_3_2.mindir\", \"tensor_sub_2_3.mindir\"])\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[7.5, 8.6], [9.7, 10.8]], np.float32)\n    
x4 = np.array([[8.5, 10.6], [6.7, 12.8]], np.float32)\n    x5 = np.array([[9.5, 11.6], [8.7, 13.8]], np.float32)\n    y = (x1 + x2 + x3) + (x4 - x5)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_call_model_diff_input_output_two_model_inputs_count_not_match_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add_3_2.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ntensor_sub = register.declare_model(model_file=\"tensor_sub_2_3.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2, x3, x4, x5):\n    y1, y2 = tensor_add.call(x1, x2)  # tensor_add_3_2: 3 input, 2 output\n    y3, y4, y5 = tensor_sub.call(x4, x5)   # tensor_sub_2_3: 2 input, 3 output\n    return y1+y3\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(call_model, x1, x2, x3, x4, x5, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add_3_2.mindir\", \"tensor_sub_2_3.mindir\"])\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    x3 = np.array([[7.5, 8.6], [9.7, 10.8]], np.float32)\n    x4 = np.array([[8.5, 10.6], [6.7, 12.8]], np.float32)\n    x5 = np.array([[9.5, 11.6], [8.7, 13.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert \"The inputs count 2 of instance 0 is not equal to the inputs count 
3 of the model\" in result[\"error\"]\n\n\n@serving_test\ndef test_call_model_diff_input_output_two_model_with_batch_dim_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add_3_2.mindir\", model_format=\"MindIR\", with_batch_dim=True)\ntensor_sub = register.declare_model(model_file=\"tensor_sub_2_3.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\ndef call_model(x1, x2, x3, x4, x5):\n    y1, y2 = tensor_add.call(x1, x2, x3)  # tensor_add_3_2: 3 input, 2 output\n    y3, y4, y5 = tensor_sub.call(x4, x5)   # tensor_sub_2_3: 2 input, 3 output\n    return y1+y3\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(call_model, x1, x2, x3, x4, x5, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add_3_2.mindir\", \"tensor_sub_2_3.mindir\"])\n    # Client\n    x1 = np.array([[3.3, 4.4]], np.float32)\n    x2 = np.array([[7.7, 8.8]], np.float32)\n    x3 = np.array([[9.7, 10.8]], np.float32)\n    x4 = np.array([[6.7, 12.8]], np.float32)\n    x5 = np.array([[8.7, 13.8]], np.float32)\n    y = (x1 + x2 + x3) + (x4 - x5)\n    instances = [{\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n"
  },
  {
    "path": "tests/ut/python/tests/test_model_context.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Test Model DeviceInfo\"\"\"\n\nimport os\nimport numpy as np\nfrom common import serving_test, start_serving_server, create_client\nfrom mindspore_serving.server.register import Context, GPUDeviceInfo, CPUDeviceInfo\nfrom mindspore_serving.server.register import AscendDeviceInfo, GpuOptions, AclOptions\n\n\n@serving_test\ndef test_model_context_device_info_set_get_success():\n    \"\"\"\n    Feature: Model Device info\n    Description: Test set and get device info\n    Expectation: the values gotten are equal to the values set.\n    \"\"\"\n    try:\n        context = Context(thread_affinity_core_list=1)\n        assert False\n    except RuntimeError as e:\n        assert \"Parameter 'thread_affinity_core_list' should be tuple/list of int, but actually <class 'int'>\" in str(e)\n    context = Context(thread_num=3, thread_affinity_core_list=[1, 2, 3], enable_parallel=True)\n    model_context = context.model_context\n    assert model_context.thread_num == 3\n    assert set(model_context.thread_affinity_core_list) == {1, 2, 3}\n    assert model_context.enable_parallel == 1\n\n    # declare model and start_servable and load model and build model\n    gpu_device_info = GPUDeviceInfo(precision_mode=\"fp16\")\n    gpu_map = gpu_device_info.context_map\n    assert 
gpu_map[\"precision_mode\"] == \"fp16\"\n    assert gpu_map[\"device_type\"] == \"gpu\"\n\n    cpu_device_info = CPUDeviceInfo(precision_mode=\"fp16\")\n    cpu_map = cpu_device_info.context_map\n    assert cpu_map[\"precision_mode\"] == \"fp16\"\n    assert cpu_map[\"device_type\"] == \"cpu\"\n\n    ascend_device_info = AscendDeviceInfo(insert_op_cfg_path=\"some path of insert_op_cfg_path\",\n                                          input_format=\"NHWC1C0\",\n                                          input_shape=\"input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1\",\n                                          output_type=\"FP16\",\n                                          precision_mode=\"allow_mix_precision\",\n                                          op_select_impl_mode=\"high_precision\",\n                                          fusion_switch_config_path=\"some path of fusion_switch_config_path\",\n                                          buffer_optimize_mode=\"l1_and_l2_optimize\")\n    ascend310_map = ascend_device_info.context_map\n    assert ascend310_map[\"insert_op_cfg_path\"] == \"some path of insert_op_cfg_path\"\n    assert ascend310_map[\"input_format\"] == \"NHWC1C0\"\n    assert ascend310_map[\"input_shape\"] == \"input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1\"\n    assert ascend310_map[\"output_type\"] == \"FP16\"\n    assert ascend310_map[\"precision_mode\"] == \"allow_mix_precision\"\n    assert ascend310_map[\"op_select_impl_mode\"] == \"high_precision\"\n    assert ascend310_map[\"fusion_switch_config_path\"] == \"some path of fusion_switch_config_path\"\n    assert ascend310_map[\"buffer_optimize_mode\"] == \"l1_and_l2_optimize\"\n    assert ascend310_map[\"device_type\"] == \"ascend\"\n\n    context.append_device_info(gpu_device_info)\n    context.append_device_info(cpu_device_info)\n    context.append_device_info(ascend_device_info)\n\n    assert len(model_context.device_list) == 3\n    assert 
model_context.device_list[0][\"device_type\"] == \"gpu\"\n    assert model_context.device_list[1][\"precision_mode\"] == \"fp16\"\n    assert model_context.device_list[2][\"precision_mode\"] == \"allow_mix_precision\"\n\n\n@serving_test\ndef test_model_context_device_info_repeat_append_ascend_failed():\n    \"\"\"\n    Feature: Model Device info\n    Description: Repeat append AscendDeviceInfo\n    Expectation: raise RuntimeError\n    \"\"\"\n    context = Context()\n    context.append_device_info(AscendDeviceInfo())\n    try:\n        context.append_device_info(AscendDeviceInfo())\n        assert False\n    except RuntimeError as e:\n        assert \"Device info of type ascend has already been appended\" in str(e)\n\n\n@serving_test\ndef test_model_context_options_set_get_success():\n    \"\"\"\n    Feature: Model options\n    Description: Test set and get options\n    Expectation: the values gotten are equal to the values set.\n    \"\"\"\n    gpu_options = GpuOptions(precision_mode=\"fp16\")\n    gpu_device_list = gpu_options.context.model_context.device_list\n\n    assert gpu_device_list[0][\"device_type\"] == \"gpu\"\n    assert gpu_device_list[0][\"precision_mode\"] == \"fp16\"\n\n    acl_options = AclOptions(insert_op_cfg_path=\"some path of insert_op_cfg_path\",\n                             input_format=\"NHWC1C0\",\n                             input_shape=\"input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1\",\n                             output_type=\"FP16\",\n                             precision_mode=\"allow_mix_precision\",\n                             op_select_impl_mode=\"high_precision\",\n                             fusion_switch_config_path=\"some path of fusion_switch_config_path\",\n                             buffer_optimize_mode=\"l1_and_l2_optimize\")\n    acl_device_list = acl_options.context.model_context.device_list\n\n    assert acl_device_list[0][\"insert_op_cfg_path\"] == \"some path of insert_op_cfg_path\"\n    assert 
acl_device_list[0][\"input_format\"] == \"NHWC1C0\"\n    assert acl_device_list[0][\"input_shape\"] == \"input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1\"\n    assert acl_device_list[0][\"output_type\"] == \"FP16\"\n    assert acl_device_list[0][\"precision_mode\"] == \"allow_mix_precision\"\n    assert acl_device_list[0][\"op_select_impl_mode\"] == \"high_precision\"\n    assert acl_device_list[0][\"fusion_switch_config_path\"] == \"some path of fusion_switch_config_path\"\n    assert acl_device_list[0][\"buffer_optimize_mode\"] == \"l1_and_l2_optimize\"\n    assert acl_device_list[0][\"device_type\"] == \"ascend\"\n\n\n@serving_test\ndef test_model_context_gpu_device_info_serving_server_success():\n    \"\"\"\n    Feature: Model Device info\n    Description: Test set gpu device info\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nfrom mindspore_serving.server.register import Context, GPUDeviceInfo, CPUDeviceInfo\nfrom mindspore_serving.server.register import AscendDeviceInfo, GpuOptions, AclOptions\n\ncontext = Context()\ncontext.append_device_info(GPUDeviceInfo(precision_mode=\"fp16\"))\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False,\n                               context = context)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    os.environ[\"SERVING_ENABLE_GPU_DEVICE\"] = \"1\"\n    base = start_serving_server(servable_content, device_type=\"GPU\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    
print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_model_context_cpu_device_info_serving_server_success():\n    \"\"\"\n    Feature: Model Device info\n    Description: Test set cpu device info\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nfrom mindspore_serving.server.register import Context, GPUDeviceInfo, CPUDeviceInfo\nfrom mindspore_serving.server.register import AscendDeviceInfo, GpuOptions, AclOptions\n\ncontext = Context()\ncontext.append_device_info(CPUDeviceInfo(precision_mode=\"fp16\"))\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False,\n                               context = context)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    os.environ[\"SERVING_ENABLE_CPU_DEVICE\"] = \"1\"\n    base = start_serving_server(servable_content, device_type=\"CPU\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_model_context_ascend_device_info_serving_server_success():\n    \"\"\"\n    Feature: Model Device info\n    Description: Test set ascend device info\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nfrom mindspore_serving.server.register import Context, GPUDeviceInfo, CPUDeviceInfo\nfrom mindspore_serving.server.register import AscendDeviceInfo, GpuOptions, 
AclOptions\n\ncontext = Context()\ncontext.append_device_info(AscendDeviceInfo(input_format=\"NHWC1C0\"))\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False,\n                               context = context)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, device_type=\"Ascend\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_model_context_all_device_info_serving_server_success():\n    \"\"\"\n    Feature: Model Device info\n    Description: Test set cpu, gpu, ascend device info, and serving select one device info based on inference so\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nfrom mindspore_serving.server.register import Context, GPUDeviceInfo, CPUDeviceInfo\nfrom mindspore_serving.server.register import AscendDeviceInfo, GpuOptions, AclOptions\n\ncontext = Context()\ncontext.append_device_info(AscendDeviceInfo(input_format=\"NHWC1C0\"))\ncontext.append_device_info(GPUDeviceInfo(precision_mode=\"fp16\"))\ncontext.append_device_info(CPUDeviceInfo(precision_mode=\"fp16\"))\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False,\n                               context = context)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n   
 \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_model_context_acl_options_serving_server_success():\n    \"\"\"\n    Feature: Model Device info\n    Description: Test set ascend options\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nfrom mindspore_serving.server.register import Context, GPUDeviceInfo, CPUDeviceInfo\nfrom mindspore_serving.server.register import AscendDeviceInfo, GpuOptions, AclOptions\n\noptions = AclOptions(input_format=\"NHWC1C0\")\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False,\n                               options = options)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_model_context_gpu_options_serving_server_success():\n    \"\"\"\n    Feature: Model Device info\n    Description: Test set gpu options\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = 
r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nfrom mindspore_serving.server.register import Context, GPUDeviceInfo, CPUDeviceInfo\nfrom mindspore_serving.server.register import AscendDeviceInfo, GpuOptions, AclOptions\n\noptions = GpuOptions(precision_mode=\"fp16\")\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False,\n                               options = options)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_model_context_gpu_options_invalid_parameter_failed():\n    \"\"\"\n    Feature: Model Device info\n    Description: Test set gpu options\n    Expectation: Serving server start failed.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nfrom mindspore_serving.server.register import Context, GPUDeviceInfo, CPUDeviceInfo\nfrom mindspore_serving.server.register import AscendDeviceInfo, GpuOptions, AclOptions\n\noptions = GpuOptions(precision_mode=\"origi\")\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False,\n                               options = options)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert 
False\n    except RuntimeError as e:\n        assert \"Gpu device info 'precision_mode' can only be 'origin', 'fp16'\" in str(e)\n\n\n@serving_test\ndef test_model_context_gpu_options_invalid_parameter2_failed():\n    \"\"\"\n    Feature: Model Device info\n    Description: Test set gpu options\n    Expectation: Serving server start failed.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nfrom mindspore_serving.server.register import Context, GPUDeviceInfo, CPUDeviceInfo\nfrom mindspore_serving.server.register import AscendDeviceInfo, GpuOptions, AclOptions\n\noptions = GpuOptions(precision_xxx_mode=\"origin\")\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False,\n                               options = options)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except RuntimeError as e:\n        assert \"Set gpu device info failed, unsupported option precision_xxx_mode\" in str(e)\n\n\n@serving_test\ndef test_model_context_gpu_cpu_device_device_ids_none_serving_server_success():\n    \"\"\"\n    Feature: Model Device info\n    Description: device_ids=None, and support GPU, CPU, running on CPU\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    os.environ[\"SERVING_ENABLE_GPU_DEVICE\"] = \"1\"\n    os.environ[\"SERVING_ENABLE_CPU_DEVICE\"] = \"1\"\n    base = 
start_serving_server(servable_content, device_type=None, device_ids=None)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_model_context_only_support_gpu_device_device_ids_none_serving_server_failed():\n    \"\"\"\n    Feature: Model Device info\n    Description: device_ids=None, and only support GPU, running on CPU failed\n    Expectation: Serving server startup failed.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    os.environ[\"SERVING_ENABLE_GPU_DEVICE\"] = \"1\"\n    try:\n        start_serving_server(servable_content, device_type=None, device_ids=None)\n    except RuntimeError as e:\n        assert \"has models declared by declare_model, but parameter 'device_ids' of ServableStartConfig is not set in\" \\\n               \" Serving startup script when the MindSpore or Lite inference package not support CPU\" in str(e)\n"
  },
  {
    "path": "tests/ut/python/tests/test_multi_model.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\n\nfrom common import serving_test, create_client\nfrom common import start_serving_server\n\n\ndef is_float_equal(left, right):\n    return (np.abs(left - right) < 0.00001).all()\n\n\n@serving_test\ndef test_multi_model_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ntensor_sub = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_sub, y, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 - x3\n        instances.append({\"x1\": x1, 
\"x2\": x2, \"x3\": x3})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_multi_model_2_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ntensor_sub = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_sub, y, x3, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x4, outputs_count=1)\n    y = register.add_stage(tensor_sub, y, x5, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n    # Client\n    instances = []\n    ys = []\n    for i in range(10):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x4 = np.array([[2.5, 3.3], [4.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x5 = np.array([[2.7, 3.8], [4.9, 5.0]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 - x3 + x4 - x5\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    for i in 
range(10):\n        assert is_float_equal(result[i][\"y\"], ys[i])\n\n\n@serving_test\ndef test_multi_model_with_batch_dim_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\ntensor_sub = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_sub, y, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 - x3\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_multi_model_with_function_front_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ntensor_sub = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef test(x1, x2):\n    return x1+x2+1\n\n@register.register_method(output_names=\"y\")\ndef 
predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(test, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_sub, y, x3, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x4, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x4 = np.array([[2.5, 3.3], [4.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x5 = np.array([[2.7, 3.8], [4.9, 5.0]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 + 1 - x3 + x4\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_multi_model_with_function_tail_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ntensor_sub = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef test(x1, x2):\n    return x1+x2+1\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(tensor_sub, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x3, outputs_count=1)\n    y = register.add_stage(test, y, x4, outputs_count=1)\n    return y\n    
\"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x4 = np.array([[2.5, 3.3], [4.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x5 = np.array([[2.7, 3.8], [4.9, 5.0]], np.float32) * 1.1 * (i + 1)\n        y = x1 - x2 + x3 + x4 + 1\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_multi_model_with_function_mid_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ntensor_sub = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef test(x1, x2):\n    return x1+x2+1\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(tensor_sub, x1, x2, outputs_count=1)\n    y = register.add_stage(test, y, x3, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x4, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], 
np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x4 = np.array([[2.5, 3.3], [4.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x5 = np.array([[2.7, 3.8], [4.9, 5.0]], np.float32) * 1.1 * (i + 1)\n        y = x1 - x2 + x3 + 1 + x4\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_multi_model_with_function_interlace_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ntensor_sub = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef test(x1, x2):\n    return x1+x2+1\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5, x6):\n    y = register.add_stage(test, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_sub, y, x3, outputs_count=1)\n    y = register.add_stage(test, y, x4, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x5, outputs_count=1)\n    y = register.add_stage(test, y, x6, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n       
 x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x4 = np.array([[2.5, 3.3], [4.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x5 = np.array([[2.7, 3.8], [4.9, 5.0]], np.float32) * 1.1 * (i + 1)\n        x6 = np.array([[3.7, 4.8], [5.9, 6.0]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 - x3 + x4 + x5 + x6 + 3\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5, \"x6\": x6})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_multi_model_with_function_call_model_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\ntensor_sub = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef add_test(x1, x2):\n    return tensor_add.call(x1, x2)\n    \ndef sub_test(x1, x2):\n    return tensor_sub.call(x1, x2)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5):\n    y = register.add_stage(add_test, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_sub, y, x3, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x4, outputs_count=1)\n    y = register.add_stage(sub_test, y, x5, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 
* (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x4 = np.array([[2.5, 3.3], [4.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x5 = np.array([[2.7, 3.8], [4.9, 5.0]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 - x3 + x4 - x5\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_multi_model_diff_input_output_count_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add_3_2.mindir\", model_format=\"MindIR\", with_batch_dim=True)\ntensor_sub = register.declare_model(model_file=\"tensor_sub_2_3.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\n@register.register_method(output_names=[\"y1\", \"y2\", \"y3\"])\ndef predict(x1, x2, x3):\n    y1, y2 = register.add_stage(tensor_add, x1, x2, x3, outputs_count=2)\n    y1, y2, y3 = register.add_stage(tensor_sub, y1, y2, outputs_count=3)\n    return y1, y2, y3\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[\"tensor_add_3_2.mindir\", \"tensor_sub_2_3.mindir\"])\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        # for tensor_add_3_2\n        y1 = x1 + x2 + x3\n        y2 = y1 + 1\n        # for tensor_sub_2_3\n        y1 = y1 - y2\n        y2 = y1 + 1\n        y3 = y1 + 2\n\n        instances.append({\"x1\": x1, \"x2\": x2, 
\"x3\": x3})\n        ys.append([y1, y2, y3])\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y1\"], ys[0][0])\n    assert is_float_equal(result[0][\"y2\"], ys[0][1])\n    assert is_float_equal(result[0][\"y3\"], ys[0][2])\n\n    assert is_float_equal(result[1][\"y1\"], ys[1][0])\n    assert is_float_equal(result[1][\"y2\"], ys[1][1])\n    assert is_float_equal(result[1][\"y3\"], ys[1][2])\n\n    assert is_float_equal(result[2][\"y1\"], ys[2][0])\n    assert is_float_equal(result[2][\"y2\"], ys[2][1])\n    assert is_float_equal(result[2][\"y3\"], ys[2][2])\n"
  },
  {
    "path": "tests/ut/python/tests/test_python_parallel.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport os\n\nimport numpy as np\n\nfrom common import ServingTestBase\nfrom common import serving_test, create_client\nfrom mindspore_serving import server\n\n\ndef start_serving_server(servable_content, model_file=\"tensor_add.mindir\", parallel_number=0, device_ids=0):\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content, model_file=model_file)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=device_ids,\n                                                      num_parallel_workers=parallel_number, version_number=1))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    return base\n\n\ndef is_float_equal(left, right):\n    return (np.abs(left - right) < 0.00001).all()\n\n\ndef check_infer_log(servable_name, version, device_id, extra_id):\n    if device_id is not None:\n        log_file = f\"serving_logs/log_{servable_name}_device{device_id}_version{version}.log\"\n    else:\n        log_file = f\"serving_logs/log_{servable_name}_extra{extra_id}_version{version}.log\"\n    if not os.path.isfile(log_file):\n        print(f\"Not found log file {log_file}\", flush=True)\n        return False\n    with open(log_file) as fp:\n        text = fp.read()\n    if \"WorkerRequestHandle Time 
Cost\" not in text:\n        print(f\"Not found log 'WorkerRequestHandle Time Cost' in log file {log_file}\", flush=True)\n        return False\n    print(f\"Found log 'WorkerRequestHandle Time Cost' in log file {log_file}\", flush=True)\n    return True\n\n\n@serving_test\ndef test_python_parallel_without_model_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef function_test(x1, x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, parallel_number=2, device_ids=0)\n    # Client\n    ys = []\n    instances = []\n    for i in range(20):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2})\n        ys.append(x1 + x2)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    for i in range(len(instances)):\n        assert is_float_equal(result[i][\"y\"], ys[i])\n    assert check_infer_log(base.servable_name, base.version_number, device_id=0, extra_id=None)\n    assert check_infer_log(base.servable_name, base.version_number, device_id=None, extra_id=0)\n\n\n@serving_test\ndef test_python_parallel_with_model_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef function_test(x1, x2):\n    return x1+1, x2+1\n\ndef function_test2(y):\n    return y + 
1\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    x1, x2 = register.add_stage(function_test, x1, x2, outputs_count=2)\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    y = register.add_stage(function_test2, y, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, parallel_number=2, device_ids=0)\n    # Client\n    ys = []\n    instances = []\n    for i in range(20):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2})\n        ys.append(x1 + x2 + 3)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    for i in range(len(instances)):\n        assert is_float_equal(result[i][\"y\"], ys[i])\n    assert check_infer_log(base.servable_name, base.version_number, device_id=0, extra_id=None)\n    assert check_infer_log(base.servable_name, base.version_number, device_id=None, extra_id=0)\n\n\n@serving_test\ndef test_python_parallel_with_call_model_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef function_call_model(x1, x2):\n    return model.call(x1, x2)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y1 = register.add_stage(function_call_model, x1, x2, outputs_count=1)\n    y2 = register.add_stage(model, x3, x4, outputs_count=1)\n    y = register.add_stage(function_call_model, y1, y2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, parallel_number=2, device_ids=0)\n    # Client\n    ys = []\n    instances = []\n    for i in range(20):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], 
np.float32) * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * (i + 1)\n        x3 = np.array([[3.1, 4.2], [5.3, 6.4]], np.float32) * (i + 1)\n        x4 = np.array([[0.5, 9.6], [8.7, 7.8]], np.float32) * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4})\n        y = (x1 + x2) + (x3 + x4)\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    for i in range(len(instances)):\n        assert is_float_equal(result[i][\"y\"], ys[i])\n    assert check_infer_log(base.servable_name, base.version_number, device_id=0, extra_id=None)\n    assert check_infer_log(base.servable_name, base.version_number, device_id=None, extra_id=0)\n\n\n@serving_test\ndef test_python_parallel_with_call_model_multi_process_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef function_call_model(x1, x2):\n    return model.call(x1, x2)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y1 = register.add_stage(function_call_model, x1, x2, outputs_count=1)\n    y2 = register.add_stage(model, x3, x4, outputs_count=1)\n    y = register.add_stage(function_call_model, y1, y2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, parallel_number=4, device_ids=(0, 1))\n    # Client\n    ys = []\n    instances = []\n    for i in range(20):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * (i + 1)\n        x3 = np.array([[3.1, 4.2], [5.3, 6.4]], np.float32) * (i + 1)\n        x4 = np.array([[0.5, 9.6], [8.7, 7.8]], np.float32) * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, 
\"x4\": x4})\n        y = (x1 + x2) + (x3 + x4)\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    for i in range(len(instances)):\n        assert is_float_equal(result[i][\"y\"], ys[i])\n    assert check_infer_log(base.servable_name, base.version_number, device_id=0, extra_id=None)\n    assert check_infer_log(base.servable_name, base.version_number, device_id=1, extra_id=None)\n    assert check_infer_log(base.servable_name, base.version_number, device_id=None, extra_id=0)\n    assert check_infer_log(base.servable_name, base.version_number, device_id=None, extra_id=1)\n\n\n@serving_test\ndef test_python_parallel_with_call_model_with_batch_size_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\ndef function_call_model(x1, x2):\n    return model.call(x1, x2)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y1 = register.add_stage(function_call_model, x1, x2, outputs_count=1)\n    y2 = register.add_stage(model, x3, x4, outputs_count=1)\n    y = register.add_stage(function_call_model, y1, y2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, parallel_number=2, device_ids=0)\n    # Client\n    ys = []\n    instances = []\n    for i in range(20):\n        x1 = np.array([[3.3, 4.4]], np.float32) * (i + 1)\n        x2 = np.array([[7.7, 8.8]], np.float32) * (i + 1)\n        x3 = np.array([[5.3, 6.4]], np.float32) * (i + 1)\n        x4 = np.array([[8.7, 7.8]], np.float32) * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4})\n        y = (x1 + x2) + (x3 + x4)\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    
result = client.infer(instances)\n    print(\"result\", result)\n    for i in range(len(instances)):\n        assert is_float_equal(result[i][\"y\"], ys[i])\n    assert check_infer_log(base.servable_name, base.version_number, device_id=0, extra_id=None)\n    assert check_infer_log(base.servable_name, base.version_number, device_id=None, extra_id=0)\n\n\n@serving_test\ndef test_python_parallel_multi_models_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nadd_model = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\nsub_model = register.declare_model(model_file=\"tensor_sub.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef function_call_model(x1, x2):\n    return add_model.call(x1, x2)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y1 = register.add_stage(add_model, x1, x2, outputs_count=1)\n    y2 = register.add_stage(sub_model, x3, x4, outputs_count=1)\n    y = register.add_stage(function_call_model, y1, y2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, parallel_number=2, device_ids=0,\n                                model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n    # Client\n    ys = []\n    instances = []\n    for i in range(20):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * (i + 1)\n        x3 = np.array([[3.1, 4.2], [5.3, 6.4]], np.float32) * (i + 1)\n        x4 = np.array([[0.5, 9.6], [8.7, 7.8]], np.float32) * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4})\n        y = (x1 + x2) + (x3 - x4)\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    for i in range(len(instances)):\n        
assert is_float_equal(result[i][\"y\"], ys[i])\n    assert check_infer_log(base.servable_name, base.version_number, device_id=0, extra_id=None)\n    assert check_infer_log(base.servable_name, base.version_number, device_id=None, extra_id=0)\n\n\n@serving_test\ndef test_python_parallel_multi_models_diff_input_output_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nadd_model = register.declare_model(model_file=\"tensor_add_2_3.mindir\", model_format=\"MindIR\", with_batch_dim=False)\nsub_model = register.declare_model(model_file=\"tensor_sub_3_2.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef function_call_model(x1, x2):\n    return x1 + x2\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5):\n    _,y1,_ = register.add_stage(add_model, x1, x2, outputs_count=3) # 2 input, 3 output\n    _, y2 = register.add_stage(sub_model, x3, x4, x5, outputs_count=2) # 3 input, 2 output\n    y = register.add_stage(function_call_model, y1, y2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, parallel_number=2, device_ids=0,\n                                model_file=[\"tensor_add_2_3.mindir\", \"tensor_sub_3_2.mindir\"])\n    # Client\n    ys = []\n    instances = []\n    for i in range(20):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * (i + 1)\n        x3 = np.array([[3.1, 4.2], [5.3, 6.4]], np.float32) * (i + 1)\n        x4 = np.array([[0.5, 9.6], [8.7, 7.8]], np.float32) * (i + 1)\n        x5 = np.array([[0.2, 9.5], [8.2, 7.1]], np.float32) * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5})\n        y = (x1 + x2 + 1) + (x3 - x4 - x5 + 1)\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    
print(\"result\", result)\n    for i in range(len(instances)):\n        assert is_float_equal(result[i][\"y\"], ys[i])\n    assert check_infer_log(base.servable_name, base.version_number, device_id=0, extra_id=None)\n    assert check_infer_log(base.servable_name, base.version_number, device_id=None, extra_id=0)\n"
  },
  {
    "path": "tests/ut/python/tests/test_register_method.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test Serving pipeline with client\"\"\"\n\nimport numpy as np\n\nfrom common import ServingTestBase\nfrom common import serving_test, create_client\nfrom common import start_serving_server\nfrom mindspore_serving import server\n\n\n@serving_test\ndef test_register_method_with_model_success():\n    \"\"\"\n    Feature: test register method\n    Description: method with only python function stage, python function has model.call\n    Expectation: success to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef call_model(x1, x2):\n    y = model.call(x1, x2)\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(call_model, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, version_number=1, start_version_number=1)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, 
\"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_register_method_without_add_stage_success():\n    \"\"\"\n    Feature: test register method\n    Description: method without any stages\n    Expectation: success to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=[\"x1\", \"x2\"])\ndef predict(x1, x2):\n    return x1, x2\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}] * 3\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert len(result) == 3\n    assert (result[0][\"x1\"] == x1).all()\n    assert (result[0][\"x2\"] == x2).all()\n\n\n@serving_test\ndef test_register_method_without_register_method_failed():\n    \"\"\"\n    Feature: test register method\n    Description: without any methods\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n    \"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except RuntimeError as e:\n        assert \"There is no method registered for servable\" in str(e)\n\n\n@serving_test\ndef test_register_method_two_input_one_output_one_model_stage_input_more_failed():\n    \"\"\"\n    Feature: test register method\n    Description: model input count 
not equal to model stage input count\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(tensor_add, x1, x2, x3, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"The inputs count 3 in register_method not equal to the count 2 defined in model\" in str(e)\n\n\n@serving_test\ndef test_register_method_two_input_one_output_one_model_stage_input_less_failed():\n    \"\"\"\n    Feature: test register method\n    Description: model input count not equal to model stage input count\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(tensor_add, x1, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"The inputs count 1 in register_method not equal to the count 2 defined in model\" in str(e)\n\n\n@serving_test\ndef test_register_method_two_input_one_output_one_model_stage_input_less2_failed():\n    \"\"\"\n    Feature: test register method\n    Description: model input count not equal to some model stage input count\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as 
np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_add, y, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"RegisterInputOutputInfo failed, inputs count 1 not match old count 2\" in str(e)\n\n\n@serving_test\ndef test_register_method_two_input_one_output_one_model_stage_input_less3_failed():\n    \"\"\"\n    Feature: test register method\n    Description: model input count not equal to model stage input count\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(tensor_add, x1, outputs_count=1)\n    return y\n    \n@register.register_method(output_names=\"y\")\ndef predict2(x1, x2, x3):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"RegisterInputOutputInfo failed, inputs count 2 not match old count 1\" in str(e)\n\n\n@serving_test\ndef test_register_method_two_input_one_output_one_model_stage_with_batch_dim_input_more_failed():\n    \"\"\"\n    Feature: test register method\n    Description: model input count not equal to model stage input count, with_batch_dim is True\n    Expectation: failed to 
start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(tensor_add, x1, x2, x3, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"The inputs count 3 in register_method not equal to the count 2 defined in model\" in str(e)\n\n\n@serving_test\ndef test_register_method_two_input_one_output_one_model_stage_with_batch_dim_input_less_failed():\n    \"\"\"\n    Feature: test register method\n    Description: model input count not equal to model stage input count, with_batch_dim is True\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(tensor_add, x1, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"The inputs count 1 in register_method not equal to the count 2 defined in model\" in str(e)\n\n\n@serving_test\ndef test_register_method_two_input_two_output_one_model_stage_output_more_failed():\n    \"\"\"\n    Feature: test register method\n    Description: model output count not equal to model stage output count, with_batch_dim is True\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as 
np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add_2_2.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2, y3 = register.add_stage(tensor_add, x1, x2, outputs_count=3)\n    return y1, y2\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add_2_2.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"The outputs count 3 in register_method not equal to the count 2 defined in model\" in str(e)\n\n\n@serving_test\ndef test_register_method_three_input_two_output_one_model_stage_output_less_failed():\n    \"\"\"\n    Feature: test register method\n    Description: model output count not equal to model stage output count, with_batch_dim is True\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add_2_3.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(tensor_add, x1, x2, outputs_count=2)\n    return y1, y2\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add_2_3.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"The outputs count 2 in register_method not equal to the count 3 defined in model\" in str(e)\n\n\n@serving_test\ndef test_register_method_three_input_two_output_one_model_stage_output_less2_failed():\n    \"\"\"\n    Feature: test register method\n    Description: model output count not equal to some model stage output count, with_batch_dim is True\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server 
import register\ntensor_add = register.declare_model(model_file=\"tensor_add_2_3.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2, y3 = register.add_stage(tensor_add, x1, x2, outputs_count=3)\n    y1, y2 = register.add_stage(tensor_add, y1, y2, outputs_count=2)\n    return y1, y2\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add_2_3.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"RegisterInputOutputInfo failed, outputs count 2 not match old count 3\" in str(e)\n\n\n@serving_test\ndef test_register_method_three_input_two_output_one_model_stage_output_less3_failed():\n    \"\"\"\n    Feature: test register method\n    Description: model output count not equal to some model stage output count, with_batch_dim is True\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add_2_3.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2, y3 = register.add_stage(tensor_add, x1, x2, outputs_count=3)\n    return y1, y2\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict2(x1, x2):\n    y1, y2 = register.add_stage(tensor_add, x1, x2, outputs_count=2)\n    return y1, y2\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add_2_3.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"RegisterInputOutputInfo failed, outputs count 2 not match old count 3\" in str(e)\n\n\n@serving_test\ndef test_register_method_model_file_repeat_failed():\n    \"\"\"\n    Feature: test register method\n    Description: same model file repeatedly used in diff declare_model\n    
Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\ntensor_add2 = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\n@register.register_method(output_names=[\"y\"])\ndef predict(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"model file 'tensor_add.mindir' has already been used\" in str(e)\n\n\n@serving_test\ndef test_register_method_model_file_repeat2_failed():\n    \"\"\"\n    Feature: test register method\n    Description: same model file repeatedly used in diff declare_model\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"], model_format=\"MindIR\")\ntensor_add2 = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\n@register.register_method(output_names=[\"y\"])\ndef predict(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n        assert False\n    except RuntimeError as e:\n        assert \"model file 'tensor_add.mindir' has already been used\" in str(e)\n\n\n@serving_test\ndef test_register_method_model_file_repeat3_failed():\n    \"\"\"\n    Feature: test register method\n    Description: same model file repeatedly used in diff declare_model\n    Expectation: failed to start 
serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add2 = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\ntensor_add = register.declare_model(model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"], model_format=\"MindIR\")\n\n@register.register_method(output_names=[\"y\"])\ndef predict(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=[\"tensor_add.mindir\", \"tensor_sub.mindir\"])\n        assert False\n    except RuntimeError as e:\n        assert \"model file 'tensor_add.mindir' has already been used\" in str(e)\n\n\n@serving_test\ndef test_register_method_method_registered_repeat_failed():\n    \"\"\"\n    Feature: test register method\n    Description: methods with same name\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n\"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except RuntimeError as e:\n        assert \"Method add_cast has been registered more than once.\" in str(e)\n\n\n@serving_test\ndef test_register_method_input_arg_invalid_failed():\n    \"\"\"\n    Feature: test register method\n    Description: method input args invalid\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nfrom mindspore_serving.server import register\ntensor_add = 
register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, **x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n\"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except RuntimeError as e:\n        assert \"'add_cast' input x2 cannot be VAR_KEYWORD !\" in str(e)\n\n\n@serving_test\ndef test_register_method_input_arg_invalid2_failed():\n    \"\"\"\n    Feature: test register method\n    Description: method input args invalid\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, *x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n\"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except RuntimeError as e:\n        assert \"'add_cast' input x2 cannot be VAR_POSITIONAL !\" in str(e)\n\n\n@serving_test\ndef test_register_method_function_stage_invalid_input_failed():\n    \"\"\"\n    Feature: test register method\n    Description: stage input args invalid\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\ndef func_test(x1, x2):\n    return x1+1, x2+1\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.add_stage(func_test, x1, np.ones([2,2]), outputs_count=2)\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n\"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except 
RuntimeError as e:\n        assert \"Each value of parameter *args is a placeholder for data and\" in str(e)\n\n\n@serving_test\ndef test_register_method_function_stage_invalid_input2_failed():\n    \"\"\"\n    Feature: test register method\n    Description: stage input args invalid\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\ndef postprocess(y, data):\n    return y\n    \n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    y = register.add_stage(postprocess, y, np.ones([2,2]), outputs_count=1)\n    return y\n\"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except RuntimeError as e:\n        assert \"Each value of parameter *args is a placeholder for data and\" in str(e)\n\n\n@serving_test\ndef test_register_method_model_stage_invalid_input_failed():\n    \"\"\"\n    Feature: test register method\n    Description: stage input args invalid\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.add_stage(tensor_add, x1, np.ones([2,2]), outputs_count=1)\n    return y\n\"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except RuntimeError as e:\n        assert \"Each value of parameter *args is a placeholder for data and\" in str(e)\n\n\n@serving_test\ndef test_register_method_invalid_return_failed():\n    \"\"\"\n    Feature: test register method\n    Description: method return invalid\n    
Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n    \n@register.register_method(output_names=[\"y\", \"data\"])\ndef add_cast(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y, np.ones([2,2])\n\"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except RuntimeError as e:\n        assert \"Each value returned is a placeholder for data and must come from the method\" in str(e)\n\n\n@serving_test\ndef test_register_method_function_stage_batch_input_count_not_same_failed():\n    \"\"\"\n    Feature: test register method\n    Description: function stage input count diff in diff method\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        results.append([y1, y2])\n    return results\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    x1, x2 = register.add_stage(func_test_batch, x1, x2, outputs_count=2, batch_size=4)\n    return y\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast2(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    x1, x2 = register.add_stage(func_test_batch, x1, x2, y, outputs_count=2, batch_size=4)\n    return y\n\"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        
server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert f\"'{base.servable_name}.func_test_batch' inputs count 3 \" \\\n               f\"not match last registered count 2\" in str(e)\n\n\n@serving_test\ndef test_register_method_function_stage_batch_input_count_not_same2_failed():\n    \"\"\"\n    Feature: test register method\n    Description: function stage input count diff in diff method\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        results.append([y1, y2])\n    return results\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.add_stage(func_test_batch, x1, x2, outputs_count=2, batch_size=4)\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast2(x1, x2, x3):\n    x1, x2 = register.add_stage(func_test_batch, x1, x2, x3, outputs_count=2, batch_size=4)\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n\"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert f\"'{base.servable_name}.func_test_batch' inputs count 3 \" \\\n               f\"not match last registered count 2\" in str(e)\n\n\n@serving_test\ndef 
test_register_method_function_stage_batch_output_count_not_same_failed():\n    \"\"\"\n    Feature: test register method\n    Description: function stage output count diff in diff method\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        results.append([y1, y2])\n    return results\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    x1, x2 = register.add_stage(func_test_batch, x1, x2, outputs_count=2, batch_size=4)\n    return y\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast2(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    x1, x2, x3 = register.add_stage(func_test_batch, x1, x2, outputs_count=3, batch_size=4)\n    return y\n\"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert f\"'{base.servable_name}.func_test_batch' outputs count 3 \" \\\n               f\"not match last registered count 2\" in str(e)\n\n\n@serving_test\ndef test_register_method_function_stage_batch_output_count_not_same2_failed():\n    \"\"\"\n    Feature: test register method\n    Description: function stage output count diff in diff method\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = 
register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        results.append([y1, y2])\n    return results\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.add_stage(func_test_batch, x1, x2, outputs_count=2, batch_size=4)\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast2(x1, x2):\n    x1, x2, x3 = register.add_stage(func_test_batch, x1, x2, outputs_count=3, batch_size=4)\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n\"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert f\"'{base.servable_name}.func_test_batch' outputs count 3 \" \\\n               f\"not match last registered count 2\" in str(e)\n\n\n@serving_test\ndef test_register_method_method_output_count_not_match_output_names_failed():\n    \"\"\"\n    Feature: test register method\n    Description: outputs count registered not equal to the count return in function\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y, x2\n\"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        
server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"Method return output size 2 not match registered 1\" in str(e)\n\n\n@serving_test\ndef test_register_method_method_python_function_batch_size_exist_inconsistently_failed():\n    \"\"\"\n    Feature: test register method\n    Description: python function used in multi add_stage, one with batch_size, other without batch_size\n    Expectation: failed to start serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\ndef stage_test_fun(x1, x2):\n    return x1+x2\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.add_stage(stage_test_fun, x1, x2, outputs_count=1)\n    return y\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.add_stage(stage_test_fun, x1, x2, outputs_count=1, batch_size=4)\n    return y\n\"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"parameter 'batch_size' in multiple 'add_stage' should be enabled or disabled consistently\" in str(e)\n"
  },
  {
    "path": "tests/ut/python/tests/test_restful_base64_data.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test Serving RESTful, with master, worker and client\"\"\"\n\nimport base64\n\nimport numpy as np\n\nfrom common import ServingTestBase, serving_test\nfrom common import servable_config_import, servable_config_declare_servable\nfrom common_restful import compare_float_value, check_number_result, post_restful\nfrom common_restful import start_str_restful_server, start_bytes_restful_server, start_bool_int_float_restful_server\nfrom mindspore_serving import server\n\n\ndef b64_decode_to_str(a):\n    return bytes.decode(base64.b64decode(a[\"b64\"]))\n\n\ndef common_test_restful_base64_str_scalar_input_output_success(shape):\n    base = start_str_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        if shape is None:\n            instance[\"text1\"] = {\"b64\": base64.b64encode(str.encode(str_a[i])).decode(), \"type\": \"str\"}\n            instance[\"text2\"] = {\"b64\": base64.b64encode(str.encode(str_b[i])).decode(), \"type\": \"str\"}\n        else:\n            instance[\"text1\"] = {\"b64\": base64.b64encode(str.encode(str_a[i])).decode(), \"type\": \"str\",\n                                 'shape': shape}\n            
instance[\"text2\"] = {\"b64\": base64.b64encode(str.encode(str_b[i])).decode(), \"type\": \"str\",\n                                 'shape': shape}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"str_concat\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"text\"] == str_a[0] + str_b[0]\n    assert result[1][\"text\"] == str_a[1] + str_b[1]\n    assert result[2][\"text\"] == str_a[2] + str_b[2]\n\n\n@serving_test\ndef test_restful_base64_str_scalar_input_output_success():\n    common_test_restful_base64_str_scalar_input_output_success(shape=None)\n\n\n@serving_test\ndef test_restful_base64_str_scalar_shape1_input_output_success():\n    common_test_restful_base64_str_scalar_input_output_success(shape=[1])\n\n\n@serving_test\ndef test_restful_base64_str_scalar_shape_empty_input_output_success():\n    common_test_restful_base64_str_scalar_input_output_success(shape=[])\n\n\n@serving_test\ndef test_restful_base64_empty_str_input_output_success():\n    base = start_str_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = {\"b64\": base64.b64encode(str.encode(str_a[i])).decode(), \"type\": \"str\"}\n        instance[\"text2\"] = {\"b64\": base64.b64encode(str.encode(str_b[i])).decode(), \"type\": \"str\"}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"str_empty\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"text\"] == \"\"\n    assert result[1][\"text\"] == \"456\"\n    assert result[2][\"text\"] == \"\"\n\n\n@serving_test\ndef test_restful_base64_str_scalar_invalid_shape0_input_failed():\n    base = start_str_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        
instance[\"text1\"] = {\"b64\": base64.b64encode(str.encode(str_a[i])).decode(), \"type\": \"str\", \"shape\": [0]}\n        instance[\"text2\"] = {\"b64\": base64.b64encode(str.encode(str_b[i])).decode(), \"type\": \"str\", \"shape\": [0]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"str_concat\", instances)\n    assert \"only support scalar when data type is string or bytes, please check 'type' or 'shape'\" \\\n           in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_base64_str_scalar_invalid_shape_input_failed():\n    base = start_str_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = {\"b64\": base64.b64encode(str.encode(str_a[i])).decode(), \"type\": \"str\", 'shape': [2]}\n        instance[\"text2\"] = {\"b64\": base64.b64encode(str.encode(str_b[i])).decode(), \"type\": \"str\", 'shape': [2]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"str_concat\", instances)\n    assert \"json object, only support scalar when data type is string or bytes, please check 'type' or 'shape'\" \\\n           in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_base64_str_1d_array_failed():\n    base = start_str_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = [{\"b64\": base64.b64encode(str.encode(str_a[i])).decode(), \"type\": \"str\"},\n                             {\"b64\": base64.b64encode(str.encode(str_a[i])).decode(), \"type\": \"str\"}]\n        instance[\"text2\"] = [{\"b64\": base64.b64encode(str.encode(str_b[i])).decode(), \"type\": \"str\"},\n                             {\"b64\": base64.b64encode(str.encode(str_b[i])).decode(), \"type\": 
\"str\"}]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"str_concat\", instances)\n    assert \"json array, string or bytes type only support one item\" in str(result[\"error_msg\"])\n\n\ndef common_test_restful_bytes_input_output_success(shape):\n    base = start_bytes_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        if shape is not None:\n            instance[\"text1\"] = {\"b64\": base64.b64encode(str.encode(str_a[i])).decode(), \"shape\": shape}\n            instance[\"text2\"] = {\"b64\": base64.b64encode(str.encode(str_b[i])).decode(), \"shape\": shape}\n        else:\n            instance[\"text1\"] = {\"b64\": base64.b64encode(str.encode(str_a[i])).decode()}\n            instance[\"text2\"] = {\"b64\": base64.b64encode(str.encode(str_b[i])).decode()}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bytes_concat\", instances)\n    result = result[\"instances\"]\n    assert b64_decode_to_str(result[0][\"text\"]) == str_a[0] + str_b[0]\n    assert b64_decode_to_str(result[1][\"text\"]) == str_a[1] + str_b[1]\n    assert b64_decode_to_str(result[2][\"text\"]) == str_a[2] + str_b[2]\n\n\n@serving_test\ndef test_restful_bytes_input_output_success():\n    common_test_restful_bytes_input_output_success(None)\n\n\n@serving_test\ndef test_restful_bytes_empty_shape_success():\n    common_test_restful_bytes_input_output_success([])\n\n\n@serving_test\ndef test_restful_bytes_shape1_success():\n    common_test_restful_bytes_input_output_success([1])\n\n\n@serving_test\ndef test_restful_empty_bytes_input_output_success():\n    base = start_bytes_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = {\"b64\": 
base64.b64encode(str.encode(str_a[i])).decode()}\n        instance[\"text2\"] = {\"b64\": base64.b64encode(str.encode(str_b[i])).decode()}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bytes_empty\", instances)\n    result = result[\"instances\"]\n    assert b64_decode_to_str(result[0][\"text\"]) == \"\"\n    assert b64_decode_to_str(result[1][\"text\"]) == \"456\"\n    assert b64_decode_to_str(result[2][\"text\"]) == \"\"\n\n\n@serving_test\ndef test_restful_bytes_1d_array_failed():\n    base = start_bytes_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = [{\"b64\": base64.b64encode(str.encode(str_a[i])).decode()},\n                             {\"b64\": base64.b64encode(str.encode(str_a[i])).decode()}]\n        instance[\"text2\"] = [{\"b64\": base64.b64encode(str.encode(str_b[i])).decode()},\n                             {\"b64\": base64.b64encode(str.encode(str_b[i])).decode()}]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bytes_concat\", instances)\n    assert \"json array, string or bytes type only support one item\" in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_bytes_invalid_shape_input_failed():\n    base = start_bytes_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = {\"b64\": base64.b64encode(str.encode(str_a[i])).decode(), 'shape': [0]}\n        instance[\"text2\"] = {\"b64\": base64.b64encode(str.encode(str_b[i])).decode(), 'shape': [0]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bytes_concat\", instances)\n    assert \"only support scalar when data type is string or bytes, please check 'type' or 'shape'\" \\\n           
in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_bool_scalar_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = np.int8(i % 2 == 0)\n        instance[\"bool_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"bool\"}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    result = result[\"instances\"]\n    assert not result[0][\"value\"]\n    assert result[1][\"value\"]\n    assert not result[2][\"value\"]\n\n\n@serving_test\ndef test_restful_base64_bool_1d_array_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = [(i % 2 == 0)] * (i + 1)\n        val = np.array(val)\n        instance[\"bool_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"bool\", \"shape\": [i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == [False]\n    assert result[1][\"value\"] == [True, True]\n    assert result[2][\"value\"] == [False, False, False]\n\n\n@serving_test\ndef test_restful_base64_bool_2d_array_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = (i % 2 == 0)\n        val = [[val] * (i + 1)] * (i + 1)\n        val = np.array(val)\n        instance[\"bool_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"bool\",\n                                \"shape\": [i + 1, i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == [[False]]\n    assert 
result[1][\"value\"] == [[True, True], [True, True]]\n    assert result[2][\"value\"] == [[False, False, False], [False, False, False], [False, False, False]]\n\n\n@serving_test\ndef test_restful_base64_int_scalar_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = np.int32(i * 2)\n        instance[\"int_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"int32\"}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"int_plus_1\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == 1\n    assert result[1][\"value\"] == 3\n    assert result[2][\"value\"] == 5\n\n\n@serving_test\ndef test_restful_base64_int_1d_empty_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        if i % 2 == 0:\n            val = []\n        else:\n            val = [i * 2] * (i + 1)\n        val = np.array(val).astype(np.int32)\n        instance[\"int_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"int32\", \"shape\": val.shape}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"int_plus_1\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == []\n    assert result[1][\"value\"] == [3, 3]\n    assert result[2][\"value\"] == []\n\n\n@serving_test\ndef test_restful_base64_int_2d_empty_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        if i % 2 == 0:\n            val = [[]]\n        else:\n            val = [i * 2] * (i + 1)\n        val = np.array(val).astype(np.int32)\n        instance[\"int_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"int32\", \"shape\": val.shape}\n\n 
   result = post_restful(\"localhost:5500\", base.servable_name, \"int_plus_1\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == [[]]\n    assert result[1][\"value\"] == [3, 3]\n    assert result[2][\"value\"] == [[]]\n\n\n@serving_test\ndef test_restful_base64_int_2d_empty_invalid_shape_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for _, instance in enumerate(instances):\n        val = [[]]\n        val = np.array(val).astype(np.int32)\n        instance[\"int_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"int32\", \"shape\": [1, 2, 0, 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"int_plus_1\", instances)\n    assert \"json object, key is 'shape', invalid shape value\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_int_1d_array_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = i * 2\n        val = [val] * (i + 1)\n        val = np.array(val).astype(np.int32)\n        instance[\"int_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"int32\", \"shape\": val.shape}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"int_plus_1\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == [1]\n    assert result[1][\"value\"] == [3, 3]\n    assert result[2][\"value\"] == [5, 5, 5]\n\n\ndef common_test_restful_base64_int_type_2d_array_input_output_success(dtype):\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    dtype_str_map = {np.int8: \"int8\", np.int16: \"int16\", np.int32: \"int32\", np.int64: \"int64\"}\n    assert dtype in dtype_str_map\n    for i, instance in enumerate(instances):\n        val = (i + 1) * 2 * (-1 if i % 2 == 0 else 1)  # -2, 4, -6\n        
val = [[val] * (i + 1)] * (i + 1)\n        val = np.array(val).astype(dtype)\n        instance[\"int_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': dtype_str_map[dtype],\n                               \"shape\": val.shape}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"int_plus_1\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == [[-1]]\n    assert result[1][\"value\"] == [[5, 5], [5, 5]]\n    assert result[2][\"value\"] == [[-5, -5, -5], [-5, -5, -5], [-5, -5, -5]]\n\n\n@serving_test\ndef test_restful_base64_int8_2d_array_input_output_success():\n    common_test_restful_base64_int_type_2d_array_input_output_success(np.int8)\n\n\n@serving_test\ndef test_restful_base64_int16_2d_array_input_output_success():\n    common_test_restful_base64_int_type_2d_array_input_output_success(np.int16)\n\n\n@serving_test\ndef test_restful_base64_int32_2d_array_input_output_success():\n    common_test_restful_base64_int_type_2d_array_input_output_success(np.int32)\n\n\n@serving_test\ndef test_restful_base64_int64_2d_array_input_output_success():\n    common_test_restful_base64_int_type_2d_array_input_output_success(np.int64)\n\n\ndef common_test_restful_base64_uint_type_2d_array_input_output_success(dtype):\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    dtype_str_map = {np.uint8: \"uint8\", np.uint16: \"uint16\", np.uint32: \"uint32\", np.uint64: \"uint64\"}\n    assert dtype in dtype_str_map\n    for i, instance in enumerate(instances):\n        val = i * 2\n        val = [[val] * (i + 1)] * (i + 1)\n        val = np.array(val).astype(dtype)\n        instance[\"int_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': dtype_str_map[dtype],\n                               \"shape\": val.shape}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"int_plus_1\", instances)\n    result = result[\"instances\"]\n    assert 
result[0][\"value\"] == [[1]]\n    assert result[1][\"value\"] == [[3, 3], [3, 3]]\n    assert result[2][\"value\"] == [[5, 5, 5], [5, 5, 5], [5, 5, 5]]\n\n\n@serving_test\ndef test_restful_base64_uint8_2d_array_input_output_success():\n    common_test_restful_base64_uint_type_2d_array_input_output_success(np.uint8)\n\n\n@serving_test\ndef test_restful_base64_uint16_2d_array_input_output_success():\n    common_test_restful_base64_uint_type_2d_array_input_output_success(np.uint16)\n\n\n@serving_test\ndef test_restful_base64_uint32_2d_array_input_output_success():\n    common_test_restful_base64_uint_type_2d_array_input_output_success(np.uint32)\n\n\n@serving_test\ndef test_restful_base64_uint64_2d_array_input_output_success():\n    common_test_restful_base64_uint_type_2d_array_input_output_success(np.uint64)\n\n\n@serving_test\ndef test_restful_base64_float_scalar_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = np.float32(i * 2.2)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"fp32\"}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == 1.0\n    assert abs(result[1][\"value\"] - (2.2 + 1)) < 0.001\n    assert abs(result[2][\"value\"] - (4.4 + 1)) < 0.001\n\n\n@serving_test\ndef test_restful_base64_float_1d_array_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = [i * 2.2 * (-1 if i % 2 == 0 else 1)] * (i + 1)  # [0], [2.2, 2.2], [-4.4, -4.4, -4.4]\n        val = np.array(val).astype(np.float32)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': 
\"fp32\", 'shape': [i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    check_number_result(result, y_data_list, \"value\")\n\n\ndef common_test_restful_base64_float_type_2d_array_input_output_success(dtype, dtype_str=None):\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype_str_map = {np.float16: \"fp16\", np.float32: \"fp32\", np.float64: \"fp64\"}\n    assert dtype in dtype_str_map\n    if dtype_str is None:\n        dtype_str = dtype_str_map[dtype]\n\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 1)] * (i + 1)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': dtype_str,\n                                 'shape': [i + 1, i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    check_number_result(result, y_data_list, \"value\")\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_input_output_success():\n    common_test_restful_base64_float_type_2d_array_input_output_success(np.float16)\n\n\n@serving_test\ndef test_restful_base64_float32_2d_array_input_output_success():\n    common_test_restful_base64_float_type_2d_array_input_output_success(np.float32)\n\n\n@serving_test\ndef test_restful_base64_float64_2d_array_input_output_success():\n    common_test_restful_base64_float_type_2d_array_input_output_success(np.float64)\n\n\n@serving_test\ndef test_restful_base64_float16_2_2d_array_input_output_success():\n    common_test_restful_base64_float_type_2d_array_input_output_success(np.float16, \"float16\")\n\n\n@serving_test\ndef test_restful_base64_float32_2_2d_array_input_output_success():\n    
common_test_restful_base64_float_type_2d_array_input_output_success(np.float32, \"float32\")\n\n\n@serving_test\ndef test_restful_base64_float64_2_2d_array_input_output_success():\n    common_test_restful_base64_float_type_2d_array_input_output_success(np.float64, \"float64\")\n\n\n@serving_test\ndef test_restful_base64_mix_all_type_success():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += r\"\"\"\ndef func_test(bool_val, int_val, float_val, str_val, bytes_val):\n    return ~bool_val, int_val+1, float_val+1, str_val+\"123\", str.encode(bytes.decode(bytes_val.tobytes()) + \"456\") \n\n@register.register_method(output_names=['bool_val', 'int_val', 'float_val', 'str_val', 'bytes_val'])\ndef mix_all_type(bool_val, int_val, float_val, str_val, bytes_val):\n    bool_val, int_val, float_val, str_val, bytes_val = \\\n        register.add_stage(func_test, bool_val, int_val, float_val, str_val, bytes_val, outputs_count=5)\n    return bool_val, int_val, float_val, str_val, bytes_val\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_restful_server(\"0.0.0.0:5500\")\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        float_val = np.array([2.2, 3.3]).astype(np.float32)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(float_val.tobytes()).decode(), 'type': \"fp32\", 'shape': [2]}\n\n        int_val = np.array([2, 3]).astype(np.int32)\n        instance[\"int_val\"] = {\"b64\": base64.b64encode(int_val.tobytes()).decode(), 'type': \"int32\", 'shape': [2]}\n\n        bool_val = np.array([True, False])\n        instance[\"bool_val\"] = {\"b64\": base64.b64encode(bool_val.tobytes()).decode(), 'type': \"bool\", 'shape': [2]}\n\n        str_val = \"ABC\"\n        
instance[\"str_val\"] = {\"b64\": base64.b64encode(str.encode(str_val)).decode(), 'type': \"str\", 'shape': []}\n\n        bytes_val = \"DEF\"\n        instance[\"bytes_val\"] = {\"b64\": base64.b64encode(str.encode(bytes_val)).decode(), 'type': \"bytes\", 'shape': []}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"mix_all_type\", instances)\n    result = result[\"instances\"]\n\n    for i in range(3):\n        compare_float_value(result[i][\"float_val\"], [3.2, 4.3])\n        assert result[i][\"int_val\"] == [3, 4]\n        assert result[i][\"bool_val\"] == [False, True]\n        assert result[i][\"str_val\"] == \"ABC123\"\n        assert b64_decode_to_str(result[i][\"bytes_val\"]) == \"DEF456\"\n\n\n@serving_test\ndef test_restful_base64_without_b64_key_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    dtype_str_map = {np.float16: \"fp16\", np.float32: \"fp32\", np.float64: \"fp64\"}\n    assert dtype in dtype_str_map\n\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 1)] * (i + 1)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {'type': dtype_str_map[dtype], 'shape': [i + 1, i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"'b64' should be specified only one time\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_b64_invalid_type_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    dtype_str_map = {np.float16: \"fp16\", np.float32: \"fp32\", np.float64: \"fp64\"}\n    assert dtype in dtype_str_map\n\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 
else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 1)] * (i + 1)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {'b64': 123, 'type': dtype_str_map[dtype], 'shape': [i + 1, i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"get scalar data failed, type is string, but json is not string type\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_b64_invalid_value_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    dtype_str_map = {np.float16: \"fp16\", np.float32: \"fp32\", np.float64: \"fp64\"}\n    assert dtype in dtype_str_map\n\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 1)] * (i + 1)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        b64_val = base64.b64encode(val.tobytes()).decode()\n        b64_val = '+==+==' + b64_val[:len('+==+==')]\n        instance[\"float_val\"] = {'b64': b64_val, 'type': dtype_str_map[dtype], 'shape': [i + 1, i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"is illegal b64 encode string\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_b64_value_empty_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    dtype_str_map = {np.float16: \"fp16\", np.float32: \"fp32\", np.float64: \"fp64\"}\n    assert dtype in dtype_str_map\n\n    for i, instance in enumerate(instances):\n        instance[\"float_val\"] = {'b64': \"\", 'type': dtype_str_map[dtype], 'shape': [i + 1, i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert 
\"decode base64 size:0; Given info: type:float16; type size:2; element nums:1\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_dtype_unknow_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    dtype_str_map = {np.float16: \"fp16\", np.float32: \"fp32\", np.float64: \"fp64\"}\n    assert dtype in dtype_str_map\n\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 1)] * (i + 1)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"dtype_unknow\",\n                                 'shape': [i + 1, i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"Parser request failed, json object, specified type:'dtype_unknow' is illegal\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_dtype_empty_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    dtype_str_map = {np.float16: \"fp16\", np.float32: \"fp32\", np.float64: \"fp64\"}\n    assert dtype in dtype_str_map\n\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 1)] * (i + 1)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"\",\n                                 'shape': [i + 1, i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"Parser request failed, json object, specified type:'' is illegal\" in 
result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_dtype_invalid_type_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    dtype_str_map = {np.float16: \"fp16\", np.float32: \"fp32\", np.float64: \"fp64\"}\n    assert dtype in dtype_str_map\n\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 1)] * (i + 1)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': 1,\n                                 'shape': [i + 1, i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"json object, key is 'type', value should be string type\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_dtype_not_match_empty_data_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = [[]]\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"fp16\",\n                                 'shape': [i + 1, i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"Parser request failed, size is not matched\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_dtype_not_match_size_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 
== 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 2)] * (i + 2)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"fp32\",\n                                 'shape': [i + 2, i + 2]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"Parser request failed, size is not matched\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_shape_large_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    dtype_str_map = {np.float16: \"fp16\", np.float32: \"fp32\", np.float64: \"fp64\"}\n    assert dtype in dtype_str_map\n\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 1)] * (i + 1)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': dtype_str_map[dtype],\n                                 'shape': [i + 2, i + 2]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"Parser request failed, size is not matched\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_shape_small_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    dtype_str_map = {np.float16: \"fp16\", np.float32: \"fp32\", np.float64: \"fp64\"}\n    assert dtype in dtype_str_map\n\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 2)] * (i + 2)\n        val = np.array(val).astype(dtype)\n        
y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': dtype_str_map[dtype],\n                                 'shape': [i + 1, i + 1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"Parser request failed, size is not matched\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_shape_small2_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    dtype_str_map = {np.float16: \"fp16\", np.float32: \"fp32\", np.float64: \"fp64\"}\n    assert dtype in dtype_str_map\n\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 2)] * (i + 2)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': dtype_str_map[dtype],\n                                 'shape': [i + 2, i]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"Parser request failed, size is not matched\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_empty_shape_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 2)] * (i + 2)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"fp16\",\n                                 'shape': []}\n\n    result = post_restful(\"localhost:5500\", 
base.servable_name, \"float_plus_1\", instances)\n    assert \"Parser request failed, size is not matched\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_none_shape_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 2)] * (i + 2)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"fp16\"}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"Parser request failed, size is not matched\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_invalid_2d_shape_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 2)] * (i + 2)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"fp16\", \"shape\": [[]]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"json object, key is 'shape', array value should be unsigned integer\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_invalid_shape_str_shape_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 
0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 2)] * (i + 2)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"fp16\", \"shape\": [\"abc\"]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"json object, key is 'shape', array value should be unsigned integer\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_float_shape_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 2)] * (i + 2)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"fp16\", \"shape\": [1.1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"json object, key is 'shape', array value should be unsigned integer\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_base64_float16_2d_array_negative_shape_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n\n    dtype = np.float16\n    y_data_list = []\n    for i, instance in enumerate(instances):\n        val = i * 2.2 * (-1 if i % 2 == 0 else 1)  # 0, 2.2 ,-4.4\n        val = [[val] * (i + 2)] * (i + 2)\n        val = np.array(val).astype(dtype)\n        y_data_list.append(val + 1)\n        instance[\"float_val\"] = {\"b64\": base64.b64encode(val.tobytes()).decode(), 'type': \"fp16\", \"shape\": [-1]}\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    assert \"json object, key is 
'shape', array value should be unsigned integer\" in result[\"error_msg\"]\n"
  },
  {
    "path": "tests/ut/python/tests/test_restful_json_data.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test Serving RESTful, with master, worker and client\"\"\"\n\nimport numpy as np\nfrom common import serving_test\nfrom common_restful import compare_float_value, post_restful\nfrom common_restful import start_str_restful_server, start_bool_int_float_restful_server\n\n\n@serving_test\ndef test_restful_str_scalar_input_output_success():\n    base = start_str_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = str_a[i]\n        instance[\"text2\"] = str_b[i]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"str_concat\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"text\"] == str_a[0] + str_b[0]\n    assert result[1][\"text\"] == str_a[1] + str_b[1]\n    assert result[2][\"text\"] == str_a[2] + str_b[2]\n\n\n@serving_test\ndef test_restful_str_scalar_shape1_input_output_success():\n    base = start_str_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = [str_a[i]]\n        instance[\"text2\"] = 
[str_b[i]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"str_concat\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"text\"] == str_a[0] + str_b[0]\n    assert result[1][\"text\"] == str_a[1] + str_b[1]\n    assert result[2][\"text\"] == str_a[2] + str_b[2]\n\n\n@serving_test\ndef test_restful_empty_str_input_output_success():\n    base = start_str_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = str_a[i]\n        instance[\"text2\"] = str_b[i]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"str_empty\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"text\"] == \"\"\n    assert result[1][\"text\"] == \"456\"\n    assert result[2][\"text\"] == \"\"\n\n\n@serving_test\ndef test_restful_str_2d_array_one_item_input_output_failed():\n    base = start_str_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = [[str_a[i]]]\n        instance[\"text2\"] = [[str_b[i]]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"str_concat\", instances)\n    assert \"bytes or string type input  shape can only be (1,) or empty, but given shape is [1, 1]\" \\\n           in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_str_1d_array_input_failed():\n    base = start_str_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = [str_a[i], str_a[i]]\n        instance[\"text2\"] = [str_b[i], str_b[i]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, 
\"str_concat\", instances)\n    assert \"json array, string or bytes type only support one item\" in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_str_invalid_array_input_failed():\n    base = start_str_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    str_a = [\"ABC\", \"DEF\", \"HIJ\"]\n    str_b = [\"123\", \"456\", \"789\"]\n    for i, instance in enumerate(instances):\n        instance[\"text1\"] = [str_a[i], [str_a[i]]]\n        instance[\"text2\"] = [str_b[i], [str_b[i]]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"str_concat\", instances)\n    assert \"json array, string or bytes type only support one item\" in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_str_invalid_str_message_failed():\n    base = start_str_restful_server()\n    # Client\n    post_payload = np.array([1.1, 2.2], np.float32).tobytes()\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"str_concat\", None, post_payload=post_payload)\n    assert \"Illegal JSON format\" in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_bool_scalar_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        instance[\"bool_val\"] = (i % 2 == 0)\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    result = result[\"instances\"]\n    assert not result[0][\"value\"]\n    assert result[1][\"value\"]\n    assert not result[2][\"value\"]\n\n\n@serving_test\ndef test_restful_bool_1d_array_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        instance[\"bool_val\"] = [(i % 2 == 0)] * (i + 1)\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    result = result[\"instances\"]\n    assert 
result[0][\"value\"] == [False]\n    assert result[1][\"value\"] == [True, True]\n    assert result[2][\"value\"] == [False, False, False]\n\n\n@serving_test\ndef test_restful_bool_2d_array_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = (i % 2 == 0)\n        val = [[val] * (i + 1)] * (i + 1)\n        instance[\"bool_val\"] = val\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == [[False]]\n    assert result[1][\"value\"] == [[True, True], [True, True]]\n    assert result[2][\"value\"] == [[False, False, False], [False, False, False], [False, False, False]]\n\n\n@serving_test\ndef test_restful_bool_invalid_array_array_scalar_mix_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[False], True]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"invalid json array: json type is not array\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_bool_invalid_array2_scalar_array_mix_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [False, [True]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"json array, data should be number, bool, string or bytes\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_bool_invalid_array3_array_dim_not_match_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[False, True], [True]]\n\n    result = 
post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"invalid json array: json size is 1, the dim 1 expected to be 2\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_bool_invalid_array4_array_dim_not_match_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[[False, True]], [[True]]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"invalid json array: json size is 1, the dim 2 expected to be 2\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_int_scalar_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = i * 2\n        instance[\"int_val\"] = val\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"int_plus_1\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == 1\n    assert result[1][\"value\"] == 3\n    assert result[2][\"value\"] == 5\n\n\n@serving_test\ndef test_restful_int_empty_input_output_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        if i % 2 == 0:\n            val = []\n        else:\n            val = [i * 2] * (i + 1)\n        instance[\"int_val\"] = val\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"int_plus_1\", instances)\n    assert \"json array, shape is empty\" in result[\"error_msg\"]\n\n\n@serving_test\ndef test_restful_int_1d_array_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = i * 2\n        val = [val] * (i + 1)\n        instance[\"int_val\"] = val\n\n    result 
= post_restful(\"localhost:5500\", base.servable_name, \"int_plus_1\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == [1]\n    assert result[1][\"value\"] == [3, 3]\n    assert result[2][\"value\"] == [5, 5, 5]\n\n\n@serving_test\ndef test_restful_int_2d_array_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = i * 2\n        val = [[val] * (i + 1)] * (i + 1)\n        instance[\"int_val\"] = val\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"int_plus_1\", instances)\n    result = result[\"instances\"]\n    assert result[0][\"value\"] == [[1]]\n    assert result[1][\"value\"] == [[3, 3], [3, 3]]\n    assert result[2][\"value\"] == [[5, 5, 5], [5, 5, 5], [5, 5, 5]]\n\n\n@serving_test\ndef test_restful_float_scalar_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = i * 2.2\n        instance[\"float_val\"] = val\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    result = result[\"instances\"]\n    compare_float_value(result[0][\"value\"], 1.0)\n    compare_float_value(result[1][\"value\"], 2.2 + 1)\n    compare_float_value(result[2][\"value\"], 4.4 + 1)\n\n\n@serving_test\ndef test_restful_float_1d_array_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = [i * 2.2] * (i + 1)\n        instance[\"float_val\"] = val\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    result = result[\"instances\"]\n    compare_float_value(result[0][\"value\"], [1.0])\n    compare_float_value(result[1][\"value\"], [3.2, 3.2])\n    
compare_float_value(result[2][\"value\"], [5.4, 5.4, 5.4])\n\n\n@serving_test\ndef test_restful_float_2d_array_input_output_success():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for i, instance in enumerate(instances):\n        val = i * 2.2\n        val = [[val] * (i + 1)] * (i + 1)\n        instance[\"float_val\"] = val\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"float_plus_1\", instances)\n    result = result[\"instances\"]\n    compare_float_value(result[0][\"value\"], [[1.0]])\n    compare_float_value(result[1][\"value\"], [[3.2, 3.2], [3.2, 3.2]])\n    compare_float_value(result[2][\"value\"], [[5.4, 5.4, 5.4], [5.4, 5.4, 5.4], [5.4, 5.4, 5.4]])\n\n\n@serving_test\ndef test_restful_mix_bool_int_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[False, True], [1, 1]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"json array, elements type is not equal\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_bool_int2_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[False, 1]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"json array, elements type is not equal\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_float_int_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[1.1, 1.2], [1, 1]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"json array, elements type is not equal\" in 
result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_float_int2_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[1.1, 1]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"json array, elements type is not equal\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_int_float_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[1, 1], [1.1, 1.2]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"json array, elements type is not equal\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_int_float2_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[1, 1.2]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"json array, elements type is not equal\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_str_float_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[\"a\", \"b\"], [1.1, 1.2]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"string or bytes type only support one item\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_str_float2_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[\"a\", 1.2]]\n\n    result = post_restful(\"localhost:5500\", 
base.servable_name, \"bool_not\", instances)\n    assert \"string or bytes type only support one item\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_float_str_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[1.1, 1.2], [\"a\", \"b\"]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"json array, elements type is not equal\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_float_str2_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[1.1, \"b\"]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"json array, elements type is not equal\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_bytes_str_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[{\"b64\": \"\"}, {\"b64\": \"\"}], [\"a\", \"b\"]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"string or bytes type only support one item\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_bytes_bool_input_failed():\n    base = start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[{\"b64\": \"\"}, {\"b64\": \"\"}], [True, False]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"string or bytes type only support one item\" in result['error_msg']\n\n\n@serving_test\ndef test_restful_mix_bool_bytes_input_failed():\n    base = 
start_bool_int_float_restful_server()\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"bool_val\"] = [[True, False], [{\"b64\": \"\"}, {\"b64\": \"\"}]]\n\n    result = post_restful(\"localhost:5500\", base.servable_name, \"bool_not\", instances)\n    assert \"json array, data should be number, bool, string or bytes\" in result['error_msg']\n"
  },
  {
    "path": "tests/ut/python/tests/test_restful_request.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test Serving RESTful, with master, worker and client\"\"\"\n\nimport json\n\nimport requests\nimport numpy as np\n\nfrom common import ServingTestBase, serving_test, generate_cert\nfrom common_restful import create_multi_instances_fp32, create_multi_instances_with_batch_fp32\nfrom common_restful import check_number_result, post_restful\nfrom mindspore_serving import server\n\n\n@serving_test\ndef test_restful_request_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_restful_server(\"0.0.0.0:5500\")\n    # Client\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    result = post_restful(\"localhost:5500\", base.servable_name, \"add_common\", instances)\n    check_number_result(result, y_data_list)\n\n\n@serving_test\ndef test_https_one_way_auth_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    generate_cert()\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    ssl_config = server.SSLConfig(certificate=\"server.crt\", 
private_key=\"server.key\", custom_ca=\"ca.crt\",\n                                  verify_client=False)\n    server.start_restful_server(\"0.0.0.0:5500\", ssl_config=ssl_config)\n    # Client\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    result = post_restful(\"0.0.0.0:5500\", base.servable_name, \"add_common\", instances, https=True)\n    check_number_result(result, y_data_list)\n\n\n@serving_test\ndef test_https_mutual_auth_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    generate_cert(server_ip=\"127.0.0.1\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    ssl_config = server.SSLConfig(certificate=\"server.crt\", private_key=\"server.key\", custom_ca=\"ca.crt\",\n                                  verify_client=True)\n    server.start_restful_server(\"0.0.0.0:5500\", ssl_config=ssl_config)\n    # Client\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    result = post_restful(\"127.0.0.1:5500\", base.servable_name, \"add_common\", instances, https=True)\n    check_number_result(result, y_data_list)\n\n\n@serving_test\ndef test_https_client_auth_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    generate_cert(server_ip=\"127.0.0.1\")\n    ssl_config = server.SSLConfig(certificate=\"server.crt\", private_key=\"server.key\", custom_ca=\"ca.crt\",\n                                  verify_client=False)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"127.0.0.1:5500\", ssl_config=ssl_config)\n    # Client\n    instance_count = 3\n    data = create_multi_instances_fp32(instance_count)\n    result = post_restful(\"127.0.0.1:5500\", base.servable_name, \"add_common\", data[0], verify=\"client.crt\",\n         
                 https=True)\n\n    print(result)\n    assert \"post failed\" in result\n\n\n@serving_test\ndef test_https_missing_cert_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    generate_cert(server_ip=\"127.0.0.1\")\n    ssl_config = server.SSLConfig(certificate=\"server.crt\", private_key=\"server.key\", custom_ca=\"ca.crt\",\n                                  verify_client=True)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"127.0.0.1:5500\", ssl_config=ssl_config)\n    # Client\n    instance_count = 3\n    data = create_multi_instances_fp32(instance_count)\n    result = post_restful(\"127.0.0.1:5500\", base.servable_name, \"add_common\", data[0], cert=None, https=True)\n\n    print(result)\n    assert \"post failed\" in result\n\n\n@serving_test\ndef test_https_unmatched_cert_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    generate_cert(server_ip=\"127.0.0.1\")\n    ssl_config = server.SSLConfig(certificate=\"server.crt\", private_key=\"client.key\", custom_ca=\"ca.crt\",\n                                  verify_client=False)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    try:\n        server.start_restful_server(\"127.0.0.1:5500\", ssl_config=ssl_config)\n        assert False\n    except RuntimeError as e:\n        assert \"Serving Error: load private_key from client.key failed\" in str(e)\n\n\n@serving_test\ndef test_restful_request_multi_times_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_restful_server(\"0.0.0.0:5500\")\n    for instance_count in range(1, 5):\n        instances, y_data_list = 
create_multi_instances_fp32(instance_count)\n        result = post_restful(\"localhost:5500\", base.servable_name, \"add_common\", instances)\n        check_number_result(result, y_data_list)\n\n\n@serving_test\ndef test_restful_request_multi_times_int32_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_restful_server(\"0.0.0.0:5500\")\n\n    for instance_count in range(1, 5):\n        instances = []\n        # instance 1\n        y_data_list = []\n        for i in range(instance_count):\n            x1 = np.asarray([[1.1, 2.2], [3.3, 4.4]]).astype(np.int32) * (i + 1)\n            x2 = np.asarray([[5.5, 6.6], [7.7, 8.8]]).astype(np.int32) * (i + 1)\n            y_data_list.append((x1 + x2).astype(np.float32))\n            instances.append({\"x1\": x1.tolist(), \"x2\": x2.tolist()})\n        result = post_restful(\"localhost:5500\", base.servable_name, \"add_cast\", instances)\n        check_number_result(result, y_data_list)\n\n\n@serving_test\ndef test_restful_request_servable_invalid_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_restful_server(\"0.0.0.0:5500\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    # Client\n    instance_count = 3\n    instances, _ = create_multi_instances_fp32(instance_count)\n    result = post_restful(\"localhost:5500\", base.servable_name + \"_error\", \"add_common\", instances)\n    assert \"servable is not available\" in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_request_method_invalid_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_restful_server(\"0.0.0.0:5500\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, 
base.servable_name, device_ids=0))\n    # Client\n    instance_count = 3\n    instances, _ = create_multi_instances_fp32(instance_count)\n    result = post_restful(\"localhost:5500\", base.servable_name, \"add_common\" + \"_error\", instances)\n    assert \"method is not available\" in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_request_with_version_number_0_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_restful_server(\"0.0.0.0:5500\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    # Client\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    result = post_restful(\"localhost:5500\", base.servable_name, \"add_common\", instances, 0)\n    check_number_result(result, y_data_list)\n\n\n@serving_test\ndef test_restful_request_with_version_number_1_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_restful_server(\"0.0.0.0:5500\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    # Client\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    result = post_restful(\"localhost:5500\", base.servable_name, \"add_common\", instances, 1)\n    check_number_result(result, y_data_list)\n\n\n@serving_test\ndef test_restful_request_with_version_number_2_invalid_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_restful_server(\"0.0.0.0:5500\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    # Client\n    instance_count = 3\n    instances, _ = create_multi_instances_fp32(instance_count)\n    result = post_restful(\"localhost:5500\", base.servable_name, \"add_common\", instances, 
2)\n    assert \"servable is not available\" in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_request_version_number_negative_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_restful_server(\"0.0.0.0:5500\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    # Client\n    instance_count = 3\n    instances, _ = create_multi_instances_fp32(instance_count)\n    result = post_restful(\"localhost:5500\", base.servable_name, \"add_common\", instances, -1)\n    assert \"please check url, version number range failed\" in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_request_without_model_invalid_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_restful_server(\"0.0.0.0:5500\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    # Client\n    instance_count = 3\n    instances, _ = create_multi_instances_fp32(instance_count)\n\n    instances_map = {\"instances\": instances}\n    post_payload = json.dumps(instances_map)\n    print(\"request:\", post_payload)\n    request_url = \"http://localhost:5500/x/:add_common\"\n    result = requests.post(request_url, data=post_payload)\n    print(\"result\", result.text)\n    result = json.loads(result.text)\n    assert \"please check url, the keyword:[model] must contain\" in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_request_without_method_invalid_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_restful_server(\"0.0.0.0:5500\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    # Client\n    instance_count = 3\n    instances, _ = create_multi_instances_fp32(instance_count)\n\n    instances_map = 
{\"instances\": instances}\n    post_payload = json.dumps(instances_map)\n    print(\"request:\", post_payload)\n    request_url = f\"http://localhost:5500/model/{base.servable_name}\"\n    result = requests.post(request_url, data=post_payload)\n    print(\"result\", result.text)\n    result = json.loads(result.text)\n    assert \"please check url, the keyword:[service method] must contain.\" in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_request_servable_version_reverse_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_restful_server(\"0.0.0.0:5500\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    # Client\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n\n    instances_map = {\"instances\": instances}\n    post_payload = json.dumps(instances_map)\n    print(\"request:\", post_payload)\n    request_url = f\"http://localhost:5500/version/0/model/{base.servable_name}:add_common\"\n    result = requests.post(request_url, data=post_payload)\n    print(\"result\", result.text)\n    result = json.loads(result.text)\n    check_number_result(result, y_data_list)\n\n\n@serving_test\ndef test_restful_request_preprocess_raise_exception_with_batch_failed():\n    base = ServingTestBase()\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\ndef add_trans_datatype(x1, x2):\n    raise RuntimeError(\"invalid preprocess\")\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.add_stage(add_trans_datatype, x1, x2, outputs_count=2, tag=\"Preprocess\")  # cast input to float32\n    y = register.add_stage(model, x1, x2, outputs_count=1)    \n    return y\n\"\"\"\n    
base.init_servable_with_servable_config(1, servable_content)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_restful_server(\"0.0.0.0:5500\")\n    # Client\n    instance_count = 12\n    instances, _ = create_multi_instances_with_batch_fp32(instance_count)\n    result = post_restful(\"localhost:5500\", base.servable_name, \"add_cast\", instances)\n\n    print(result)\n    assert \"Preprocess Failed\" in str(result[\"error_msg\"])\n\n\n@serving_test\ndef test_restful_request_larger_than_server_receive_max_size():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_restful_server(\"0.0.0.0:5500\", max_msg_mb_size=1)  # 1MB\n    # Client\n    instances = []\n    x1 = np.ones([1024, 1024], np.float32)\n    x2 = np.ones([1024, 1024], np.float32)\n    instances.append({\"x1\": x1.tolist(), \"x2\": x2.tolist()})\n    # more than 1MB msg\n    result = post_restful(\"localhost:5500\", base.servable_name + \"_error\", \"add_common\", instances)\n\n    print(result)\n    assert \"http message is bigger than 1048576\" in str(result[\"error_msg\"])\n"
  },
  {
    "path": "tests/ut/python/tests/test_server_client.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test Serving with master, worker and client\"\"\"\n\nimport os\nimport time\nimport signal\nimport psutil\n\nimport numpy as np\n\nfrom common import ServingTestBase, serving_test, create_client, generate_cert\nfrom common import servable_config_import, servable_config_declare_servable, servable_config_preprocess_cast\nfrom common import servable_config_method_add_common, servable_config_method_add_cast\nfrom common import start_serving_server\nfrom mindspore_serving import server\nfrom mindspore_serving.client import SSLConfig\n\n\ndef create_multi_instances_fp32(instance_count):\n    instances = []\n    # instance 1\n    y_data_list = []\n    for i in range(instance_count):\n        x1 = np.asarray([[1.1, 2.2], [3.3, 4.4]]).astype(np.float32) * (i + 1)\n        x2 = np.asarray([[5.5, 6.6], [7.7, 8.8]]).astype(np.float32) * (i + 1)\n        y_data_list.append(x1 + x2)\n        instances.append({\"x1\": x1, \"x2\": x2})\n    return instances, y_data_list\n\n\ndef check_result(result, y_data_list):\n    assert len(result) == len(y_data_list)\n    for result_item, y_data in zip(result, y_data_list):\n        assert (result_item[\"y\"] == y_data).all()\n\n\ndef is_float_equal(left, right):\n    return (np.abs(left - right) < 0.00001).all()\n\n\n@serving_test\ndef 
test_grpc_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    result = client.infer(instances)\n\n    print(result)\n    check_result(result, y_data_list)\n\n\n@serving_test\ndef test_grpc_multi_times_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    for instance_count in range(1, 5):\n        instances, y_data_list = create_multi_instances_fp32(instance_count)\n        result = client.infer(instances)\n        check_result(result, y_data_list)\n\n\n@serving_test\ndef test_grpc_async_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    result_future = client.infer_async(instances)\n    result = result_future.result()\n\n    print(result)\n    check_result(result, y_data_list)\n\n\n@serving_test\ndef test_grpc_async_multi_times_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    
server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client, use with avoid affecting the next use case\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    for instance_count in range(1, 5):\n        instances, y_data_list = create_multi_instances_fp32(instance_count)\n        result_future = client.infer_async(instances)\n        result = result_future.result()\n        check_result(result, y_data_list)\n\n\n@serving_test\ndef test_grpc_start_grpc_twice_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    try:\n        server.start_grpc_server(\"0.0.0.0:4500\")\n        assert False\n    except RuntimeError as e:\n        assert \"Serving Error: Serving gRPC server is already running\" in str(e)\n\n\n@serving_test\ndef test_grpc_start_restful_server_twice_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_restful_server(\"0.0.0.0:5500\")\n    try:\n        server.start_restful_server(\"0.0.0.0:4500\")\n        assert False\n    except RuntimeError as e:\n        assert \"Serving Error: RESTful server is already running\" in str(e)\n\n\n@serving_test\ndef test_grpc_alone_repeat_grpc_and_restful_port_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_grpc_server(\"0.0.0.0:7600\")\n    try:\n        server.start_restful_server(\"0.0.0.0:7600\")\n        assert False\n    except RuntimeError as e:\n        assert \"Serving Error: RESTful server start failed, bind to the socket address 0.0.0.0:7600 
failed\" in str(e)\n\n\n@serving_test\ndef test_grpc_alone_repeat_grpc_and_restful_port2_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_restful_server(\"0.0.0.0:7600\")\n    try:\n        server.start_grpc_server(\"0.0.0.0:7600\")\n        assert False\n    except RuntimeError as e:\n        assert \"Serving Error: Serving gRPC server start failed, create server failed, address\" in str(e)\n\n\n@serving_test\ndef test_grpc_servable_content_success():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += servable_config_method_add_common\n    servable_content += servable_config_method_add_cast\n\n    base.init_servable_with_servable_config(1, servable_content)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    result = client.infer(instances)\n\n    print(result)\n    check_result(result, y_data_list)\n\n\n@serving_test\ndef test_grpc_one_way_auth_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    generate_cert()\n    ssl_config = server.SSLConfig(certificate=\"server.crt\", private_key=\"server.key\", custom_ca=\"ca.crt\",\n                                  verify_client=False)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\", ssl_config=ssl_config)\n\n    ssl_config = SSLConfig(custom_ca=\"ca.crt\")\n    client = create_client(\"0.0.0.0:5500\", base.servable_name, \"add_common\", 
ssl_config=ssl_config)\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    result = client.infer(instances)\n\n    print(result)\n    check_result(result, y_data_list)\n\n\n@serving_test\ndef test_grpc_mutual_auth_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    generate_cert(server_ip=\"127.0.0.1\")\n    ssl_config = server.SSLConfig(certificate=\"server.crt\", private_key=\"server.key\", custom_ca=\"ca.crt\",\n                                  verify_client=True)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"127.0.0.1:5500\", ssl_config=ssl_config)\n\n    ssl_config = SSLConfig(certificate=\"client.crt\", private_key=\"client.key\", custom_ca=\"ca.crt\")\n    client = create_client(\"127.0.0.1:5500\", base.servable_name, \"add_common\", ssl_config=ssl_config)\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    result = client.infer(instances)\n\n    print(result)\n    check_result(result, y_data_list)\n\n\n@serving_test\ndef test_grpc_client_auth_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    generate_cert(server_ip=\"127.0.0.1\")\n    ssl_config = server.SSLConfig(certificate=\"server.crt\", private_key=\"server.key\", custom_ca=\"ca.crt\",\n                                  verify_client=False)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"127.0.0.1:5500\", ssl_config=ssl_config)\n\n    ssl_config = SSLConfig(custom_ca=\"client.crt\")\n    client = create_client(\"127.0.0.1:5500\", base.servable_name, \"add_common\", ssl_config=ssl_config)\n    instance_count = 3\n    data = create_multi_instances_fp32(instance_count)\n    result = client.infer(data[0])\n\n    
print(result)\n    assert \"unavailable\" in result[\"error\"]\n\n\n@serving_test\ndef test_grpc_missing_cert_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    generate_cert(server_ip=\"127.0.0.1\")\n    ssl_config = server.SSLConfig(certificate=\"server.crt\", private_key=\"server.key\", custom_ca=\"ca.crt\",\n                                  verify_client=True)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"127.0.0.1:5500\", ssl_config=ssl_config)\n\n    ssl_config = SSLConfig(custom_ca=\"ca.crt\")\n    client = create_client(\"127.0.0.1:5500\", base.servable_name, \"add_common\", ssl_config=ssl_config)\n    instance_count = 3\n    data = create_multi_instances_fp32(instance_count)\n    result = client.infer(data[0])\n\n    print(result)\n    assert \"unavailable\" in result[\"error\"]\n\n\n@serving_test\ndef test_grpc_unmatched_cert_failed():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    generate_cert(server_ip=\"127.0.0.1\")\n    ssl_config = server.SSLConfig(certificate=\"server.crt\", private_key=\"server.crt\", custom_ca=\"ca.crt\",\n                                  verify_client=True)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    try:\n        server.start_grpc_server(\"127.0.0.1:5500\", ssl_config=ssl_config)\n        assert False\n    except RuntimeError as e:\n        assert \"Serving Error: Serving gRPC server start failed, create server failed, address\" in str(e)\n\n\n@serving_test\ndef test_grpc_preprocess_outputs_count_not_match_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += r\"\"\"\ndef add_trans_datatype(x1, x2):\n    return 
x1.astype(np.float32)\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)  # cast input to float32\n    y = register.call_servable(x1, x2)    \n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    instance_count = 3\n    instances, _ = create_multi_instances_fp32(instance_count)\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_cast\")\n    result = client.infer(instances)\n\n    print(result)\n    assert \"Preprocess Failed\" in str(result[\"error\"]) or \"servable is not available\" in str(result[\"error\"])\n\n\n@serving_test\ndef test_grpc_postprocess_outputs_count_not_match_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += r\"\"\"\ndef add_trans_datatype(x1, x2):\n    return x1.astype(np.float32)\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.call_servable(x1, x2)    \n    y, y2 = register.call_postprocess(add_trans_datatype, y, x2)\n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    instance_count = 3\n    instances, _ = create_multi_instances_fp32(instance_count)\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_cast\")\n    result = client.infer(instances)\n\n    print(result)\n    assert \"Postprocess Failed\" in str(result[\"error\"]) or \"servable is not available\" in str(result[\"error\"])\n\n\n@serving_test\ndef 
test_grpc_preprocess_update_numpy_success():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += r\"\"\"\ndef preprocess(x3):\n    x3[0] = 123\n    return x3    \n    \ndef postprocess(x3, x4):\n    return x3 + 1, x4 + 2\n\n@register.register_method(output_names=[\"x3\", \"x4\"])\ndef add_cast(x1, x2, x3):\n    x4 = register.call_preprocess(preprocess, x3) # [123, 1, 1], expect x3 is x4, same as python function call\n    y = register.call_servable(x1, x2)    \n    x3, x4 = register.call_postprocess(postprocess, x3, x4)\n    return x3, x4 \n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    instances = [{}, {}, {}]\n    for instance in instances:\n        instance[\"x1\"] = np.ones([2, 2]).astype(np.float32)\n        instance[\"x2\"] = np.ones([2, 2]).astype(np.float32)\n        instance[\"x3\"] = np.ones([3]).astype(np.int32)\n\n    # Client, use with avoid affecting the next use case\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_cast\")\n    result = client.infer(instances)\n    print(result)\n\n    x3 = (np.array([123, 1, 1]) + 1).tolist()\n    x4 = (np.array([123, 1, 1]) + 2).tolist()\n\n    assert result[0][\"x3\"].tolist() == x3\n    assert result[0][\"x4\"].tolist() == x4\n    assert result[1][\"x3\"].tolist() == x3\n    assert result[1][\"x4\"].tolist() == x4\n    assert result[2][\"x3\"].tolist() == x3\n    assert result[2][\"x4\"].tolist() == x4\n\n\n@serving_test\ndef test_grpc_larger_than_server_receive_max_size():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    
server.start_grpc_server(\"0.0.0.0:5500\", max_msg_mb_size=1)  # 1MB\n    # Client\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    instances = []\n    # instance 1\n    y_data_list = []\n    x1 = np.ones([1024, 1024], np.float32)\n    x2 = np.ones([1024, 1024], np.float32)\n    y_data_list.append(x1 + x2)\n    instances.append({\"x1\": x1, \"x2\": x2})\n    result = client.infer(instances)  # more than 1MB msg\n\n    print(result)\n    assert \"Grpc Error, (8, 'resource exhausted')\" in str(result[\"error\"])\n\n\n@serving_test\ndef test_server_client_input_param_less():\n    # fail returned from Worker::RunAsync\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_method_add_common\n    base.init_servable_with_servable_config(1, servable_content)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    instance_count = 3\n\n    instances = []\n    y_data_list = []\n    for i in range(instance_count):\n        x1 = np.asarray([[1.1], [3.3]]).astype(np.float32) * (i + 1)\n        x2 = np.asarray([[5.5], [7.7]]).astype(np.float32) * (i + 1)\n        y_data_list.append(x1 + x2)\n        instances.append({\"x3\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    result = client.infer(instances)\n    print(result)\n    assert \"Cannot find input x1 in instance input\" in result[\"error\"]\n\n\n@serving_test\ndef test_server_client_servable_not_available():\n    # fail returned from Worker::RunAsync\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_method_add_common\n    base.init_servable_with_servable_config(1, 
servable_content)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    instance_count = 3\n\n    instances = []\n    y_data_list = []\n    for i in range(instance_count):\n        x1 = np.asarray([[1.1], [3.3]]).astype(np.float32) * (i + 1)\n        x2 = np.asarray([[5.5], [7.7]]).astype(np.float32) * (i + 1)\n        y_data_list.append(x1 + x2)\n        instances.append({\"x3\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name + \"error\", \"add_common\")\n    result = client.infer(instances)\n    print(result)\n    assert \"servable is not available\" in result[\"error\"]\n\n\n@serving_test\ndef test_server_client_max_request_count():\n    # fail returned from Worker::RunAsync\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += r\"\"\"\nimport time\ndef preprocess(x1, x2):\n    time.sleep(1)    \n    return x1, x2\n    \n@register.register_method(output_names=[\"y\"])\ndef add_common(x1, x2):\n    x1, x2 = register.call_preprocess(preprocess, x1, x2)\n    y = register.call_servable(x1, x2)\n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    server.master.context.set_max_enqueued_requests(1)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    x1 = np.asarray([[1.1, 2.2], [3.3, 4.4]]).astype(np.float32)\n    x2 = np.asarray([[5.5, 6.6], [7.7, 8.8]]).astype(np.float32)\n    instance = {\"x1\": x1, \"x2\": x2}\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    result_list = []\n    for _ in range(2):\n        result = client.infer_async(instance)\n        result_list.append(result)\n\n    result0 = 
result_list[0].result()\n    result1 = result_list[1].result()\n    print(result0)\n    print(result1)\n    assert \"error\" in result0 or \"error\" in result1\n    if \"error\" in result0:\n        assert \"error\" not in result1\n        assert \"Serving Error: enqueued requests count exceeds the limit 1\" in result0[\"error\"]\n    else:\n        assert \"error\" not in result0\n        assert \"Serving Error: enqueued requests count exceeds the limit 1\" in result1[\"error\"]\n\n\n@serving_test\ndef test_server_client_one_model_stage_with_batch_dim_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    x1 = np.array([[3.3, 4.4]], np.float32)\n    x2 = np.array([[7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}] * 3\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n    assert (result[1][\"y\"] == y).all()\n    assert (result[2][\"y\"] == y).all()\n\n\n@serving_test\ndef test_server_client_one_model_stage_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, 
model_file=\"tensor_add.mindir\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}] * 3\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n    assert (result[1][\"y\"] == y).all()\n    assert (result[2][\"y\"] == y).all()\n\n\n@serving_test\ndef test_server_client_with_batch_dim_data_size_invalid_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    x1 = np.array([[3.3, 4.4]], np.float32)\n    x2 = np.array([[7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}, {\"x1\": x1, \"x2\": x2}, {\"x1\": x1, \"x2\": x2}]\n    instances[1][\"x2\"] = np.array([[7.7, 8.8, 9.9]], np.float32)\n    print(instances)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n    assert \"Given model input 1 size 12 not match the size 8 defined in model\" in result[1][\"error\"]\n    assert (result[2][\"y\"] == y).all()\n\n\n@serving_test\ndef test_server_client_with_batch_dim_data_type_invalid_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", 
with_batch_dim=True)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    x1 = np.array([[3.3, 4.4]], np.float32)\n    x2 = np.array([[7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}, {\"x1\": x1, \"x2\": x2}, {\"x1\": x1, \"x2\": x2}]\n    instances[1][\"x2\"] = np.array([[7.7, 9.9]], np.int32)\n    print(instances)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n    assert \"Given model input 1 data type kMSI_Int32 not match the data type kMSI_Float32 defined in model\" in \\\n           result[1][\"error\"]\n    assert (result[2][\"y\"] == y).all()\n\n\n@serving_test\ndef test_server_client_data_size_invalid_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}, {\"x1\": x1, \"x2\": x2}, {\"x1\": x1, \"x2\": x2}]\n    instances[1][\"x2\"] = np.array([[5.5, 6.6, 8.8], [7.7, 8.8, 9.9]], np.float32)\n    print(instances)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    
assert (result[0][\"y\"] == y).all()\n    assert \"Given model input 1 size 24 not match the size 16 defined in model\" in result[1][\"error\"]\n    assert (result[2][\"y\"] == y).all()\n\n\n@serving_test\ndef test_server_client_data_type_invalid_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}, {\"x1\": x1, \"x2\": x2}, {\"x1\": x1, \"x2\": x2}]\n    instances[1][\"x2\"] = np.array([[5.5, 6.8], [7.7, 9.9]], np.int32)\n    print(instances)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n    assert \"Given model input 1 data type kMSI_Int32 not match the data type kMSI_Float32 defined in model\" in \\\n           result[1][\"error\"]\n    assert (result[2][\"y\"] == y).all()\n\n\n@serving_test\ndef test_server_client_two_model_stage_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = 
start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 + x3\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_server_client_one_model_stage_with_function_front_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef add_test(x1, x2):\n    return x1 + x2 + 1\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(add_test, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 + x3 + 1\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = 
client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_server_client_one_model_stage_with_function_tail_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef add_test(x1, x2):\n    return x1 + x2 + 1\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    y = register.add_stage(add_test, y, x3, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 + x3 + 1\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_server_client_one_model_stage_with_function_front_and_tail_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef add_test(x1, x2):\n    return x1 + x2 + 
1\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y = register.add_stage(add_test, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x3, outputs_count=1)\n    y = register.add_stage(add_test, y, x4, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x4 = np.array([[3.5, 4.3], [5.2, 6.4]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 + x3 + x4 + 2\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_server_client_one_model_stage_with_function_front_and_tail_double_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef add_test(x1, x2):\n    return x1 + x2 + 1\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5, x6):\n    y = register.add_stage(add_test, x1, x2, outputs_count=1)\n    y = register.add_stage(add_test, y, x3, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x4, outputs_count=1)\n    y = register.add_stage(add_test, y, x5, outputs_count=1)\n    y = register.add_stage(add_test, y, x6, outputs_count=1)\n    return y\n    
\"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x4 = np.array([[3.5, 4.3], [5.2, 6.4]], np.float32) * 1.1 * (i + 1)\n        x5 = np.array([[1.5, 2.3], [3.2, 4.4]], np.float32) * 1.1 * (i + 1)\n        x6 = np.array([[5.5, 6.3], [7.2, 8.4]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 + x3 + x4 + x5 + x6 + 4\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5, \"x6\": x6})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_server_client_two_model_stage_with_function_front_and_tail_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef add_test(x1, x2):\n    return x1 + x2 + 1\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5, x6):\n    y = register.add_stage(add_test, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x3, outputs_count=1)\n    y = register.add_stage(add_test, y, x4, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x5, outputs_count=1)\n    y = register.add_stage(add_test, y, x6, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    
instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[8.5, 7.3], [6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x4 = np.array([[3.5, 4.3], [5.2, 6.4]], np.float32) * 1.1 * (i + 1)\n        x5 = np.array([[1.5, 2.3], [3.2, 4.4]], np.float32) * 1.1 * (i + 1)\n        x6 = np.array([[5.5, 6.3], [7.2, 8.4]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 + x3 + x4 + x5 + x6 + 3\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5, \"x6\": x6})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_server_client_two_model_stage_with_function_front_and_tail_with_batch_dim_success():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\ndef add_test(x1, x2):\n    return x1 + x2 + 1\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4, x5, x6):\n    y = register.add_stage(add_test, x1, x2, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x3, outputs_count=1)\n    y = register.add_stage(add_test, y, x4, outputs_count=1)\n    y = register.add_stage(tensor_add, y, x5, outputs_count=1)\n    y = register.add_stage(add_test, y, x6, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[3.3, 4.4]], 
np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[6.2, 5.4]], np.float32) * 1.1 * (i + 1)\n        x4 = np.array([[5.2, 6.4]], np.float32) * 1.1 * (i + 1)\n        x5 = np.array([[3.2, 4.4]], np.float32) * 1.1 * (i + 1)\n        x6 = np.array([[7.2, 8.4]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2 + x3 + x4 + x5 + x6 + 3\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3, \"x4\": x4, \"x5\": x5, \"x6\": x6})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_server_client_worker_exit_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    result = client.infer(instances)\n\n    print(result)\n    check_result(result, y_data_list)\n\n    cur_process = psutil.Process(os.getpid())\n    children = cur_process.children(recursive=False)\n    for item in children:\n        os.kill(item.pid, signal.SIGINT)\n    time.sleep(2)\n    result = client.infer(instances)\n    print(result)\n    assert \"Grpc Error, (14, 'unavailable')\" in result[\"error\"]\n\n\n@serving_test\ndef test_server_client_worker_kill_restart_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, 
device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    # Client\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    instance_count = 3\n    instances, y_data_list = create_multi_instances_fp32(instance_count)\n    result = client.infer(instances)\n\n    print(result)\n    check_result(result, y_data_list)\n\n    cur_process = psutil.Process(os.getpid())\n    children = cur_process.children(recursive=False)\n    for item in children:\n        os.kill(item.pid, signal.SIGKILL)\n    time.sleep(3)\n    result = client.infer(instances)\n    print(result)\n    check_result(result, y_data_list)\n\n\n@serving_test\ndef test_server_client_worker_kill_no_restart_success():\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n    server.start_grpc_server(\"0.0.0.0:5500\")\n\n    cur_process = psutil.Process(os.getpid())\n    children = cur_process.children(recursive=False)\n    for item in children:\n        os.kill(item.pid, signal.SIGKILL)\n    time.sleep(3)\n\n    # Client\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    instance_count = 3\n    instances, _ = create_multi_instances_fp32(instance_count)\n    result = client.infer(instances)\n    print(result)\n    assert \"Grpc Error, (14, 'unavailable')\" in result[\"error\"]\n\n\n@serving_test\ndef test_start_server_invalid_grpc_address_failed():\n    try:\n        server.start_grpc_server(\"invalid address\")\n        assert False\n    except RuntimeError as e:\n        assert \"The format of the Serving gRPC address 'invalid address' is illegal\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_grpc_address2_failed():\n    try:\n        server.start_grpc_server(\"127.0.0.1\")\n        assert False\n    except RuntimeError as e:\n        assert \"The format of the Serving gRPC 
address '127.0.0.1' is illegal\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_grpc_address3_failed():\n    try:\n        server.start_grpc_server(\"127.0.0.0.1:5000\")\n        assert False\n    except RuntimeError as e:\n        assert \"Serving gRPC server start failed, create server failed, address 127.0.0.0.1:5000\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_grpc_address4_failed():\n    try:\n        server.start_grpc_server(\"127.0.0.1:5000000\")\n        assert False\n    except RuntimeError as e:\n        assert \"The port of the Serving gRPC address '127.0.0.1:5000000' is out of legal range [1 ~ 65535]\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_grpc_address5_failed():\n    try:\n        server.start_grpc_server(\"unix:\")\n        assert False\n    except RuntimeError as e:\n        assert \"Empty grpc server unix domain socket address\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_grpc_address6_failed():\n    try:\n        server.start_grpc_server(\"127.0.256.1:5000\")\n        assert False\n    except RuntimeError as e:\n        assert \"Serving gRPC server start failed, create server failed, address 127.0.256.1:5000\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_grpc_address7_failed():\n    try:\n        server.start_grpc_server(\"127.0.0.1:5000:5000\")\n        assert False\n    except RuntimeError as e:\n        assert \"Serving gRPC server start failed, create server failed, address 127.0.0.1:5000:5000\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_restful_address_failed():\n    try:\n        server.start_restful_server(\"invalid address\")\n        assert False\n    except RuntimeError as e:\n        assert \"The format of the RESTful server address 'invalid address' is illegal\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_restful_address2_failed():\n    try:\n        server.start_restful_server(\"127.0.0.1\")\n        assert False\n    
except RuntimeError as e:\n        assert \"The format of the RESTful server address '127.0.0.1' is illegal\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_restful_address3_failed():\n    try:\n        server.start_restful_server(\"127.0.0.0.1:5000\")\n        assert False\n    except RuntimeError as e:\n        assert \"RESTful server start failed, bind to the socket address 127.0.0.0.1:5000 failed\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_restful_address4_failed():\n    try:\n        server.start_restful_server(\"127.0.0.1:5000000\")\n        assert False\n    except RuntimeError as e:\n        assert \"The port of the RESTful server address '127.0.0.1:5000000' is out of legal range [1 ~ 65535]\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_restful_address5_failed():\n    try:\n        server.start_restful_server(\"127.0.256.1:5000\")\n        assert False\n    except RuntimeError as e:\n        assert \"RESTful server start failed, bind to the socket address 127.0.256.1:5000 failed\" in str(e)\n\n\n@serving_test\ndef test_start_server_invalid_restful_address6_failed():\n    try:\n        server.start_restful_server(\"unix:address_temp\")\n        assert False\n    except RuntimeError as e:\n        assert \"RESTful server does not support binding to unix domain socket\" in str(e)\n"
  },
  {
    "path": "tests/ut/python/tests/test_serving_log.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport os\nimport sys\nimport subprocess\n\nfrom common import serving_test\n\n\ndef start_new_log_process(log_py_context, env_set):\n    \"\"\"start new process with log\"\"\"\n\n    with open(\"test_log.py\", \"w\") as fp:\n        fp.write(log_py_context)\n    log_file = os.path.join(os.getcwd(), \"test_log.py\")\n    log_text = os.path.join(os.getcwd(), \"test_log.txt\")\n    print(f\"\\npython {log_file} >& {log_text}\")\n    arg = f\"{sys.executable} {log_file}\"\n    args = arg.split()\n\n    new_env = os.environ.copy()\n    new_env.update(env_set)\n\n    with open(log_text, \"w\") as fp:\n        sub = subprocess.Popen(args=args, shell=False, stdout=fp, stderr=fp, env=new_env)\n        sub.wait()\n\n    with open(log_text, \"r\") as fp:\n        lines = fp.read()\n        find_info = (lines.find(\"[INFO]\") != -1)\n        find_warning = (lines.find(\"[WARNING]\") != -1)\n        find_error = (lines.find(\"[ERROR]\") != -1)\n        print(\"log_text:------------------\")\n        print(lines)\n        print(\"log_text end------------------\")\n    os.system(f\"rm -f {log_file} {log_text}\")\n    return find_info, find_warning, find_error\n\n\ndef start_new_log_process_py(env_set):\n    \"\"\"start new process with python log\"\"\"\n    log_py_context = r\"\"\"\nfrom 
mindspore_serving import log as logger\nfrom mindspore_serving import server\ndef log_process():\n    logger.info(\"info msg test\")\n    logger.warning(\"warning msg test\")\n    logger.error(\"error msg test\")\n    logger.debug(\"debug msg test\")\n\nlog_process()\n    \"\"\"\n    return start_new_log_process(log_py_context, env_set)\n\n\ndef start_new_log_process_cpp(env_set):\n    \"\"\"start new process with cpp log\"\"\"\n    log_py_context = r\"\"\"\nfrom mindspore_serving import log as logger\nfrom mindspore_serving import server\ndef log_process():\n    # info\n    server.start_grpc_server(\"0.0.0.0:5500\")\n    try:\n        # error\n        server.start_grpc_server(\"0.0.0.0:5500\")\n    except RuntimeError:\n        pass\n\nlog_process()\n    \"\"\"\n    return start_new_log_process(log_py_context, env_set)\n\n\n@serving_test\ndef test_log_level_python_debug():\n    find_info, find_warning, find_error = start_new_log_process_py({\"GLOG_v\": \"0\"})\n    assert find_info\n    assert find_warning\n    assert find_error\n\n\n@serving_test\ndef test_log_level_python_info():\n    find_info, find_warning, find_error = start_new_log_process_py({\"GLOG_v\": \"1\"})\n    assert find_info\n    assert find_warning\n    assert find_error\n\n\n@serving_test\ndef test_log_level_python_warning():\n    find_info, find_warning, find_error = start_new_log_process_py({\"GLOG_v\": \"2\"})\n    assert not find_info\n    assert find_warning\n    assert find_error\n\n\n@serving_test\ndef test_log_level_python_error():\n    find_info, find_warning, find_error = start_new_log_process_py({\"GLOG_v\": \"3\"})\n    assert not find_info\n    assert not find_warning\n    assert find_error\n\n\n@serving_test\ndef test_log_level_cpp_debug():\n    find_info, _, find_error = start_new_log_process_cpp({\"GLOG_v\": \"0\"})\n    assert find_info\n    assert find_error\n\n\n@serving_test\ndef test_log_level_cpp_info():\n    find_info, _, find_error = start_new_log_process_cpp({\"GLOG_v\": 
\"1\"})\n    assert find_info\n    assert find_error\n\n\n@serving_test\ndef test_log_level_cpp_warning():\n    find_info, _, find_error = start_new_log_process_cpp({\"GLOG_v\": \"2\"})\n    assert not find_info\n    assert find_error\n\n\n@serving_test\ndef test_log_level_cpp_error():\n    find_info, _, find_error = start_new_log_process_cpp({\"GLOG_v\": \"3\"})\n    assert not find_info\n    assert find_error\n\n\n@serving_test\ndef test_log_level_cpp_debug2():\n    find_info, _, find_error = start_new_log_process_cpp({\"GLOG_v\": \"3\", \"MS_SUBMODULE_LOG_v\": \"{SERVING:0}\"})\n    assert find_info\n    assert find_error\n\n\n@serving_test\ndef test_log_level_cpp_info2():\n    find_info, _, find_error = start_new_log_process_cpp({\"GLOG_v\": \"3\", \"MS_SUBMODULE_LOG_v\": \"{SERVING:1}\"})\n    assert find_info\n    assert find_error\n\n\n@serving_test\ndef test_log_level_cpp_warning2():\n    find_info, _, find_error = start_new_log_process_cpp({\"GLOG_v\": \"3\", \"MS_SUBMODULE_LOG_v\": \"{SERVING:2}\"})\n    assert not find_info\n    assert find_error\n\n\n@serving_test\ndef test_log_level_cpp_error2():\n    find_info, _, find_error = start_new_log_process_cpp({\"GLOG_v\": \"3\", \"MS_SUBMODULE_LOG_v\": \"{SERVING:3}\"})\n    assert not find_info\n    assert find_error\n"
  },
  {
    "path": "tests/ut/python/tests/test_stage_function.py",
    "content": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\n\nfrom common import serving_test, create_client\nfrom common import start_serving_server\n\n\ndef is_float_equal(left, right):\n    return (np.abs(left - right) < 0.00001).all()\n\n\n@serving_test\ndef test_stage_function_one_function_stage_float_success():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Test stage with two inputs, one output\n    Expectation: Serving server work ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\ndef test_concat(x1, x2):\n    return x1 + x2\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(test_concat, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    x1s = []\n    x2s = []\n    x1s.append(np.array([[101.1, 205.2], [41.3, 62.4]], np.float32))\n    x2s.append(np.array([[3.5, 5.6], [7.7, 9.8]], np.float32))\n    x1s.append(np.array([[41.3, 32.2], [4.1, 3.9]], np.float32))\n    x2s.append(np.array([[1.4, 4.5], [9.6, 19.7]], 
np.float32))\n    x1s.append(np.array([[11.1, 21.2], [41.9, 61.8]], np.float32))\n    x2s.append(np.array([[31.5, 51.7], [71.4, 91.3]], np.float32))\n    for i in range(3):\n        instances.append({\"x1\": x1s[i], \"x2\": x2s[i]})\n        y = x1s[i] + x2s[i]\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == ys[0]).all()\n    assert (result[1][\"y\"] == ys[1]).all()\n    assert (result[2][\"y\"] == ys[2]).all()\n\n\n@serving_test\ndef test_stage_function_one_function_stage_two_output_success():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Test stage with one input, two outputs\n    Expectation: Serving server work ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\ndef test_concat(x1):\n    return x1 + 1, x1-1\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1):\n    y1, y2 = register.add_stage(test_concat, x1, outputs_count=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    y1s = []\n    y2s = []\n    x1s = []\n    x1s.append(np.array([[101.1, 205.2], [41.3, 62.4]], np.float32))\n    x1s.append(np.array([[41.3, 32.2], [4.1, 3.9]], np.float32))\n    x1s.append(np.array([[11.1, 21.2], [41.9, 61.8]], np.float32))\n    for i in range(3):\n        instances.append({\"x1\": x1s[i]})\n        y1s.append(x1s[i] + 1)\n        y2s.append(x1s[i] - 1)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y1\"] == y1s[0]).all()\n    assert (result[1][\"y1\"] == 
y1s[1]).all()\n    assert (result[2][\"y1\"] == y1s[2]).all()\n    assert (result[0][\"y2\"] == y2s[0]).all()\n    assert (result[1][\"y2\"] == y2s[1]).all()\n    assert (result[2][\"y2\"] == y2s[2]).all()\n\n\n@serving_test\ndef test_stage_function_one_function_stage_output_more_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Test stage declared outputs_count < python function outputs count\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test(x1, x2):\n    return x1+x2, x1-x2, 1\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test, x1, x2, outputs_count=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}] * 3\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef test_stage_function_one_function_stage_output_less_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Test stage declared outputs_count > python function outputs count\n    Expectation: Serving server report error.\n    \"\"\"\n    
servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test(x1, x2):\n    return x1+x2\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test, x1, x2, outputs_count=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}] * 3\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef test_stage_function_one_function_stage_error_outputs_count_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Test stage declared outputs_count > python function outputs count\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test(x1, x2):\n    return x1+x2\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test, x1, x2, outputs_count=3)\n    return y1, y2\n    \"\"\"\n    try:\n        start_serving_server(servable_content)\n        
assert False\n    except RuntimeError as e:\n        assert \"too many values to unpack (expected 2)\" in str(e)\n\n\n@serving_test\ndef test_stage_function_one_function_stage_error_outputs_count2_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Test stage declared outputs_count < python function outputs count\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test(x1, x2):\n    return x1+x2\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test, x1, x2, outputs_count=1)\n    return y1, y2\n    \"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except RuntimeError as e:\n        assert \"cannot unpack non-iterable _TensorDef object\" in str(e)\n\n\n@serving_test\ndef test_stage_function_one_function_stage_input_more_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Test stage declared inputs count < python function inputs count\n    Expectation: Serving server startup error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test(x1, x2, x3):\n    return x1, x2\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test, x1, x2, outputs_count=2)\n    return y1, y2\n    \"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except RuntimeError as e:\n        assert \"function func_test input args count 3 not match the count 2 registered in method\" in 
str(e)\n\n\n@serving_test\ndef test_stage_function_one_function_stage_input_less_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Test stage declared inputs count > python function inputs count\n    Expectation: Serving server startup error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test(x1):\n    return x1, x2\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test, x1, x2, outputs_count=2)\n    return y1, y2\n    \"\"\"\n    try:\n        start_serving_server(servable_content)\n        assert False\n    except RuntimeError as e:\n        assert \"function func_test input args count 1 not match the count 2 registered in method\" in str(e)\n\n\n@serving_test\ndef test_stage_function_one_function_stage_raise_exception_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function raise exception\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test(x1, x2):\n    raise RuntimeError(\"runtime error text\")\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test, x1, x2, outputs_count=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}] * 3\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = 
client.infer(instances)\n    print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef test_stage_function_one_function_stage_none_outputs_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function return None\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test(x1, x2):\n    print(\"none outputs\")\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test, x1, x2, outputs_count=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}] * 3\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef 
test_stage_function_one_function_stage_invalid_output_dtype_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function return invalid data, dtype is not supported\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test(x1, x2):\n    return x1.dtype, x2.dtype\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test, x1, x2, outputs_count=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    instances = [{\"x1\": x1, \"x2\": x2}] * 3\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_success():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, and result output count is 1, tuple/list\n    Expectation: Serving server work ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", 
with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y = instance[0] + instance[1]\n        results.append([y])\n    return results\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=2)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2\n        instances.append({\"x1\": x1, \"x2\": x2})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size2_success():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, and result output count is 1, not tuple/list\n    Expectation: Serving server work ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y = instance[0] + instance[1]\n        results.append(y)\n    return results\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=2)\n    return y\n    \"\"\"\n    base = 
start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2\n        instances.append({\"x1\": x1, \"x2\": x2})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size3_success():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, use yield, not tuple/list\n    Expectation: Serving server work ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y = instance[0] + instance[1]\n        yield y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=2)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2\n        instances.append({\"x1\": x1, \"x2\": x2})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", 
base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size4_success():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, use yield, use tuple/list\n    Expectation: Serving server work ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y = instance[0] + instance[1]\n        yield [y]\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=2)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2\n        instances.append({\"x1\": x1, \"x2\": x2})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_equal1_success():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run 
with batch_size parameter, batch size = 1\n    Expectation: Serving server work ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y = instance[0] + instance[1]\n        yield y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2\n        instances.append({\"x1\": x1, \"x2\": x2})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_0_success():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, batch size=0, batch size is determined by system\n    Expectation: Serving server work ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y = instance[0] + 
instance[1]\n        yield y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=0)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    ys = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        y = x1 + x2\n        instances.append({\"x1\": x1, \"x2\": x2})\n        ys.append(y)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y\"], ys[0])\n    assert is_float_equal(result[1][\"y\"], ys[1])\n    assert is_float_equal(result[2][\"y\"], ys[2])\n\n\n@serving_test\ndef test_stage_function_one_function_stage_error_batch_size_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, batch size is invalid\n    Expectation: Serving server startup failed.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y = instance[0] + instance[1]\n        yield y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=-1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n        assert False\n    except RuntimeError as e:\n        assert \"Parameter 'batch_size' should be >= 0\" in str(e)\n\n\n@serving_test\ndef 
test_stage_function_one_function_stage_batch_size_two_outputs_success():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, yield, result outputs count is 2\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        yield y1, y2\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test_batch, x1, x2, outputs_count=2, batch_size=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    y1s = []\n    y2s = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        y1 = x1 + x2\n        y2 = x1 - x2\n        instances.append({\"x1\": x1, \"x2\": x2})\n        y1s.append(y1)\n        y2s.append(y2)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y1\"], y1s[0])\n    assert is_float_equal(result[1][\"y1\"], y1s[1])\n    assert is_float_equal(result[2][\"y1\"], y1s[2])\n    assert is_float_equal(result[0][\"y2\"], y2s[0])\n    assert is_float_equal(result[1][\"y2\"], y2s[1])\n    assert is_float_equal(result[2][\"y2\"], y2s[2])\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_two_outputs_multi_times_success():\n    \"\"\"\n    Feature: test servable_config.py 
stage\n    Description: Stage python function run with batch_size parameter, multi stage\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        yield y1, y2\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test_batch, x1, x2, outputs_count=2, batch_size=2)\n    y1, y2 = register.add_stage(func_test_batch, y1, y2, outputs_count=2, batch_size=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    y1s = []\n    y2s = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        y1, y2 = x1 + x2, x1 - x2\n        y1, y2 = y1 + y2, y1 - y2\n        instances.append({\"x1\": x1, \"x2\": x2})\n        y1s.append(y1)\n        y2s.append(y2)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y1\"], y1s[0])\n    assert is_float_equal(result[1][\"y1\"], y1s[1])\n    assert is_float_equal(result[2][\"y1\"], y1s[2])\n    assert is_float_equal(result[0][\"y2\"], y2s[0])\n    assert is_float_equal(result[1][\"y2\"], y2s[1])\n    assert is_float_equal(result[2][\"y2\"], y2s[2])\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_two_outputs2_success():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python 
function run with batch_size parameter, result output count is 2\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        results.append([y1, y2])\n    return results\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2 = register.add_stage(func_test_batch, x1, x2, outputs_count=2, batch_size=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    y1s = []\n    y2s = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        y1 = x1 + x2\n        y2 = x1 - x2\n        instances.append({\"x1\": x1, \"x2\": x2})\n        y1s.append(y1)\n        y2s.append(y2)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y1\"], y1s[0])\n    assert is_float_equal(result[1][\"y1\"], y1s[1])\n    assert is_float_equal(result[2][\"y1\"], y1s[2])\n    assert is_float_equal(result[0][\"y2\"], y2s[0])\n    assert is_float_equal(result[1][\"y2\"], y2s[1])\n    assert is_float_equal(result[2][\"y2\"], y2s[2])\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_input_more_success():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, used inputs count 2 < declared inputs count 3\n    Expectation: Serving 
server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        results.append([y1, y2])\n    return results\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2, x3):\n    y1, y2 = register.add_stage(func_test_batch, x1, x2, x3, outputs_count=2, batch_size=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    y1s = []\n    y2s = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x3 = np.array([[1.5, 2.6], [3.7, 4.8]], np.float32) * 1.1 * (i + 1)\n        y1 = x1 + x2\n        y2 = x1 - x2\n        instances.append({\"x1\": x1, \"x2\": x2, \"x3\": x3})\n        y1s.append(y1)\n        y2s.append(y2)\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert is_float_equal(result[0][\"y1\"], y1s[0])\n    assert is_float_equal(result[1][\"y1\"], y1s[1])\n    assert is_float_equal(result[2][\"y1\"], y1s[2])\n    assert is_float_equal(result[0][\"y2\"], y2s[0])\n    assert is_float_equal(result[1][\"y2\"], y2s[1])\n    assert is_float_equal(result[2][\"y2\"], y2s[2])\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_input_less_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, used inputs count 2 > declared inputs count 1\n    Expectation: 
Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        results.append([y1, y2])\n    return results\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1):\n    y1, y2 = register.add_stage(func_test_batch, x1, outputs_count=2, batch_size=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    for i in range(3):\n        x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32) * 1.1 * (i + 1)\n        instances.append({\"x1\": x1})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_output_more_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, outputs count 2 < declared outputs_count 3\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", 
model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        results.append([y1, y2])\n    return results\n\n@register.register_method(output_names=[\"y1\", \"y2\"])\ndef predict(x1, x2):\n    y1, y2, y3 = register.add_stage(func_test_batch, x1, x2, outputs_count=3, batch_size=2)\n    return y1, y2\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    for i in range(3):\n        x1 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[1.5, 2.6], [3.7, 4.8]], np.float32) * 1.1 * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_output_less_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, outputs count 2 > declared outputs_count 1\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in 
instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        results.append([y1, y2])\n    return results\n\n@register.register_method(output_names=[\"y1\"])\ndef predict(x1, x2):\n    y1 = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=2)\n    return y1\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    for i in range(3):\n        x1 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[1.5, 2.6], [3.7, 4.8]], np.float32) * 1.1 * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_output_less2_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, outputs count 2 > declared outputs_count 1, yield\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        yield y1, 
y2\n\n@register.register_method(output_names=[\"y1\"])\ndef predict(x1, x2):\n    y1 = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=2)\n    return y1\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    for i in range(3):\n        x1 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[1.5, 2.6], [3.7, 4.8]], np.float32) * 1.1 * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_raise_exception_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, raise exception\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    raise RuntimeError(\"runtime error test\")\n\n@register.register_method(output_names=[\"y1\"])\ndef predict(x1, x2):\n    y1 = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=2)\n    return y1\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n 
   for i in range(3):\n        x1 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[1.5, 2.6], [3.7, 4.8]], np.float32) * 1.1 * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_none_return_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, return None\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    pass\n\n@register.register_method(output_names=[\"y1\"])\ndef predict(x1, x2):\n    y1 = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=2)\n    return y1\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    for i in range(3):\n        x1 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[1.5, 2.6], [3.7, 4.8]], np.float32) * 1.1 * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    
print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef test_stage_function_one_function_stage_batch_size_invalid_output_dtype_failed():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, return invalid data\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef func_test_batch(instances):\n    results = []\n    for instance in instances:\n        y1 = instance[0] + instance[1]\n        y2 = instance[0] - instance[1]\n        results.append([y1.dtype, y2.dtype])\n    return results\n\n@register.register_method(output_names=[\"y1\"])\ndef predict(x1, x2):\n    y1 = register.add_stage(func_test_batch, x1, x2, outputs_count=1, batch_size=2)\n    return y1\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=\"tensor_add.mindir\")\n    # Client\n    instances = []\n    for i in range(3):\n        x1 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32) * 1.1 * (i + 1)\n        x2 = np.array([[1.5, 2.6], [3.7, 4.8]], np.float32) * 1.1 * (i + 1)\n        instances.append({\"x1\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\")\n    result = client.infer(instances)\n    print(\"result\", result)\n    if isinstance(result, dict):\n        assert \"servable is not available\" in result[\"error\"] \\\n               or 
f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[\"error\"]\n    else:\n        assert \"servable is not available\" in result[0][\"error\"] \\\n               or f\"Call Function '{base.servable_name}.func_test_batch' Failed\" in result[0][\"error\"]\n\n\n@serving_test\ndef test_servable_postprocess_result_count_less():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, return instances count less then input\n        instances count\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\ndef postprocess(instances):\n    count = len(instances)\n    for i in range(count -1):\n        yield i\n    \n@register.register_method(output_names=[\"y\"])\ndef add_common(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    y = register.add_stage(postprocess, y, outputs_count=1, batch_size=4, tag=\"Postprocess\")\n    return y\n\"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    instance_count = 2\n\n    instances = []\n    y_data_list = []\n    for i in range(instance_count):\n        x1 = np.asarray([[1.1], [3.3]]).astype(np.float32) * (i + 1)\n        x2 = np.asarray([[5.5], [7.7]]).astype(np.float32) * (i + 1)\n        y_data_list.append(x1 + x2)\n        instances.append({\"x1\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    result = client.infer(instances)\n    print(result)\n    assert \"Postprocess Failed\" in str(result[1][\"error\"]) or 'servable is not available' in str(result[1][\"error\"])\n\n\n@serving_test\ndef test_servable_postprocess_result_count_more():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python 
function run with batch_size parameter, return instances count more then input\n        instances count\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\ndef postprocess(instances):\n    count = len(instances)\n    for i in range(count + 1):\n        yield i\n    \n@register.register_method(output_names=[\"y\"])\ndef add_common(x1, x2):\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    y = register.add_stage(postprocess, y, outputs_count=1, batch_size=4, tag=\"Postprocess\")\n    return y\n\"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    instance_count = 2\n\n    instances = []\n    y_data_list = []\n    for i in range(instance_count):\n        x1 = np.asarray([[1.1], [3.3]]).astype(np.float32) * (i + 1)\n        x2 = np.asarray([[5.5], [7.7]]).astype(np.float32) * (i + 1)\n        y_data_list.append(x1 + x2)\n        instances.append({\"x1\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    result = client.infer(instances)\n    print(result)\n    assert len(result) == instance_count\n    assert result[0][\"y\"] == 0\n    assert result[1][\"y\"] == 1\n\n\n@serving_test\ndef test_stage_function_preprocess_result_count_less():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, return instances count less then input\n        instances count\n    Expectation: Serving server report error.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\ndef preprocess(instances):\n    count = len(instances)\n    for i in 
range(count-1):\n        yield i\n    \n@register.register_method(output_names=[\"y\"])\ndef add_common(x1, x2):\n    x3 = register.add_stage(preprocess, x1, outputs_count=1, batch_size=4, tag=\"Preprocess\")\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return x3\n\"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    instance_count = 2\n\n    instances = []\n    y_data_list = []\n    for i in range(instance_count):\n        x1 = np.asarray([[1.1], [3.3]]).astype(np.float32) * (i + 1)\n        x2 = np.asarray([[5.5], [7.7]]).astype(np.float32) * (i + 1)\n        y_data_list.append(x1 + x2)\n        instances.append({\"x1\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    result = client.infer(instances)\n    print(result)\n    if isinstance(result, list):\n        assert \"Preprocess Failed\" in str(result[1][\"error\"]) or \"servable is not available\" in str(result[1][\"error\"])\n    else:\n        assert \"Preprocess Failed\" in str(result[\"error\"]) or \"servable is not available\" in str(result[\"error\"])\n\n\n@serving_test\ndef test_stage_function_preprocess_result_count_more():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Stage python function run with batch_size parameter, return instances count more then input\n        instances count\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\ndef preprocess(instances):\n    count = len(instances)\n    for i in range(count+1):\n        yield i\n    \n@register.register_method(output_names=[\"y\"])\ndef add_common(x1, x2):\n    x3 = register.add_stage(preprocess, x1, outputs_count=1, batch_size=4, tag=\"Preprocess\")\n    y = register.add_stage(model, x1, x2, 
outputs_count=1)\n    return x3\n\"\"\"\n    base = start_serving_server(servable_content)\n    # Client\n    instance_count = 3\n\n    instances = []\n    y_data_list = []\n    for i in range(instance_count):\n        x1 = np.asarray([[1.1], [3.3]]).astype(np.float32) * (i + 1)\n        x2 = np.asarray([[5.5], [7.7]]).astype(np.float32) * (i + 1)\n        y_data_list.append(x1 + x2)\n        instances.append({\"x1\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    result = client.infer(instances)\n    print(result)\n    assert len(result) == instance_count\n\n\n@serving_test\ndef test_stage_function_push_no_forc_array():\n    \"\"\"\n    Feature: test servable_config.py stage\n    Description: Preprocess return numpy array not C_CONTIGUOUS\n    Expectation: Serving server work well.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=True)\n\ndef preprocess(x1):\n    x1 = x1.reshape(3,4)\n    x1 = x1[1:3,2:3]\n    return x1\n    \n@register.register_method(output_names=[\"y\"])\ndef add_common(x1, x2):\n    x1 = register.add_stage(preprocess, x1, outputs_count=1, tag=\"Preprocess\")\n    y = register.add_stage(model, x1, x2, outputs_count=1)\n    return y\n\"\"\"\n    base = start_serving_server(servable_content)\n    instances = []\n    x1 = np.arange(12).astype(np.float32)\n    x2 = np.asarray([[5.5], [7.7]]).astype(np.float32)\n    instances.append({\"x1\": x1, \"x2\": x2})\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\")\n    result = client.infer(instances)\n    print(result)\n    assert len(result) == 1\n    assert \"y\" in result[0]\n"
  },
  {
    "path": "tests/ut/python/tests/test_start_servable_config.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test Serving: test servable_config\"\"\"\n\nfrom common import ServingTestBase, serving_test\nfrom mindspore_serving import server\n\n# test servable_config.py\nservable_config_import = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\"\"\"\n\nservable_config_declare_servable = r\"\"\"\nregister.declare_servable(servable_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\"\"\"\n\nservable_config_preprocess_cast = r\"\"\"\ndef add_trans_datatype(x1, x2):\n    return x1.astype(np.float32), x2.astype(np.float32)\n\"\"\"\n\nservable_config_method_add_common = r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_common(x1, x2):  # only support float32 inputs\n    y = register.call_servable(x1, x2)\n    return y\n\"\"\"\n\nservable_config_method_add_cast = r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)  # cast input to float32\n    y = register.call_servable(x1, x2)\n    return y\n\"\"\"\n\n\n@serving_test\ndef test_register_method_common_success():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += 
servable_config_preprocess_cast\n    servable_content += servable_config_method_add_common\n    servable_content += servable_config_method_add_cast\n\n    base.init_servable_with_servable_config(1, servable_content)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n\n\n@serving_test\ndef test_register_method_no_declare_servable_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    # servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += servable_config_method_add_common\n    servable_content += servable_config_method_add_cast\n\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"There is no model declared, you can use declare_model to declare models\" in str(e)\n\n\n@serving_test\ndef test_register_method_reference_invalid_preprocess_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    # servable_content += servable_config_preprocess_cast\n    servable_content += servable_config_method_add_common\n    servable_content += servable_config_method_add_cast\n\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"name 'add_trans_datatype' is not defined\" in str(e)\n\n\n# preprocess order error\n@serving_test\ndef test_register_method_preprocess_after_predict_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += 
servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.call_servable(x1, x2)    \n    x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)\n    return x1\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_servable should be invoked after call_preprocess\" in str(e)\n\n\n@serving_test\ndef test_register_method_preprocess_after_postprocess_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_postprocess(add_trans_datatype, x1, x2)\n    x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)\n    y = register.call_servable(x1, x2)    \n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_postprocess or call_postprocess_pipeline should be invoked after call_preprocess\" in str(e)\n\n\n@serving_test\ndef test_register_method_preprocess_after_postprocess_pipeline_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = 
register.call_postprocess_pipeline(add_trans_datatype, x1, x2)\n    x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)\n    y = register.call_servable(x1, x2)    \n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_postprocess or call_postprocess_pipeline should be invoked after call_preprocess\" in str(e)\n\n\n# preprocess_pipeline order error\n@serving_test\ndef test_register_method_preprocess_pipeline_after_predict_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.call_servable(x1, x2)    \n    x1, x2 = register.call_preprocess_pipeline(add_trans_datatype, x1, x2)\n    return x1\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_servable should be invoked after call_preprocess_pipeline\" in str(e)\n\n\n@serving_test\ndef test_register_method_preprocess_pipeline_after_postprocess_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_postprocess(add_trans_datatype, x1, x2)\n    x1, x2 = register.call_preprocess_pipeline(add_trans_datatype, x1, x2)\n    y = 
register.call_servable(x1, x2)    \n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_postprocess or call_postprocess_pipeline should be invoked after call_preprocess_pipeline\" \\\n               in str(e)\n\n\n@serving_test\ndef test_register_method_preprocess_pipeline_after_postprocess_pipeline_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_postprocess_pipeline(add_trans_datatype, x1, x2)\n    x1, x2 = register.call_preprocess_pipeline(add_trans_datatype, x1, x2)\n    y = register.call_servable(x1, x2)    \n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_postprocess or call_postprocess_pipeline should be invoked after call_preprocess_pipeline\" \\\n               in str(e)\n\n\n# repeat preprocess\n@serving_test\ndef test_register_method_preprocess_twice_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)\n    x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)\n    y = 
register.call_servable(x1, x2)    \n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_preprocess or call_preprocess_pipeline should not be invoked more than once\" in str(e)\n\n\n@serving_test\ndef test_register_method_preprocess_twice2_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)\n    x1, x2 = register.call_preprocess_pipeline(add_trans_datatype, x1, x2)\n    y = register.call_servable(x1, x2)    \n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_preprocess or call_preprocess_pipeline should not be invoked more than once\" in str(e)\n\n\n@serving_test\ndef test_register_method_preprocess_pipeline_twice_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_preprocess_pipeline(add_trans_datatype, x1, x2)\n    x1, x2 = register.call_preprocess_pipeline(add_trans_datatype, x1, x2)\n    y = register.call_servable(x1, x2)    \n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, 
servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_preprocess or call_preprocess_pipeline should not be invoked more than once\" in str(e)\n\n\n# repeat postprocess\n@serving_test\ndef test_register_method_postprocess_twice_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\ndef postprocess(y):\n    return y.astype(np.int32)\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.call_servable(x1, x2)  \n    y = register.call_postprocess(postprocess, y)\n    y = register.call_postprocess(postprocess, y)  \n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_postprocess or call_postprocess_pipeline should not be invoked more than once\" in str(e)\n\n\n@serving_test\ndef test_register_method_postprocess_twice2_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\ndef postprocess(y):\n    return y.astype(np.int32)\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.call_servable(x1, x2)  \n    y = register.call_postprocess_pipeline(postprocess, y)\n    y = register.call_postprocess(postprocess, y)  \n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        
server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_postprocess or call_postprocess_pipeline should not be invoked more than once\" in str(e)\n\n\n@serving_test\ndef test_register_method_postprocess_pipeline_twice_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\ndef postprocess(y):\n    return y.astype(np.int32)\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.call_servable(x1, x2)  \n    y = register.call_postprocess_pipeline(postprocess, y)\n    y = register.call_postprocess_pipeline(postprocess, y)  \n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_postprocess or call_postprocess_pipeline should not be invoked more than once\" in str(e)\n\n\n# call servable repeat\n@serving_test\ndef test_register_method_call_servable_twice_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)\n    y = register.call_servable(x1, x2)\n    y = register.call_servable(x1, x2)\n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n   
     assert False\n    except RuntimeError as e:\n        assert \"call_servable should not be invoked more than once\" in str(e)\n\n\n@serving_test\ndef test_register_method_call_servable_after_postprocess_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_postprocess(add_trans_datatype, x1, x2)\n    y = register.call_servable(x1, x2)\n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_postprocess or call_postprocess_pipeline should be invoked after call_servable\" in str(e)\n\n\n@serving_test\ndef test_register_method_call_servable_after_postprocess_pipeline_failed():\n    base = ServingTestBase()\n    servable_content = servable_config_import\n    servable_content += servable_config_declare_servable\n    servable_content += servable_config_preprocess_cast\n    servable_content += r\"\"\"\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_postprocess_pipeline(add_trans_datatype, x1, x2)\n    y = register.call_servable(x1, x2)\n    return y\n\"\"\"\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"call_postprocess or call_postprocess_pipeline should be invoked after call_servable\" in str(e)\n\n\n@serving_test\ndef test_register_method_without_call_servable_failed():\n    servable_content = 
r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nregister.declare_servable(servable_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef add_trans_datatype(x1, x2):\n    return x1.astype(np.float32), x2.astype(np.float32)\n\ndef add_func(x1, x2):\n    return x1+x2   \n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2)  # cast input to float32\n    y = register.call_postprocess(add_func, x1, x2)\n    return y\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"Not find the invoke of 'call_servable'\" in str(e)\n\n\n@serving_test\ndef test_register_method_invalid_call_servable():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    if True:\n        y = register.call_servable(model, x1, x2)\n        return y\n    return x1\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"conditions and loops are not supported in register_method when the interface 'call_servable' is used,\" \\\n               \" use 'add_stage' to replace 'call_servable'\" in str(e)\n\n\n@serving_test\ndef test_register_method_invalid_call_servable2():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server 
import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\nmodel2 = register.declare_model(model_file=\"tensor_add2.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.call_servable(x1, x2)\n    return y\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"There are more than one servable declared when the interface 'call_servable' is used, use 'add_stage'\" \\\n               \" to replace 'call_servable'\" in str(e)\n\n\n@serving_test\ndef test_register_method_invalid_call_preprocess():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef preprocess(x1, x2):\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    if True:\n        y = register.call_preprocess(preprocess, x1, x2)\n        return y\n    return x1\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"conditions and loops are not supported in register_method when the interface 'call_preprocess'\" \\\n               \" is used, use 'add_stage' to replace 'call_preprocess'\" in str(e)\n\n\n@serving_test\ndef test_register_method_invalid_call_preprocess_pipeline():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import 
register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef preprocess(x1, x2):\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    if True:\n        y = register.call_preprocess_pipeline(preprocess, x1, x2)\n        return y\n    return x1\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"conditions and loops are not supported in register_method when the interface\" \\\n               \" 'call_preprocess_pipeline' is used, use 'add_stage' to replace 'call_preprocess_pipeline'\" in str(e)\n\n\n@serving_test\ndef test_register_method_invalid_call_postprocess():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef preprocess(x1, x2):\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    if True:\n        y = register.call_postprocess(preprocess, x1, x2)\n        return y\n    return x1\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"conditions and loops are not supported in register_method when the interface 'call_postprocess'\" \\\n               \" is used, use 'add_stage' to replace 'call_postprocess'\" in str(e)\n\n\n@serving_test\ndef test_register_method_invalid_call_postprocess_pipeline():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom 
mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef preprocess(x1, x2):\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    if True:\n        y = register.call_postprocess_pipeline(preprocess, x1, x2)\n        return y\n    return x1\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"conditions and loops are not supported in register_method when the interface \" \\\n               \"'call_postprocess_pipeline' is used, use 'add_stage' to replace 'call_postprocess_pipeline'\" in str(e)\n\n\n@serving_test\ndef test_register_method_invalid_call_preprocess_with_condition():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef preprocess(x1, x2):\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.call_preprocess(preprocess, x1, x2)\n    if True:\n        y = register.call_postprocess(preprocess, x1, x2)\n        return y\n    return y\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"complex statements such as conditions and loops are not supported in register_method when the \" \\\n               \"interface 'call_preprocess' is used, use 'add_stage' to replace 'call_preprocess'\" in str(e)\n\n\n@serving_test\ndef 
test_register_method_invalid_call_preprocess_with_condition2():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef preprocess(x1, x2):\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    if True:\n        y = register.call_postprocess(preprocess, x1, x2)\n        return y\n    y = register.call_preprocess(preprocess, x1, x2)\n    return y\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"complex statements such as conditions and loops are not supported in register_method when the \" \\\n               \"interface 'call_preprocess' is used, use 'add_stage' to replace 'call_preprocess'\" in str(e)\n\n\n@serving_test\ndef test_register_method_mix_call_xxx_add_stage_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef preprocess(x1, x2):\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y = register.call_preprocess(preprocess, x1, x2)\n    y = register.call_servable(y, x3)\n    y = register.call_postprocess(preprocess, y, x4)\n    y = register.add_stage(preprocess, y, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert 
\"complex statements such as conditions and loops are not supported in register_method when the\" in str(e)\n\n\n@serving_test\ndef test_register_method_mix_call_xxx_add_stage2_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef preprocess(x1, x2):\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2, x3, x4):\n    y = register.add_stage(preprocess, x1, x2, outputs_count=1)\n    y = register.call_preprocess(preprocess, y, x2)\n    y = register.call_servable(y, x3)\n    y = register.call_postprocess(preprocess, y, x4)\n    return y\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"complex statements such as conditions and loops are not supported in register_method when the\" in str(e)\n\n\n@serving_test\ndef test_register_method_mix_call_xxx_add_stage3_failed():\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\", with_batch_dim=False)\n\ndef preprocess(x1, x2):\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    if True:\n        y = register.call_postprocess(preprocess, x1, x2)\n        return y\n    y = register.add_stage(preprocess, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0))\n        assert False\n    except 
RuntimeError as e:\n        assert \"complex statements such as conditions and loops are not supported in register_method when the\" in str(e)\n"
  },
  {
    "path": "tests/ut/python/tests/test_start_sevables.py",
    "content": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test Serving with master, worker and client\"\"\"\nimport shutil\nimport os\nimport numpy as np\n\nfrom common import ServingTestBase, serving_test, start_serving_server, create_client\nfrom mindspore_serving import server\nfrom mindspore_serving.server._servable_local import merge_config\n\n\n@serving_test\ndef test_start_servable_servable_dir_invalid_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: servable dir is not exist\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(\n            server.ServableStartConfig(base.servable_dir + \"_error\", base.servable_name, device_ids=0))\n        assert False\n    except RuntimeError as e:\n        assert \"Check servable config failed, directory \" in str(e)\n\n\n# start_servable\n@serving_test\ndef test_start_worker_no_servable_config_file_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: servable_config.py is not exist\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"no_exist_servable_config.py\")\n    try:\n        server.start_servables(\n            
server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, version_number=0))\n        assert False\n    except RuntimeError as e:\n        assert \"Check servable config failed, file \" in str(e)\n\n\n@serving_test\ndef test_start_worker_no_model_file_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: model file is not exist\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\", model_file=\"tensor_add_error.mindir\")\n    try:\n        server.start_servables(\n            server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, version_number=0))\n        assert False\n    except RuntimeError as e:\n        assert \"Load model failed, servable directory: \" in str(e)\n\n\n@serving_test\ndef test_start_servable_servable_dir_empty_invalid_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: input parameter 'servable_directory' invalid\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(server.ServableStartConfig(\"\", base.servable_name, device_ids=0, version_number=0))\n        assert False\n    except RuntimeError as e:\n        assert \"Parameter 'servable_directory' should not be empty str\" in str(e)\n\n\n@serving_test\ndef test_start_worker_type_servable_dir_invalid_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: input parameter 'servable_directory' invalid\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(server.ServableStartConfig(1, base.servable_name, device_ids=0, version_number=0))\n        assert False\n    except RuntimeError as e:\n        assert \"Parameter 'servable_directory' should be 
str, but actually \" in str(e)\n\n\n@serving_test\ndef test_start_worker_type_servable_name_invalid_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: input parameter 'servable_name' invalid\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, False, device_ids=0, version_number=0))\n        assert False\n    except RuntimeError as e:\n        assert \"Parameter 'servable_name' should be str, but actually \" in str(e)\n\n\n@serving_test\ndef test_start_servable_version_number_invalid_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: There is no specified version model\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(\n            server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, version_number=2))\n        assert False\n    except RuntimeError as e:\n        assert \"There is no specified version directory of models, specified version number: 2\" in str(e)\n\n\n@serving_test\ndef test_start_servable_version_number_invalid2_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: There is no valid version directory\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(0, \"add_servable_config.py\")\n    try:\n        server.start_servables(\n            server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, version_number=0))\n        assert False\n    except RuntimeError as e:\n        assert \"There is no valid version directory of models\" in str(e)\n\n\n@serving_test\ndef test_start_worker_type_version_number_invalid_failed():\n    \"\"\"\n    Feature: test start servables\n    
Description: input parameter 'version_number' invalid\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(\n            server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, version_number=False))\n        assert False\n    except RuntimeError as e:\n        assert \"Parameter 'version_number' should be int, but actually \" in str(e)\n\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(\n            server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, version_number=-1))\n        assert False\n    except RuntimeError as e:\n        assert \"Parameter 'version_number' should be >= 0\" in str(e)\n\n\n@serving_test\ndef test_start_worker_type_device_id_invalid_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: input parameter 'device_ids' invalid\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(\n            server.ServableStartConfig(base.servable_dir, base.servable_name, version_number=1, device_ids=\"1\"))\n        assert False\n    except RuntimeError as e:\n        assert \"Parameter 'device_ids' should be int or tuple/list of int, but actually\" in str(e)\n\n\n@serving_test\ndef test_start_worker_device_id_range_invalid_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: input parameter 'device_ids' invalid\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(\n            server.ServableStartConfig(base.servable_dir, base.servable_name, version_number=1, device_ids=-1))\n        assert False\n    
except RuntimeError as e:\n        assert \"The item value '-1' in parameter 'device_ids' should be >= 0\" in str(e)\n\n\n@serving_test\ndef test_start_worker_type_device_type_invalid_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: input parameter 'device_type' invalid\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(\n            server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, version_number=1,\n                                       device_type=123))\n        assert False\n    except RuntimeError as e:\n        assert \"Parameter 'device_type' should be str, but actually\" in str(e)\n\n\n@serving_test\ndef test_start_worker_device_type_value_invalid_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: input parameter 'device_type' invalid\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0,\n                                                          device_type=\"InvalidDeviceType\"))\n        assert False\n    except RuntimeError as e:\n        assert \"Unsupported device type 'InvalidDeviceType', only support 'Ascend', 'GPU', 'CPU' and None, \" \\\n               \"case ignored\" in str(e)\n\n\n@serving_test\ndef test_start_worker_device_type_value_invalid2_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: input parameter 'device_type' invalid\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(\n            server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, 
device_type=\"\"))\n        assert False\n    except RuntimeError as e:\n        assert \"Parameter 'device_type' should not be empty str\" in str(e)\n\n\n@serving_test\ndef test_start_worker_type_device_type_none_success():\n    \"\"\"\n    Feature: test start servables\n    Description: input parameter 'device_type' invalid\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(\n        server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, device_type=None))\n\n\n@serving_test\ndef test_start_worker_type_device_type_none2_success():\n    \"\"\"\n    Feature: test start servables\n    Description: input parameter 'device_type' invalid\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(\n        server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, device_type='None'))\n\n\n@serving_test\ndef test_servable_start_config_merge_same_version_same_device_ids_success():\n    \"\"\"\n    Feature: test merge servable config\n    Description: specified version 1 and newest version 0 can merge to one config of version 1\n    Expectation: success to merge config.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=2, version_number=0)\n    config1 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=2, version_number=1)\n    config_ret = merge_config((config0, config1))\n    assert len(config_ret) == 1\n    assert config_ret[0].version_number == 1\n    assert len(config_ret[0].device_ids) == 1\n    assert config_ret[0].device_ids[0] == 2\n\n\n@serving_test\ndef test_servable_start_config_merge_same_version_diff_device_ids_success():\n   
 \"\"\"\n    Feature: test merge servable config\n    Description: specified version 1 with diff device can merge to one config with device_ids merged\n    Expectation: success to merge config.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=1, version_number=1)\n    config1 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=(0, 2), version_number=1)\n    config_ret = merge_config((config0, config1))\n    assert len(config_ret) == 1\n    assert config_ret[0].version_number == 1\n    assert len(config_ret[0].device_ids) == 3\n    assert 0 in config_ret[0].device_ids\n    assert 1 in config_ret[0].device_ids\n    assert 2 in config_ret[0].device_ids\n\n\n@serving_test\ndef test_servable_start_config_merge_diff_version_diff_device_ids_success():\n    \"\"\"\n    Feature: test merge servable config\n    Description: specified version 1 and newest version 0 with diff device can merge to one config of version 1 with\n        device_ids merged\n    Expectation: success to merge config.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    shutil.copytree(os.path.join(base.servable_dir, base.servable_name, \"1\"),\n                    os.path.join(base.servable_dir, base.servable_name, \"2\"))\n    config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=1, version_number=0)\n    config1 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=2, version_number=1)\n    config_ret = merge_config((config0, config1))\n    assert len(config_ret) == 2\n    assert config_ret[0].version_number == 2  # newest version\n    assert len(config_ret[0].device_ids) == 1\n    assert config_ret[0].device_ids[0] == 1\n\n    assert config_ret[1].version_number == 1\n    assert len(config_ret[1].device_ids) == 1\n    assert 
config_ret[1].device_ids[0] == 2\n\n\n@serving_test\ndef test_servable_start_config_merge_diff_version_same_device_ids_failed():\n    \"\"\"\n    Feature: test merge servable config\n    Description: specified version 1 and newest version 0 with same device is invalid\n    Expectation: failed to merge config.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    shutil.copytree(os.path.join(base.servable_dir, base.servable_name, \"1\"),\n                    os.path.join(base.servable_dir, base.servable_name, \"2\"))\n    config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=2, version_number=0)\n    config1 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=2, version_number=1)\n    try:\n        server.start_servables((config0, config1))\n        assert False\n    except RuntimeError as e:\n        assert \"Ascend 910 device id 2 is used repeatedly in servable\" in str(e)\n\n\n@serving_test\ndef test_servable_start_config_same_servable_name_diff_directory_failed():\n    \"\"\"\n    Feature: test merge servable config\n    Description: specified version 1 and newest version 0 with diff servable directory is invalid\n    Expectation: failed to merge config.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=2, version_number=0)\n    config1 = server.ServableStartConfig(base.servable_dir + \"2\", base.servable_name, device_ids=2, version_number=1)\n    try:\n        server.start_servables((config0, config1))\n        assert False\n    except RuntimeError as e:\n        assert f\"The servable directory of servable name {base.servable_name} is different in multiple configurations\" \\\n               in str(e)\n\n\n@serving_test\ndef test_servable_start_config_multi_servable_same_device_id():\n    \"\"\"\n    Feature: test 
merge servable config\n    Description: diff servable same with same device id is invalid\n    Expectation: failed to merge config.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n\n    shutil.copytree(os.path.join(base.servable_dir, base.servable_name),\n                    os.path.join(base.servable_dir, base.servable_name + \"2\"))\n\n    config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=2, version_number=0)\n    config1 = server.ServableStartConfig(base.servable_dir, base.servable_name + \"2\", device_ids=2, version_number=1)\n    try:\n        server.start_servables((config0, config1))\n        assert False\n    except RuntimeError as e:\n        assert \"Ascend 910 device id 2 is used repeatedly in servable\" in str(e)\n\n\n@serving_test\ndef test_servable_start_config_multi_servable_diff_device_id():\n    \"\"\"\n    Feature: test merge servable config\n    Description: servable name are same, some are diff\n    Expectation: success to merge config.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n\n    shutil.copytree(os.path.join(base.servable_dir, base.servable_name),\n                    os.path.join(base.servable_dir, base.servable_name + \"2\"))\n\n    config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=(1, 3), version_number=0)\n    config1 = server.ServableStartConfig(base.servable_dir, base.servable_name + \"2\", device_ids=2, version_number=1)\n    config3 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=(4, 5), version_number=0)\n    config_ret = merge_config((config0, config1, config3))\n    assert len(config_ret) == 2\n    print(config_ret[0].servable_name)\n    print(config_ret[1].servable_name)\n\n    assert config_ret[0].version_number == 1  # newest version\n    assert len(config_ret[0].device_ids) == 4\n    assert 
tuple(config_ret[0].device_ids) == (1, 3, 4, 5)\n\n    assert config_ret[1].version_number == 1\n    assert len(config_ret[1].device_ids) == 1\n    assert config_ret[1].device_ids[0] == 2\n\n\n@serving_test\ndef test_servable_start_config_merge_diff_version_diff_dec_key_success():\n    \"\"\"\n    Feature: test merge servable config\n    Description: diff version with diff dec key\n    Expectation: success to merge config.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    shutil.copytree(os.path.join(base.servable_dir, base.servable_name, \"1\"),\n                    os.path.join(base.servable_dir, base.servable_name, \"2\"))\n    config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=1, version_number=0,\n                                         dec_key=(\"ABC\" * 8).encode(), dec_mode='AES-GCM')\n    config1 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=2, version_number=1,\n                                         dec_key=(\"DEF\" * 8).encode(), dec_mode='AES-CBC')\n    config_ret = merge_config((config0, config1))\n    assert len(config_ret) == 2\n    assert config_ret[0].dec_key == (\"ABC\" * 8).encode()  # newest version\n    assert config_ret[0].dec_mode == \"AES-GCM\"\n\n    assert config_ret[1].dec_key == (\"DEF\" * 8).encode()  # newest version\n    assert config_ret[1].dec_mode == \"AES-CBC\"\n\n\n@serving_test\ndef test_servable_start_config_merge_same_version_diff_dec_key_failed():\n    \"\"\"\n    Feature: test merge servable config\n    Description: same version with diff dec key\n    Expectation: failed to merge config.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=1, version_number=0,\n                                         dec_key=(\"ABC\" * 8).encode(), dec_mode='AES-GCM')\n    config1 = 
server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=2, version_number=1,\n                                         dec_key=(\"DEF\" * 8).encode(), dec_mode='AES-CBC')\n    try:\n        server.start_servables((config0, config1))\n        assert False\n    except RuntimeError as e:\n        assert \"The dec key or dec mode of servable name\" in str(e)\n\n\n@serving_test\ndef test_servable_start_config_with_dec_success():\n    \"\"\"\n    Feature: test start servable with dec\n    Description: test start servable with dec\n    Expectation: success to start servable.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\ntensor_add = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\n\n@register.register_method(output_names=[\"y\"])\ndef add_cast(x1, x2):\n    y = register.add_stage(tensor_add, x1, x2, outputs_count=1)\n    return y\n\"\"\"\n    base = ServingTestBase()\n    base.init_servable_with_servable_config(1, servable_content)\n    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0,\n                                                      dec_key=\"ABCDEFGHABCDEFGH\".encode(), dec_mode='AES-GCM'))\n\n\n@serving_test\ndef test_start_servables_without_declared_model_none_device_ids_start_version0_success():\n    \"\"\"\n    Feature: test start servables\n    Description: no models, no device ids, with extra workers, no version directory, start version number 0\n    Expectation: serving server running ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\ndef function_test(x1, x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[], 
device_ids=None,\n                                version_number=0, start_version_number=0)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\", version_number=1)\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_start_servables_without_declared_model_none_device_ids_start_version1_success():\n    \"\"\"\n    Feature: test start servables\n    Description: no models, no device ids, with extra workers, no version directory, start version number 1\n    Expectation: serving server running ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\ndef function_test(x1, x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[], device_ids=None,\n                                version_number=0, start_version_number=1)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}] * 10\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\", version_number=1)\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_start_servables_without_declared_model_with_device_ids_start_version0_success():\n    \"\"\"\n    Feature: test start servables\n    Description: no models, with device ids, without extra workers, no version directory, start version number 0\n    Expectation: serving 
server running ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\ndef function_test(x1, x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[], device_ids=0,\n                                version_number=0, start_version_number=0)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}] * 10\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\", version_number=1)\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_start_servables_without_declared_model_with_device_ids_start_version0_with_extra_worker_success():\n    \"\"\"\n    Feature: test start servables\n    Description: no models, with device ids, without extra workers, no version directory, start version number 0\n    Expectation: serving server running ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\ndef function_test(x1, x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[], device_ids=0, num_parallel_workers=2,\n                                version_number=0, start_version_number=0)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}] * 10\n\n    client = 
create_client(\"localhost:5500\", base.servable_name, \"predict\", version_number=1)\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_start_servables_without_declared_model_with_device_ids_start_version1_with_extra_worker_success():\n    \"\"\"\n    Feature: test start servables\n    Description: no models, with device ids, with extra workers, no version directory, start version number 1\n    Expectation: serving server running ok.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\ndef function_test(x1, x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    base = start_serving_server(servable_content, model_file=[], device_ids=0, num_parallel_workers=2,\n                                version_number=0, start_version_number=1)\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}] * 10\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"predict\", version_number=1)\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_start_servables_with_declared_model_none_device_ids_start_version0_with_extra_worker_fail():\n    \"\"\"\n    Feature: test start servables\n    Description: with models, none device ids, with extra workers, no version directory, start version number 0\n    Expectation: failed to serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\ndef function_test(x1, 
x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\",\n                             device_ids=None, num_parallel_workers=2,\n                             version_number=None, start_version_number=0)\n        assert False\n    except RuntimeError as e:\n        assert \"There is no valid version directory of models\" in str(e)\n\n\n@serving_test\ndef test_start_servables_with_declared_model_none_device_ids_start_version1_with_extra_worker_fail():\n    \"\"\"\n    Feature: test start servables\n    Description: with models, none device ids, with extra workers, no version directory, start version number 1\n    Expectation: failed to serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\ndef function_test(x1, x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\",\n                             device_ids=None, num_parallel_workers=2,\n                             version_number=None, start_version_number=1)\n        assert False\n    except RuntimeError as e:\n        assert \"There is no valid version directory of models\" in str(e)\n\n\n@serving_test\ndef test_start_servables_with_declared_model_none_device_ids_start_version0_with_version_dir_fail():\n    \"\"\"\n    Feature: test start servables\n    Description: with models, none device ids, with extra workers, with version directory, start version number 1\n    Expectation: failed to 
serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\ndef function_test(x1, x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\",\n                             device_ids=None, num_parallel_workers=2,\n                             version_number=1, start_version_number=0)\n        assert False\n    except RuntimeError as e:\n        # \"Servable '{}'  has models declared by declare_model, but parameter 'device_ids'\"\n        assert \" has models declared by declare_model, but parameter 'device_ids'\" in str(e)\n\n\n@serving_test\ndef test_start_servables_with_declared_model_none_device_ids_start_version1_with_version_dir_fail():\n    \"\"\"\n    Feature: test start servables\n    Description: with models, none device ids, with extra workers, with version directory, start version number 1\n    Expectation: failed to serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\ndef function_test(x1, x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\",\n                             device_ids=None, num_parallel_workers=2,\n                             version_number=1, start_version_number=1)\n        assert False\n    except RuntimeError as e:\n        # \"Servable '{}'  has 
models declared by declare_model, but parameter 'device_ids'\"\n        assert \" has models declared by declare_model, but parameter 'device_ids'\" in str(e)\n\n\n@serving_test\ndef test_start_servables_with_declared_model_with_device_ids_start_version0_without_version_dir_fail():\n    \"\"\"\n    Feature: test start servables\n    Description: with models, with device ids, with extra workers, without version directory, start version number 0\n    Expectation: failed to serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\ndef function_test(x1, x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\",\n                             device_ids=1, num_parallel_workers=2,\n                             version_number=None, start_version_number=0)\n        assert False\n    except RuntimeError as e:\n        assert \"There is no valid version directory of models\" in str(e)\n\n\n@serving_test\ndef test_start_servables_with_declared_model_with_device_ids_start_version1_without_version_dir_fail():\n    \"\"\"\n    Feature: test start servables\n    Description: with models, with device ids, with extra workers, without version directory, start version number 1\n    Expectation: failed to serving server.\n    \"\"\"\n    servable_content = r\"\"\"\nimport numpy as np\nfrom mindspore_serving.server import register\n\nmodel = register.declare_model(model_file=\"tensor_add.mindir\", model_format=\"MindIR\")\ndef function_test(x1, x2):\n    y = x1+x2\n    return y\n\n@register.register_method(output_names=\"y\")\ndef predict(x1, x2):\n    y = register.add_stage(function_test, x1, x2, 
outputs_count=1)\n    return y\n    \"\"\"\n    try:\n        start_serving_server(servable_content, model_file=\"tensor_add.mindir\",\n                             device_ids=1, num_parallel_workers=2,\n                             version_number=None, start_version_number=1)\n        assert False\n    except RuntimeError as e:\n        assert \"There is no valid version directory of models\" in str(e)\n\n\n@serving_test\ndef test_start_servables_enable_cpu_none_device_id_cpu_device_type_success():\n    \"\"\"\n    Feature: test start servables\n    Description: target cpu, device ids none, device type CPU\n    Expectation: serving server running ok.\n    \"\"\"\n    os.environ[\"SERVING_ENABLE_CPU_DEVICE\"] = \"1\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(\n        server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=None, device_type=\"CPU\"))\n    server.start_grpc_server(\"localhost:5500\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\", version_number=1)\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_start_servables_enable_cpu_none_device_id_none_device_type_none_success():\n    \"\"\"\n    Feature: test start servables\n    Description: enable cpu, device ids none, device type none\n    Expectation: serving server running ok.\n    \"\"\"\n    os.environ[\"SERVING_ENABLE_CPU_DEVICE\"] = \"1\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(\n        server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=None, device_type=None))\n    
server.start_grpc_server(\"localhost:5500\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\", version_number=1)\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_start_servables_enable_cpu_device_type_with_device_id_cpu_device_type_success():\n    \"\"\"\n    Feature: test start servables\n    Description: target cpu, with device ids, device type CPU\n    Expectation: serving server running ok.\n    \"\"\"\n    os.environ[\"SERVING_ENABLE_CPU_DEVICE\"] = \"1\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    server.start_servables(\n        server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, device_type=\"CPU\"))\n    server.start_grpc_server(\"localhost:5500\")\n    # Client\n    x1 = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)\n    x2 = np.array([[5.5, 6.6], [7.7, 8.8]], np.float32)\n    y = x1 + x2\n    instances = [{\"x1\": x1, \"x2\": x2}]\n\n    client = create_client(\"localhost:5500\", base.servable_name, \"add_common\", version_number=1)\n    result = client.infer(instances)\n    print(\"result\", result)\n    assert (result[0][\"y\"] == y).all()\n\n\n@serving_test\ndef test_start_servables_ascend_device_reuse_device_ids_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: Ascend device, target device type Ascend, reuse device failed\n    Expectation: Serving server startup failed.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    shutil.copytree(os.path.join(base.servable_dir, base.servable_name),\n                    os.path.join(base.servable_dir, base.servable_name + \"_x\"))\n    try:\n        
config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, device_type=\"Ascend\")\n        config1 = server.ServableStartConfig(base.servable_dir, base.servable_name + \"_x\", device_ids=0,\n                                             device_type=\"Ascend\")\n        server.start_servables([config0, config1])\n        assert False\n    except RuntimeError as e:\n        assert \"Ascend 910 device id 0 is used repeatedly in servable\" in str(e)\n\n\n@serving_test\ndef test_start_servables_ascend_device_reuse_device_ids_none_device_type_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: Ascend device, target device type none, reuse device failed\n    Expectation: Serving server startup failed.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    shutil.copytree(os.path.join(base.servable_dir, base.servable_name),\n                    os.path.join(base.servable_dir, base.servable_name + \"_x\"))\n    try:\n        config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0)\n        config1 = server.ServableStartConfig(base.servable_dir, base.servable_name + \"_x\", device_ids=0)\n        server.start_servables([config0, config1])\n        assert False\n    except RuntimeError as e:\n        assert \"Ascend 910 device id 0 is used repeatedly in servable\" in str(e)\n\n\n@serving_test\ndef test_start_servables_ascend_device_without_reuse_device_ids_none_device_type_success():\n    \"\"\"\n    Feature: test start servables\n    Description: Ascend device, target device type Ascend, without reuse device success\n    Expectation: Serving server work well.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    shutil.copytree(os.path.join(base.servable_dir, base.servable_name),\n                    os.path.join(base.servable_dir, base.servable_name + \"_x\"))\n    config0 = 
server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, device_type=\"Ascend\")\n    config1 = server.ServableStartConfig(base.servable_dir, base.servable_name + \"_x\", device_ids=1,\n                                         device_type=\"Ascend\")\n    server.start_servables([config0, config1])\n\n\n@serving_test\ndef test_start_servables_gpu_device_reuse_device_ids_success():\n    \"\"\"\n    Feature: test start servables\n    Description: GPU device, target device type GPU, reuse device success\n    Expectation: Serving server work well.\n    \"\"\"\n    os.environ[\"SERVING_ENABLE_GPU_DEVICE\"] = \"1\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    shutil.copytree(os.path.join(base.servable_dir, base.servable_name),\n                    os.path.join(base.servable_dir, base.servable_name + \"_x\"))\n\n    config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, device_type=\"GPU\")\n    config1 = server.ServableStartConfig(base.servable_dir, base.servable_name + \"_x\", device_ids=0, device_type=\"GPU\")\n    server.start_servables([config0, config1])\n\n\n@serving_test\ndef test_start_servables_gpu_device_reuse_device_ids_none_device_type_success():\n    \"\"\"\n    Feature: test start servables\n    Description: GPU device, target device type GPU, reuse device success\n    Expectation: Serving server work well.\n    \"\"\"\n    os.environ[\"SERVING_ENABLE_GPU_DEVICE\"] = \"1\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    shutil.copytree(os.path.join(base.servable_dir, base.servable_name),\n                    os.path.join(base.servable_dir, base.servable_name + \"_x\"))\n    config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0)\n    config1 = server.ServableStartConfig(base.servable_dir, base.servable_name + \"_x\", device_ids=0)\n    server.start_servables([config0, 
config1])\n\n\n@serving_test\ndef test_start_servables_gpu_device_ascend_device_type_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: GPU device, target device type Ascend\n    Expectation: Serving server start failed.\n    \"\"\"\n    os.environ[\"SERVING_ENABLE_GPU_DEVICE\"] = \"1\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    shutil.copytree(os.path.join(base.servable_dir, base.servable_name),\n                    os.path.join(base.servable_dir, base.servable_name + \"_x\"))\n    try:\n\n        config0 = server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, device_type=\"Ascend\")\n        config1 = server.ServableStartConfig(base.servable_dir, base.servable_name + \"_x\", device_ids=1,\n                                             device_type=\"Ascend\")\n        server.start_servables([config0, config1])\n        assert False\n    except RuntimeError as e:\n        assert f\"The device type 'ascend' of servable name {base.servable_name} is inconsistent with current \" \\\n               f\"running environment\" in str(e)\n\n\n@serving_test\ndef test_start_servable_number_of_worker_invalid_failed():\n    \"\"\"\n    Feature: test start servables\n    Description: num_parallel_workers not in range[0,64]\n    Expectation: failed to serving server.\n    \"\"\"\n    base = ServingTestBase()\n    base.init_servable(1, \"add_servable_config.py\")\n    try:\n        server.start_servables(\n            server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=0, num_parallel_workers=65))\n        assert False\n    except RuntimeError as e:\n        assert \"Parameter 'num_parallel_workers' should be in range [0,64]\" in str(e)\n"
  },
  {
    "path": "tests/ut/runtest.sh",
    "content": "#!/bin/bash\n# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nset -e\n\nCURRPATH=$(\n  cd \"$(dirname $0)\" || exit\n  pwd\n)\n\nif [ $# -gt 0 ]; then\n  if [ $1 == \"python\" ]; then\n    echo \"run python ut\"\n    bash ${CURRPATH}/python/runtest.sh $2\n  elif [ $1 == \"cpp\" ]; then\n    echo \"run cpp ut\"\n    bash ${CURRPATH}/cpp/runtest.sh\n  fi\nelse\n  echo \"run all ut\"\n  # 1.run python testcases\n  bash ${CURRPATH}/python/runtest.sh $2\n\n  # 2.run c++ ut testcases\n  bash ${CURRPATH}/cpp/runtest.sh\nfi\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/cell.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"include/api/cell.h\"\n#include \"include/api/context.h\"\n#include \"cxx_api/factory.h\"\n#include \"cxx_api/graph/graph_impl.h\"\n\nnamespace mindspore {\nstd::vector<Output> CellBase::operator()(const std::vector<Input> &inputs) const { return Clone()->Construct(inputs); }\n\nParameterCell::ParameterCell(const ParameterCell &cell) {\n  auto tmp_ptr = cell.tensor_.Clone();\n  tensor_ = *tmp_ptr;\n  MSTensor::DestroyTensorPtr(tmp_ptr);\n}\n\nParameterCell &ParameterCell::operator=(const ParameterCell &cell) {\n  if (&cell == this) {\n    return *this;\n  }\n  auto tmp_ptr = cell.tensor_.Clone();\n  tensor_ = *tmp_ptr;\n  MSTensor::DestroyTensorPtr(tmp_ptr);\n  return *this;\n}\n\nParameterCell::ParameterCell(ParameterCell &&cell) : tensor_(cell.tensor_) {}\n\nParameterCell &ParameterCell::operator=(ParameterCell &&cell) {\n  if (&cell == this) {\n    return *this;\n  }\n  tensor_ = cell.tensor_;\n  return *this;\n}\n\nParameterCell::ParameterCell(const MSTensor &tensor) {\n  auto tmp_ptr = tensor.Clone();\n  tensor_ = *tmp_ptr;\n  MSTensor::DestroyTensorPtr(tmp_ptr);\n}\n\nParameterCell &ParameterCell::operator=(const MSTensor &tensor) {\n  auto tmp_ptr = tensor.Clone();\n  tensor_ = *tmp_ptr;\n  MSTensor::DestroyTensorPtr(tmp_ptr);\n  return *this;\n}\n\nParameterCell::ParameterCell(MSTensor &&tensor) : tensor_(tensor) 
{}\n\nParameterCell &ParameterCell::operator=(MSTensor &&tensor) {\n  tensor_ = tensor;\n  return *this;\n}\n\nGraphCell::GraphCell(const Graph &graph) : graph_(std::make_shared<Graph>(graph)) { MS_EXCEPTION_IF_NULL(graph_); }\n\nGraphCell::GraphCell(const std::shared_ptr<Graph> &graph) : graph_(graph) { MS_EXCEPTION_IF_NULL(graph_); }\n\nGraphCell::GraphCell(Graph &&graph) : graph_(std::make_shared<Graph>(graph)) { MS_EXCEPTION_IF_NULL(graph_); }\n\nvoid GraphCell::SetContext(const std::shared_ptr<Context> &context) {\n  if (executor_ == nullptr) {\n    executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);\n    if (executor_ == nullptr) {\n      MS_LOG(ERROR) << \"Create graph impl for device target \" << g_device_target << \" failed.\";\n      return;\n    }\n    executor_->SetGraph(graph_);\n  }\n  executor_->SetContext(context);\n}\n\nStatus GraphCell::Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) {\n  if (executor_ == nullptr) {\n    executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);\n    if (executor_ == nullptr) {\n      MS_LOG(ERROR) << \"Create graph impl for device target \" << g_device_target << \" failed.\";\n      return kMEFailed;\n    }\n    executor_->SetGraph(graph_);\n  }\n  return executor_->Run(inputs, outputs);\n}\n\nStatus GraphCell::Load(uint32_t device_id) {\n  if (executor_ == nullptr) {\n    executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);\n    if (executor_ == nullptr) {\n      MS_LOG(ERROR) << \"Create graph impl for device target \" << g_device_target << \" failed.\";\n      return kMEFailed;\n    }\n    executor_->SetGraph(graph_);\n  }\n  return executor_->Load(device_id);\n}\n\nstd::vector<MSTensor> GraphCell::GetInputs() {\n  if (executor_ == nullptr) {\n    executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);\n    if (executor_ == nullptr) {\n      MS_LOG(ERROR) << \"Create graph impl for device 
target \" << g_device_target << \" failed.\";\n      return {};\n    }\n    executor_->SetGraph(graph_);\n  }\n  return executor_->GetInputs();\n}\n\nstd::vector<MSTensor> GraphCell::GetOutputs() {\n  if (executor_ == nullptr) {\n    executor_ = Factory<GraphCell::GraphImpl>::Instance().Create(g_device_target);\n    if (executor_ == nullptr) {\n      MS_LOG(ERROR) << \"Create graph impl for device target \" << g_device_target << \" failed.\";\n      return {};\n    }\n    executor_->SetGraph(graph_);\n  }\n  return executor_->GetOutputs();\n}\n\nInputAndOutput::InputAndOutput() : cell_(nullptr), prev_(), index_(-1) {}\n\nInputAndOutput::InputAndOutput(const MSTensor &tensor) : prev_(), index_(-1) {\n  auto tmp_ptr = tensor.Clone();\n  cell_ = std::make_shared<ParameterCell>(*tmp_ptr);\n  MSTensor::DestroyTensorPtr(tmp_ptr);\n}\nInputAndOutput::InputAndOutput(MSTensor &&tensor)\n    : cell_(std::make_shared<ParameterCell>(tensor)), prev_(), index_(-1) {}\n\nInputAndOutput::InputAndOutput(const std::shared_ptr<CellBase> &cell, const std::vector<InputAndOutput> &prev,\n                               int32_t index)\n    : cell_(cell), prev_(prev), index_(index) {}\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/context.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"include/api/context.h\"\n#include <any>\n#include <map>\n#include <type_traits>\n#include \"cxx_api/factory.h\"\n#include \"utils/log_adapter.h\"\n\nconstexpr auto kModelOptionCpuEnableFP16 = \"mindspore.option.cpu.enable_fp16\";\nconstexpr auto kModelOptionGPUEnableFP16 = \"mindspore.option.gpu.enable_fp16\";\nconstexpr auto kModelOptionKirinNpuFrequency = \"mindspore.option.kirin_npu.frequency\";\nconstexpr auto kModelOptionDeviceID = \"mindspore.option.device_id\";\nconstexpr auto kModelOptionGPUDeviceID = kModelOptionDeviceID;\nconstexpr auto kModelOptionGPUPrecisionMode = \"mindspore.option.gpu.precision_mode\";\nconstexpr auto kModelOptionAscend910DeviceID = kModelOptionDeviceID;\nconstexpr auto kModelOptionAscend310DeviceID = kModelOptionDeviceID;\nconstexpr auto kModelOptionAscend310InsertOpCfgPath = \"mindspore.option.ascend310.insert_op_config_file_path\";\nconstexpr auto kModelOptionAscend310InputFormat = \"mindspore.option.ascend310.input_format\";\nconstexpr auto kModelOptionAscend310InputShapeMap = \"mindspore.option.ascend310.input_shape_map\";\nconstexpr auto kModelOptionAscend310InputShape = \"mindspore.option.ascend310.input_shape\";\nconstexpr auto kModelOptionAscend310OutputType = \"mindspore.option.ascend310.output_type\";\nconstexpr auto kModelOptionAscend310PrecisionMode = 
\"mindspore.option.ascend310.precision_mode\";\nconstexpr auto kModelOptionAscend310OpSelectImplMode = \"mindspore.option.ascend310.op_select_impl_mode\";\nconstexpr auto KModelOptionAscend310FusionSwitchCfgPath = \"mindspore.option.ascend310.fusion_switch_config_file_path\";\nconstexpr auto kModelOptionAscend310DynamicBatchSize = \"mindspore.option.ascend310.dynamic_batch_size\";\nconstexpr auto kModelOptionAscend310BufferOptimize = \"mindspore.option.ascend310.buffer_optimize\";\n\nnamespace mindspore {\nclass Allocator {};\n\nstruct Context::Data {\n  std::vector<std::shared_ptr<DeviceInfoContext>> device_info_list;\n  int32_t thread_num;\n  bool enable_parallel_ = false;\n  std::vector<int32_t> affinity_core_list_;\n  int affinity_mode_ = 2;\n};\n\nstruct DeviceInfoContext::Data {\n  std::map<std::string, std::any> params;\n};\n\nContext::Context() : data_(std::make_shared<Data>()) {}\n\ntemplate <class T, typename U = std::remove_cv_t<std::remove_reference_t<T>>>\nstatic const U &GetValue(const std::shared_ptr<DeviceInfoContext::Data> &data, const std::string &key) {\n  static const U empty_result{};\n  if (data == nullptr) {\n    return empty_result;\n  }\n  auto iter = data->params.find(key);\n  if (iter == data->params.end()) {\n    return empty_result;\n  }\n  const std::any &value = iter->second;\n  if (value.type() != typeid(U)) {\n    return empty_result;\n  }\n\n  return std::any_cast<const U &>(value);\n}\n\nvoid Context::SetThreadNum(int32_t thread_num) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->thread_num = thread_num;\n}\nint32_t Context::GetThreadNum() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  return data_->thread_num;\n}\n\nvoid Context::SetEnableParallel(bool is_parallel) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->enable_parallel_ = is_parallel;\n}\n\nbool Context::GetEnableParallel() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  return data_->enable_parallel_;\n}\n\nvoid Context::SetThreadAffinity(int mode) {\n  MS_EXCEPTION_IF_NULL(data_);\n  
data_->affinity_mode_ = mode;\n}\nint Context::GetThreadAffinityMode() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  return data_->affinity_mode_;\n}\n\nvoid Context::SetThreadAffinity(const std::vector<int> &core_list) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->affinity_core_list_ = core_list;\n}\nstd::vector<int32_t> Context::GetThreadAffinityCoreList() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  return data_->affinity_core_list_;\n}\n\nstd::vector<std::shared_ptr<DeviceInfoContext>> &Context::MutableDeviceInfo() {\n  MS_EXCEPTION_IF_NULL(data_);\n  return data_->device_info_list;\n}\n\nDeviceInfoContext::DeviceInfoContext() : data_(std::make_shared<Data>()) {}\n\nvoid CPUDeviceInfo::SetEnableFP16(bool is_fp16) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionCpuEnableFP16] = is_fp16;\n}\nbool CPUDeviceInfo::GetEnableFP16() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  return GetValue<bool>(data_, kModelOptionCpuEnableFP16);\n}\n\nvoid GPUDeviceInfo::SetEnableFP16(bool is_fp16) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionGPUEnableFP16] = is_fp16;\n}\nbool GPUDeviceInfo::GetEnableFP16() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  return GetValue<bool>(data_, kModelOptionGPUEnableFP16);\n}\n\nvoid KirinNPUDeviceInfo::SetFrequency(int frequency) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionKirinNpuFrequency] = frequency;\n}\nint KirinNPUDeviceInfo::GetFrequency() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  return GetValue<int>(data_, kModelOptionKirinNpuFrequency);\n}\n\nvoid GPUDeviceInfo::SetDeviceID(uint32_t device_id) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionGPUDeviceID] = device_id;\n}\n\nuint32_t GPUDeviceInfo::GetDeviceID() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  return GetValue<uint32_t>(data_, kModelOptionGPUDeviceID);\n}\n\nint GPUDeviceInfo::GetRankID() const {\n  MS_LOG(ERROR) << \"Unsupported Feature.\";\n  return 0;\n}\n\nint GPUDeviceInfo::GetGroupSize() const {\n  MS_LOG(ERROR) << 
\"Unsupported Feature.\";\n  return 0;\n}\n\nvoid GPUDeviceInfo::SetPrecisionMode(const std::vector<char> &precision_mode) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionGPUPrecisionMode] = CharToString(precision_mode);\n}\nstd::vector<char> GPUDeviceInfo::GetPrecisionModeChar() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  const std::string &ref = GetValue<std::string>(data_, kModelOptionGPUPrecisionMode);\n  return StringToChar(ref);\n}\n\nvoid AscendDeviceInfo::SetDeviceID(uint32_t device_id) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionAscend310DeviceID] = device_id;\n}\nuint32_t AscendDeviceInfo::GetDeviceID() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  return GetValue<uint32_t>(data_, kModelOptionAscend310DeviceID);\n}\n\nvoid AscendDeviceInfo::SetInsertOpConfigPath(const std::vector<char> &cfg_path) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionAscend310InsertOpCfgPath] = CharToString(cfg_path);\n}\nstd::vector<char> AscendDeviceInfo::GetInsertOpConfigPathChar() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310InsertOpCfgPath);\n  return StringToChar(ref);\n}\n\nvoid AscendDeviceInfo::SetInputFormat(const std::vector<char> &format) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionAscend310InputFormat] = CharToString(format);\n}\nstd::vector<char> AscendDeviceInfo::GetInputFormatChar() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310InputFormat);\n  return StringToChar(ref);\n}\n\nvoid AscendDeviceInfo::SetInputShape(const std::vector<char> &shape) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionAscend310InputShape] = CharToString(shape);\n}\nstd::vector<char> AscendDeviceInfo::GetInputShapeChar() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310InputShape);\n  return 
StringToChar(ref);\n}\n\nvoid AscendDeviceInfo::SetDynamicBatchSize(const std::vector<size_t> &dynamic_batch_size) {\n  MS_EXCEPTION_IF_NULL(data_);\n  std::string batchs = \"\";\n  for (size_t i = 0; i < dynamic_batch_size.size(); ++i) {\n    if (i != 0) {\n      batchs.push_back(',');\n    }\n    batchs += std::to_string(dynamic_batch_size[i]);\n  }\n  data_->params[kModelOptionAscend310DynamicBatchSize] = batchs;\n}\nstd::vector<char> AscendDeviceInfo::GetDynamicBatchSizeChar() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310DynamicBatchSize);\n  return StringToChar(ref);\n}\n\nvoid AscendDeviceInfo::SetDynamicImageSize(const std::vector<char> &) { return; }\n\nstd::vector<char> AscendDeviceInfo::GetDynamicImageSizeChar() const { return std::vector<char>(); }\n\nvoid AscendDeviceInfo::SetPrecisionMode(const std::vector<char> &precision_mode) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionAscend310PrecisionMode] = CharToString(precision_mode);\n}\nstd::vector<char> AscendDeviceInfo::GetPrecisionModeChar() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310PrecisionMode);\n  return StringToChar(ref);\n}\n\nvoid AscendDeviceInfo::SetOpSelectImplMode(const std::vector<char> &op_select_impl_mode) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionAscend310OpSelectImplMode] = CharToString(op_select_impl_mode);\n}\nstd::vector<char> AscendDeviceInfo::GetOpSelectImplModeChar() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310OpSelectImplMode);\n  return StringToChar(ref);\n}\n\nvoid AscendDeviceInfo::SetFusionSwitchConfigPath(const std::vector<char> &cfg_path) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[KModelOptionAscend310FusionSwitchCfgPath] = CharToString(cfg_path);\n}\nstd::vector<char> 
AscendDeviceInfo::GetFusionSwitchConfigPathChar() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  const std::string &ref = GetValue<std::string>(data_, KModelOptionAscend310FusionSwitchCfgPath);\n  return StringToChar(ref);\n}\n\nvoid AscendDeviceInfo::SetInputShapeMap(const std::map<int, std::vector<int>> &shape) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionAscend310InputShapeMap] = shape;\n}\nstd::map<int, std::vector<int>> AscendDeviceInfo::GetInputShapeMap() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  return GetValue<std::map<int, std::vector<int>>>(data_, kModelOptionAscend310InputShapeMap);\n}\n\nvoid AscendDeviceInfo::SetOutputType(enum DataType output_type) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionAscend310OutputType] = output_type;\n}\nenum DataType AscendDeviceInfo::GetOutputType() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  return GetValue<enum DataType>(data_, kModelOptionAscend310OutputType);\n}\n\nvoid AscendDeviceInfo::SetBufferOptimizeMode(const std::vector<char> &buffer_optimize_mode) {\n  MS_EXCEPTION_IF_NULL(data_);\n  data_->params[kModelOptionAscend310BufferOptimize] = CharToString(buffer_optimize_mode);\n}\nstd::vector<char> AscendDeviceInfo::GetBufferOptimizeModeChar() const {\n  MS_EXCEPTION_IF_NULL(data_);\n  const std::string &ref = GetValue<std::string>(data_, kModelOptionAscend310BufferOptimize);\n  return StringToChar(ref);\n}\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/factory.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_CCSRC_CXX_API_FACTORY_H\n#define MINDSPORE_CCSRC_CXX_API_FACTORY_H\n#include <functional>\n#include <map>\n#include <string>\n#include <vector>\n#include <memory>\n#include <utility>\n#include \"utils/utils.h\"\n\nnamespace mindspore {\ninline enum DeviceType g_device_target = kInvalidDeviceType;\n\nstatic inline LogStream &operator<<(LogStream &stream, DeviceType device_type) {\n  std::map<DeviceType, std::string> type_str_map = {\n    {kAscend, \"Ascend\"}, {kAscend910, \"Ascend910\"}, {kAscend310, \"Ascend310\"}, {kGPU, \"GPU\"}, {kCPU, \"CPU\"}};\n  auto it = type_str_map.find(device_type);\n  if (it != type_str_map.end()) {\n    stream << it->second;\n  } else {\n    stream << \"[InvalidDeviceType: \" << static_cast<int>(device_type) << \"]\";\n  }\n  return stream;\n}\n\ntemplate <class T>\nclass Factory {\n  using U = std::function<std::shared_ptr<T>()>;\n\n public:\n  Factory(const Factory &) = delete;\n  void operator=(const Factory &) = delete;\n\n  static Factory &Instance() {\n    static Factory instance;\n    return instance;\n  }\n\n  void Register(U &&creator) { creators_.push_back(creator); }\n\n  std::shared_ptr<T> Create(enum DeviceType device_type) {\n    for (auto &item : creators_) {\n      MS_EXCEPTION_IF_NULL(item);\n      auto val = item();\n      if (val->CheckDeviceSupport(device_type)) {\n 
       return val;\n      }\n    }\n    MS_LOG(WARNING) << \"Unsupported device target \" << device_type;\n    return nullptr;\n  }\n\n private:\n  Factory() = default;\n  ~Factory() = default;\n  std::vector<U> creators_;\n};\n\ntemplate <class T>\nclass Registrar {\n  using U = std::function<std::shared_ptr<T>()>;\n\n public:\n  explicit Registrar(U creator) { Factory<T>::Instance().Register(std::move(creator)); }\n  ~Registrar() = default;\n};\n\n#define API_FACTORY_REG(BASE_CLASS, DERIVE_CLASS)                          \\\n  static const Registrar<BASE_CLASS> g_api_##DERIVE_CLASS##_registrar_reg( \\\n    []() { return std::make_shared<DERIVE_CLASS>(); });\n}  // namespace mindspore\n#endif  // MINDSPORE_CCSRC_CXX_API_FACTORY_H\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/graph/ascend/ascend_graph_impl.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"cxx_api/graph/ascend/ascend_graph_impl.h\"\n#include <algorithm>\n#include \"include/api/context.h\"\n#include \"cxx_api/factory.h\"\n#include \"stub/graph_impl_stub.h\"\n\nnamespace mindspore {\nAPI_FACTORY_REG(GraphCell::GraphImpl, AscendGraphImpl);\n\nAscendGraphImpl::AscendGraphImpl() { graph_imp_stub_ = std::make_shared<GraphImplStubAdd>(); }\n\nAscendGraphImpl::~AscendGraphImpl() {}\n\nstd::vector<MSTensor> AscendGraphImpl::GetInputs() { return graph_imp_stub_->GetInputs(); }\n\nstd::vector<MSTensor> AscendGraphImpl::GetOutputs() { return graph_imp_stub_->GetOutputs(); }\n\nStatus AscendGraphImpl::Load(uint32_t device_id) {\n  graph_imp_stub_->SetGraph(graph_);\n  graph_imp_stub_->SetContext(graph_context_);\n  return graph_imp_stub_->Load(device_id);\n}\n\nStatus AscendGraphImpl::Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) {\n  return graph_imp_stub_->Run(inputs, outputs);\n}\n\nbool AscendGraphImpl::CheckDeviceSupport(mindspore::DeviceType device_type) {\n  return graph_imp_stub_->CheckDeviceSupport(device_type);\n}\n\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/graph/ascend/ascend_graph_impl.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_CCSRC_CXX_API_GRAPH_MS_ASCEND_GRAPH_IMPL_H\n#define MINDSPORE_CCSRC_CXX_API_GRAPH_MS_ASCEND_GRAPH_IMPL_H\n#include <functional>\n#include <map>\n#include <string>\n#include <vector>\n#include <memory>\n#include <utility>\n#include \"include/api/status.h\"\n#include \"include/api/graph.h\"\n#include \"cxx_api/graph/graph_impl.h\"\n#include \"cxx_api/model/model_impl.h\"\n\nnamespace mindspore {\nclass AscendGraphImpl : public GraphCell::GraphImpl {\n public:\n  AscendGraphImpl();\n  ~AscendGraphImpl() override;\n\n  Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override;\n  Status Load(uint32_t device_id) override;\n  std::vector<MSTensor> GetInputs() override;\n  std::vector<MSTensor> GetOutputs() override;\n  bool CheckDeviceSupport(mindspore::DeviceType device_type) override;\n private:\n  std::shared_ptr<GraphCell::GraphImpl> graph_imp_stub_;\n};\n\n}  // namespace mindspore\n#endif  // MINDSPORE_CCSRC_CXX_API_GRAPH_MS_ASCEND_GRAPH_IMPL_H\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/graph/graph.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"include/api/graph.h\"\n#include \"cxx_api/graph/graph_data.h\"\n#include \"utils/log_adapter.h\"\n\nnamespace mindspore {\nGraph::Graph() : graph_data_(nullptr) {}\n\nGraph::Graph(const std::shared_ptr<GraphData> &graph_data) : graph_data_(graph_data) {}\n\nGraph::Graph(std::shared_ptr<GraphData> &&graph_data) : graph_data_(graph_data) {}\n\nGraph::~Graph() {}\n\nGraph::Graph(std::nullptr_t) : graph_data_(nullptr) {}\n\nbool Graph::operator==(std::nullptr_t) const { return graph_data_ == nullptr; }\n\nbool Graph::operator!=(std::nullptr_t) const { return graph_data_ != nullptr; }\n\nModelType Graph::ModelType() const {\n  MS_EXCEPTION_IF_NULL(graph_data_);\n  return graph_data_->ModelType();\n}\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/graph/graph_data.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"cxx_api/graph/graph_data.h\"\n#include \"utils/log_adapter.h\"\n#ifdef ENABLE_ACL\n#include \"framework/common/helper/model_helper.h\"\n#endif\n\nnamespace mindspore {\nGraph::GraphData::GraphData(const FuncGraphPtr &func_graph, enum ModelType model_type)\n    : func_graph_(nullptr), om_data_(), model_type_(ModelType::kUnknownType) {\n  if (model_type != ModelType::kMindIR) {\n    MS_LOG(EXCEPTION) << \"Invalid ModelType \" << model_type;\n  }\n  func_graph_ = func_graph;\n  model_type_ = model_type;\n}\n\nGraph::GraphData::GraphData(const Buffer &om_data, enum ModelType model_type)\n    : func_graph_(nullptr), om_data_(om_data), model_type_(model_type) {\n  if (model_type_ != ModelType::kOM) {\n    MS_LOG(EXCEPTION) << \"Invalid ModelType \" << model_type_;\n  }\n\n#ifdef ENABLE_ACL\n  // check om\n  ge::ModelHelper helper;\n  ge::ModelData model_data;\n  model_data.model_data = om_data_.MutableData();\n  model_data.model_len = om_data_.DataSize();\n  ge::Status ret = helper.LoadRootModel(model_data);\n  if (ret != ge::SUCCESS) {\n    MS_LOG(EXCEPTION) << \"Invalid input data cannot parse to om.\";\n  }\n\n#else\n  MS_LOG(EXCEPTION) << \"Unsupported ModelType OM.\";\n#endif\n}\n\nGraph::GraphData::~GraphData() {}\n\nFuncGraphPtr Graph::GraphData::GetFuncGraph() const {\n  if (model_type_ != ModelType::kMindIR) {\n    
MS_LOG(ERROR) << \"Invalid ModelType \" << model_type_;\n    return nullptr;\n  }\n\n  return func_graph_;\n}\n\nBuffer Graph::GraphData::GetOMData() const {\n  if (model_type_ != ModelType::kOM) {\n    MS_LOG(ERROR) << \"Invalid ModelType \" << model_type_;\n    return Buffer();\n  }\n\n  return om_data_;\n}\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/graph/graph_data.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_DATA_H\n#define MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_DATA_H\n\n#include <string>\n#include <vector>\n#include <map>\n#include <memory>\n#include \"include/api/graph.h\"\n#include \"include/api/types.h\"\n#include \"utils/utils.h\"\n\nnamespace mindspore {\nclass Graph::GraphData {\n public:\n  GraphData();\n\n  explicit GraphData(const FuncGraphPtr &func_graph, enum ModelType model_type = kMindIR);\n\n  GraphData(const Buffer &om_data, enum ModelType model_type);\n\n  ~GraphData();\n\n  enum ModelType ModelType() const { return model_type_; }\n\n  FuncGraphPtr GetFuncGraph() const;\n\n  Buffer GetOMData() const;\n\n private:\n  FuncGraphPtr func_graph_;\n  Buffer om_data_;\n  enum ModelType model_type_;\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_DATA_H\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/graph/graph_impl.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_IMPL_H\n#define MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_IMPL_H\n#include <functional>\n#include <map>\n#include <string>\n#include <vector>\n#include <memory>\n#include <utility>\n#include \"include/api/cell.h\"\n#include \"include/api/graph.h\"\n#include \"include/api/context.h\"\n#include \"cxx_api/graph/graph_data.h\"\n#include \"utils/utils.h\"\n\nnamespace mindspore {\nclass GraphCell::GraphImpl {\n public:\n  GraphImpl() : graph_(nullptr), graph_context_(nullptr) {}\n  virtual ~GraphImpl() = default;\n\n  std::shared_ptr<Graph::GraphData> &MutableGraphData() const { return graph_->graph_data_; }\n  void SetGraph(const std::shared_ptr<Graph> &graph) { graph_ = graph; }\n  void SetContext(const std::shared_ptr<Context> &context) { graph_context_ = context; }\n\n  virtual Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) = 0;\n  virtual Status Load(uint32_t device_id) = 0;\n\n  virtual std::vector<MSTensor> GetInputs() = 0;\n  virtual std::vector<MSTensor> GetOutputs() = 0;\n\n  virtual bool CheckDeviceSupport(mindspore::DeviceType device_type) = 0;\n\n protected:\n  std::shared_ptr<Graph> graph_;\n  std::shared_ptr<Context> graph_context_;\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_IMPL_H\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/model/model.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"include/api/model.h\"\n#include \"include/api/context.h\"\n#include \"include/api/serialization.h\"\n#include \"cxx_api/model/model_impl.h\"\n#include \"cxx_api/factory.h\"\n#include \"utils/utils.h\"\n\nnamespace mindspore {\nStatus Model::Build(GraphCell graph_cell, const std::shared_ptr<Context> &model_context,\n                    const std::shared_ptr<TrainCfg> &) {\n  if (graph_cell.GetGraph() == nullptr) {\n    MS_LOG(ERROR) << \"Invalid graph input.\";\n    return kMCInvalidInput;\n  }\n\n  if (model_context == nullptr) {\n    MS_LOG(ERROR) << \"Invalid model context.\";\n    return kMCInvalidInput;\n  }\n  auto &device_info = model_context->MutableDeviceInfo();\n  if (device_info.size() < 1) {\n    MS_LOG(ERROR) << \"Invalid model context, only single device info is supported.\";\n    return kMCInvalidInput;\n  }\n\n  auto device_target = device_info[0]->GetDeviceType();\n  impl_ = Factory<ModelImpl>::Instance().Create(device_target);\n  if (impl_ == nullptr) {\n    MS_LOG(ERROR) << \"Create session type \" << device_target << \" failed\";\n    return kMEFailed;\n  }\n\n  g_device_target = device_target;\n\n  impl_->SetGraph(std::make_shared<Graph>(*graph_cell.GetGraph()));\n  impl_->SetContext(model_context);\n\n  return impl_->Build();\n}\n\nStatus Model::Build(const std::vector<char> &model_path, ModelType 
model_type,\n                    const std::shared_ptr<Context> &model_context, const Key &dec_key, const std::string &dec_mode,\n                    const std::vector<char> &cropto_lib_path) {\n  mindspore::Graph graph;\n  auto status = mindspore::Serialization::Load(CharToString(model_path), model_type, &graph, dec_key, dec_mode);\n  if (!status.IsOk()) {\n    return status;\n  }\n  return Build(GraphCell(graph), model_context);\n}\n\nStatus Model::Build(const std::vector<char> &model_path, ModelType model_type,\n                    const std::shared_ptr<Context> &model_context) {\n  mindspore::Graph graph;\n  auto status = mindspore::Serialization::Load(CharToString(model_path), model_type, &graph);\n  if (!status.IsOk()) {\n    return status;\n  }\n  return Build(GraphCell(graph), model_context);\n}\n\nStatus Model::Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims) {\n  if (impl_ == nullptr) {\n    MS_LOG(ERROR) << \"Failed because this model has not been built.\";\n    return kMCFailed;\n  }\n  return impl_->Resize(inputs, dims);\n}\n\nStatus Model::Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs,\n                      const MSKernelCallBack &before, const MSKernelCallBack &after) {\n  if (impl_ == nullptr) {\n    MS_LOG(ERROR) << \"Failed because this model has not been built.\";\n    return kMCFailed;\n  }\n  return impl_->Predict(inputs, outputs);\n}\n\nstd::vector<MSTensor> Model::GetInputs() {\n  if (impl_ == nullptr) {\n    MS_LOG(ERROR) << \"Failed because this model has not been built.\";\n    return {};\n  }\n  return impl_->GetInputs();\n}\n\nstd::vector<MSTensor> Model::GetOutputs() {\n  if (impl_ == nullptr) {\n    MS_LOG(ERROR) << \"Failed because this model has not been built.\";\n    return {};\n  }\n  return impl_->GetOutputs();\n}\n\nMSTensor Model::GetInputByTensorName(const std::vector<char> &tensor_name) {\n  std::string tensor_name_str = CharToString(tensor_name);\n  
auto inputs = GetInputs();\n  for (auto in : inputs) {\n    if (in.Name() == tensor_name_str) {\n      return in;\n    }\n  }\n\n  return MSTensor(nullptr);\n}\n\nstd::vector<std::vector<char>> Model::GetOutputTensorNamesChar() {\n  std::vector<std::vector<char>> ret;\n  auto outputs = GetOutputs();\n  std::transform(outputs.begin(), outputs.end(), std::back_inserter(ret),\n                 [](const MSTensor &item) -> std::vector<char> { return StringToChar(item.Name()); });\n  return ret;\n}\n\nMSTensor Model::GetOutputByTensorName(const std::vector<char> &tensor_name) {\n  std::string tensor_name_str = CharToString(tensor_name);\n  auto outputs = GetOutputs();\n  for (auto out : outputs) {\n    if (out.Name() == tensor_name_str) {\n      return out;\n    }\n  }\n\n  return MSTensor(nullptr);\n}\n\nstd::vector<MSTensor> Model::GetOutputsByNodeName(const std::vector<char> &node_name) {\n  return std::vector<MSTensor>{GetOutputByTensorName(node_name)};\n}\n\nModel::Model() : impl_(nullptr) {}\nModel::~Model() {}\n\nbool Model::CheckModelSupport(enum DeviceType device_type, ModelType model_type) {\n  auto check_model = Factory<ModelImpl>::Instance().Create(device_type);\n  if (check_model == nullptr) {\n    return false;\n  }\n  return check_model->CheckModelSupport(model_type);\n}\n\nStatus Model::LoadConfig(const std::vector<char> &config_path) {\n  if (common::DirOrFileExist(CharToString(config_path))) {\n    return kSuccess;\n  }\n  MS_LOG(ERROR) << \"The config file path: \" << CharToString(config_path) << \" doesn't exist\";\n  return kMCFailed;\n}\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/model/model_impl.cc",
    "content": "/**\n * Copyright 2020-2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"cxx_api/model/model_impl.h\"\n\nnamespace mindspore {\nStatus ModelImpl::Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) {\n  MS_EXCEPTION_IF_NULL(outputs);\n  if (graph_ == nullptr) {\n    MS_LOG(ERROR) << \"Invalid data, graph_ is null.\";\n    return kMCFailed;\n  }\n\n  if (graph_cell_ == nullptr) {\n    MS_LOG(WARNING) << \"Model has not been built, it will be built with default options\";\n    Status ret = Build();\n    if (ret != kSuccess) {\n      MS_LOG(ERROR) << \"Build model failed.\";\n      return ret;\n    }\n  }\n\n  MS_EXCEPTION_IF_NULL(graph_cell_);\n  Status ret = graph_cell_->Run(inputs, outputs);\n  if (ret != kSuccess) {\n    MS_LOG(ERROR) << \"Run graph failed.\";\n    return ret;\n  }\n\n  return kSuccess;\n}\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/model/model_impl.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_CCSRC_CXX_API_MODEL_MODEL_IMPL_H\n#define MINDSPORE_CCSRC_CXX_API_MODEL_MODEL_IMPL_H\n#include <functional>\n#include <map>\n#include <string>\n#include <vector>\n#include <memory>\n#include <utility>\n#include \"include/api/context.h\"\n#include \"include/api/model.h\"\n#include \"include/api/graph.h\"\n#include \"cxx_api/graph/graph_data.h\"\n#include \"utils/utils.h\"\n\nnamespace mindspore {\nclass ModelImpl {\n public:\n  ModelImpl() = default;\n  virtual ~ModelImpl() = default;\n\n  virtual Status Build() = 0;\n  virtual Status Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims) = 0;\n\n  virtual Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs);\n\n  virtual std::vector<MSTensor> GetInputs() = 0;\n  virtual std::vector<MSTensor> GetOutputs() = 0;\n\n  virtual bool CheckDeviceSupport(mindspore::DeviceType device_type) = 0;\n  virtual bool CheckModelSupport(enum ModelType model_type) = 0;\n\n protected:\n  FuncGraphPtr GetFuncGraph() const {\n    if (graph_->ModelType() != ModelType::kMindIR) {\n      return nullptr;\n    }\n\n    auto graph_data = graph_->graph_data_;\n    MS_EXCEPTION_IF_NULL(graph_data);\n    return graph_data->GetFuncGraph();\n  }\n\n  std::shared_ptr<Graph> graph_ = nullptr;\n  std::shared_ptr<GraphCell> graph_cell_ = 
nullptr;\n  std::shared_ptr<Context> model_context_ = nullptr;\n\n private:\n  friend class Model;\n  void SetGraph(const std::shared_ptr<Graph> &graph) { graph_ = graph; }\n  void SetContext(const std::shared_ptr<Context> &model_context) {\n    if (model_context != nullptr) {\n      model_context_ = std::make_shared<Context>(*model_context);\n    }\n  }\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_CCSRC_CXX_API_MODEL_MODEL_IMPL_H\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/model/ms/ms_model.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"cxx_api/model/ms/ms_model.h\"\n#include <memory>\n#include <set>\n#include \"include/api/context.h\"\n#include \"cxx_api/factory.h\"\n\nnamespace mindspore {\n// mindspore-serving check current package for version check with ModelImpl factory.\nAPI_FACTORY_REG(ModelImpl, MsModel);\n\nstatic std::string GenerateShapeKey(const std::vector<std::vector<int64_t>> &dims) {\n  std::string shape_key;\n  for (size_t i = 0; i < dims.size(); ++i) {\n    shape_key += std::to_string(i) + \":\";\n    for (size_t j = 0; j < dims[i].size(); ++j) {\n      shape_key += std::to_string(dims[i][j]);\n      if (j + 1 < dims[i].size()) {\n        shape_key += \",\";\n      }\n    }\n    if (i + 1 < dims.size()) {\n      shape_key += \";\";\n    }\n  }\n  return shape_key;\n}\n\nstd::shared_ptr<GraphCell> MsModel::GenerateGraphCell(const std::vector<std::vector<int64_t>> &dims) {\n  std::string shape_key = GenerateShapeKey(dims);\n  if (auto iter = dynamic_size_graph_map_.find(shape_key); iter != dynamic_size_graph_map_.end()) {\n    MS_LOG(INFO) << \"This options has been built, read cache.\";\n    return iter->second;\n  }\n\n  auto func_graph = ModelImpl::GetFuncGraph();\n  MS_EXCEPTION_IF_NULL(func_graph);\n\n  auto graph = std::make_shared<Graph>(std::make_shared<Graph::GraphData>(func_graph, ModelType::kMindIR));\n  
MS_EXCEPTION_IF_NULL(graph);\n  auto graph_cell = std::make_shared<GraphCell>(graph);\n  MS_EXCEPTION_IF_NULL(graph_cell);\n  graph_cell->SetContext(model_context_);\n  auto ret = graph_cell->Load(GetDeviceID());\n  if (ret != kSuccess) {\n    MS_LOG(ERROR) << \"Load failed.\";\n    return nullptr;\n  }\n  dynamic_size_graph_map_[shape_key] = graph_cell;\n  return graph_cell;\n}\n\nStatus MsModel::Build() {\n  MS_LOG(INFO) << \"Start build model.\";\n  MS_EXCEPTION_IF_NULL(graph_);\n\n  if (graph_cell_ != nullptr) {\n    MS_LOG(INFO) << \"This model has been built, skip.\";\n    return kSuccess;\n  }\n\n  auto func_graph = ModelImpl::GetFuncGraph();\n  MS_EXCEPTION_IF_NULL(func_graph);\n\n  auto graph = std::make_shared<Graph>(std::make_shared<Graph::GraphData>(func_graph, ModelType::kMindIR));\n  MS_EXCEPTION_IF_NULL(graph);\n  auto graph_cell = std::make_shared<GraphCell>(graph);\n  MS_EXCEPTION_IF_NULL(graph_cell);\n  graph_cell->SetContext(model_context_);\n  auto ret = graph_cell->Load(GetDeviceID());\n  if (ret != kSuccess) {\n    MS_LOG(ERROR) << \"Load failed.\";\n    return ret;\n  }\n\n  // save result\n  graph_cell_ = graph_cell;\n  MS_LOG(INFO) << \"Build model success.\";\n  return kSuccess;\n}\n\nStatus MsModel::Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims) {\n  MS_LOG(INFO) << \"Start to resize model\";\n  auto origin_inputs = GetInputs();\n  if (inputs.size() != origin_inputs.size()) {\n    MS_LOG(ERROR) << \"Invalid inputs size \" << inputs.size() << \" not match model inputs size \" << origin_inputs.size();\n    return kMCInvalidInput;\n  }\n\n  if (inputs.size() != dims.size()) {\n    MS_LOG(ERROR) << \"Invalid dims size \" << dims.size() << \" not match inputs size \" << inputs.size();\n    return kMCInvalidInput;\n  }\n\n  auto graph_cell = GenerateGraphCell(dims);\n  if (graph_cell == nullptr) {\n    MS_LOG(ERROR) << \"GenerateGraphCell failed.\";\n    return kMCFailed;\n  }\n\n  MS_LOG(INFO) << 
\"Resize model success.\";\n  graph_cell_ = std::move(graph_cell);\n  return kSuccess;\n}\n\nstd::vector<MSTensor> MsModel::GetInputs() {\n  MS_EXCEPTION_IF_NULL(graph_cell_);\n  return graph_cell_->GetInputs();\n}\n\nstd::vector<MSTensor> MsModel::GetOutputs() {\n  MS_EXCEPTION_IF_NULL(graph_cell_);\n  return graph_cell_->GetOutputs();\n}\n\nuint32_t MsModel::GetDeviceID() const {\n  if (model_context_ == nullptr) {\n    return 0;\n  }\n\n  auto &device_infos = model_context_->MutableDeviceInfo();\n  if (device_infos.size() != 1) {\n    return 0;\n  }\n\n  auto ascend910_info = device_infos[0]->Cast<Ascend910DeviceInfo>();\n  if (ascend910_info != nullptr) {\n    return ascend910_info->GetDeviceID();\n  }\n\n  auto gpu_info = device_infos[0]->Cast<GPUDeviceInfo>();\n  if (gpu_info != nullptr) {\n    return gpu_info->GetDeviceID();\n  }\n\n  return 0;\n}\n\nbool MsModel::CheckDeviceSupport(enum DeviceType device_type) {\n  const char *cpu_value = ::getenv(\"SERVING_ENABLE_CPU_DEVICE\");\n  const char *gpu_value = ::getenv(\"SERVING_ENABLE_GPU_DEVICE\");\n  auto enable_cpu = cpu_value && std::string(cpu_value) == \"1\";\n  auto enable_gpu = gpu_value && std::string(gpu_value) == \"1\";\n  if (device_type == kCPU) {\n    return enable_cpu;\n  } else if (device_type == kGPU) {\n    return enable_gpu;\n  }\n  return !enable_cpu && !enable_gpu;\n}\n\nbool MsModel::CheckModelSupport(mindspore::ModelType model_type) {\n  static const std::set<ModelType> kSupportedModelMap = {kMindIR};\n  auto iter = kSupportedModelMap.find(model_type);\n  if (iter == kSupportedModelMap.end()) {\n    return false;\n  }\n  return true;\n}\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/model/ms/ms_model.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_CCSRC_SESSION_SESSION_H\n#define MINDSPORE_CCSRC_SESSION_SESSION_H\n\n#include <vector>\n#include <string>\n#include <unordered_map>\n#include <utility>\n#include <memory>\n#include <map>\n\n#include \"include/api/status.h\"\n#include \"cxx_api/model/model_impl.h\"\n\nnamespace mindspore {\nclass MsModel : public ModelImpl {\n public:\n  MsModel() {}\n  ~MsModel() = default;\n\n  Status Build() override;\n  Status Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims) override;\n\n  std::vector<MSTensor> GetInputs() override;\n  std::vector<MSTensor> GetOutputs() override;\n\n  bool CheckDeviceSupport(mindspore::DeviceType device_type) override;\n  bool CheckModelSupport(enum ModelType model_type) override;\n\n private:\n  std::shared_ptr<GraphCell> GenerateGraphCell(const std::vector<std::vector<int64_t>> &dims);\n  uint32_t GetDeviceID() const;\n\n  std::map<std::string, std::shared_ptr<GraphCell>> dynamic_size_graph_map_;\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_CCSRC_SESSION_SESSION_H\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/serialization.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"include/api/serialization.h\"\n#include <fstream>\n#include <sstream>\n#include \"cxx_api/graph/graph_data.h\"\n#include \"utils/log_adapter.h\"\n\nnamespace mindspore {\nstatic Status RealPath(const std::string &file, std::string *realpath_str) {\n  MS_EXCEPTION_IF_NULL(realpath_str);\n  char real_path_mem[PATH_MAX] = {0};\n  char *real_path_ret = nullptr;\n#if defined(_WIN32) || defined(_WIN64)\n  real_path_ret = _fullpath(real_path_mem, common::SafeCStr(file), PATH_MAX);\n#else\n  real_path_ret = realpath(common::SafeCStr(file), real_path_mem);\n#endif\n  if (real_path_ret == nullptr) {\n    return Status(kMEInvalidInput, \"File: \" + file + \" does not exist.\");\n  }\n  *realpath_str = real_path_mem;\n  return kSuccess;\n}\n\nstatic Buffer ReadFile(const std::string &file) {\n  Buffer buffer;\n  if (file.empty()) {\n    MS_LOG(ERROR) << \"Pointer file is nullptr\";\n    return buffer;\n  }\n\n  std::string real_path;\n  auto status = RealPath(file, &real_path);\n  if (status != kSuccess) {\n    MS_LOG(ERROR) << status.GetErrDescription();\n    return buffer;\n  }\n\n  std::ifstream ifs(real_path);\n  if (!ifs.good()) {\n    MS_LOG(ERROR) << \"File: \" << real_path << \" does not exist\";\n    return buffer;\n  }\n\n  if (!ifs.is_open()) {\n    MS_LOG(ERROR) << \"File: \" << real_path << \" open failed\";\n    return 
buffer;\n  }\n\n  (void)ifs.seekg(0, std::ios::end);\n  size_t size = static_cast<size_t>(ifs.tellg());\n  buffer.ResizeData(size);\n  if (buffer.DataSize() != size) {\n    MS_LOG(ERROR) << \"Malloc buf failed, file: \" << real_path;\n    ifs.close();\n    return buffer;\n  }\n\n  (void)ifs.seekg(0, std::ios::beg);\n  (void)ifs.read(reinterpret_cast<char *>(buffer.MutableData()), static_cast<std::streamsize>(size));\n  ifs.close();\n\n  return buffer;\n}\n\nKey::Key(const char *dec_key, size_t key_len) {\n  len = 0;\n  if (key_len >= max_key_len) {\n    MS_LOG(ERROR) << \"Invalid key len \" << key_len << \" is more than max key len \" << max_key_len;\n    return;\n  }\n\n  auto sec_ret = memcpy_s(key, max_key_len, dec_key, key_len);\n  if (sec_ret != EOK) {\n    MS_LOG(ERROR) << \"memcpy_s failed, src_len = \" << key_len << \", dst_len = \" << max_key_len << \", ret = \" << sec_ret;\n    return;\n  }\n\n  len = key_len;\n}\n\nStatus Serialization::Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph,\n                           const Key &dec_key, const std::vector<char> &dec_mode) {\n  std::stringstream err_msg;\n  if (graph == nullptr) {\n    err_msg << \"Output args graph is nullptr.\";\n    MS_LOG(ERROR) << err_msg.str();\n    return Status(kMEInvalidInput, err_msg.str());\n  }\n\n  if (model_type == kMindIR) {\n    FuncGraphPtr anf_graph = nullptr;\n    try {\n      if (dec_key.len > dec_key.max_key_len) {\n        err_msg << \"The key length exceeds maximum length: \" << dec_key.max_key_len;\n        MS_LOG(ERROR) << err_msg.str();\n        return Status(kMEInvalidInput, err_msg.str());\n      } else if (dec_key.len == 0) {\n        if (IsCipherFile(reinterpret_cast<const unsigned char *>(model_data))) {\n          err_msg << \"Load model failed. 
The model_data may be encrypted, please pass in correct key.\";\n          MS_LOG(ERROR) << err_msg.str();\n          return Status(kMEInvalidInput, err_msg.str());\n        } else {\n          anf_graph = ConvertStreamToFuncGraph(reinterpret_cast<const char *>(model_data), data_size);\n        }\n      } else {\n        size_t plain_data_size;\n        auto plain_data = mindspore::Decrypt(&plain_data_size, reinterpret_cast<const unsigned char *>(model_data),\n                                             data_size, dec_key.key, dec_key.len, CharToString(dec_mode));\n        if (plain_data == nullptr) {\n          err_msg << \"Load model failed. Please check the valid of dec_key and dec_mode.\";\n          MS_LOG(ERROR) << err_msg.str();\n          return Status(kMEInvalidInput, err_msg.str());\n        }\n        anf_graph = ConvertStreamToFuncGraph(reinterpret_cast<const char *>(plain_data.get()), plain_data_size);\n      }\n    } catch (const std::exception &) {\n      err_msg << \"Load model failed. 
Please check the valid of dec_key and dec_mode.\";\n      MS_LOG(ERROR) << err_msg.str();\n      return Status(kMEInvalidInput, err_msg.str());\n    }\n\n    *graph = Graph(std::make_shared<Graph::GraphData>(anf_graph, kMindIR));\n    return kSuccess;\n  } else if (model_type == kOM) {\n    *graph = Graph(std::make_shared<Graph::GraphData>(Buffer(model_data, data_size), kOM));\n    return kSuccess;\n  }\n\n  err_msg << \"Unsupported ModelType \" << model_type;\n  MS_LOG(ERROR) << err_msg.str();\n  return Status(kMEInvalidInput, err_msg.str());\n}\n\nStatus Serialization::Load(const std::vector<char> &file, ModelType model_type, Graph *graph) {\n  return Load(file, model_type, graph, Key{}, StringToChar(kDecModeAesGcm));\n}\n\nStatus Serialization::Load(const std::vector<char> &file, ModelType model_type, Graph *graph, const Key &dec_key,\n                           const std::vector<char> &dec_mode) {\n  std::stringstream err_msg;\n  if (graph == nullptr) {\n    MS_LOG(ERROR) << \"Output args graph is nullptr.\";\n    return Status(kMEInvalidInput, \"Output args graph is nullptr.\");\n  }\n\n  std::string file_path;\n  auto status = RealPath(CharToString(file), &file_path);\n  if (status != kSuccess) {\n    MS_LOG(ERROR) << status.GetErrDescription();\n    return status;\n  }\n\n  if (model_type == kMindIR || model_type ==  kMindIR_Lite) {\n    FuncGraphPtr anf_graph;\n    if (dec_key.len > dec_key.max_key_len) {\n      err_msg << \"The key length exceeds maximum length: \" << dec_key.max_key_len;\n      MS_LOG(ERROR) << err_msg.str();\n      return Status(kMEInvalidInput, err_msg.str());\n    } else if (dec_key.len == 0 && IsCipherFile(file_path)) {\n      err_msg << \"Load model failed. The file may be encrypted, please pass in correct key.\";\n      MS_LOG(ERROR) << err_msg.str();\n      return Status(kMEInvalidInput, err_msg.str());\n    } else {\n      anf_graph =\n        LoadMindIR(file_path, false, dec_key.len == 0 ? 
nullptr : dec_key.key, dec_key.len, CharToString(dec_mode));\n    }\n    if (anf_graph == nullptr) {\n      err_msg << \"Load model failed.\";\n      MS_LOG(ERROR) << err_msg.str();\n      return Status(kMEInvalidInput, err_msg.str());\n    }\n    *graph = Graph(std::make_shared<Graph::GraphData>(anf_graph, kMindIR));\n    return kSuccess;\n  } else if (model_type == kOM) {\n    Buffer data = ReadFile(file_path);\n    if (data.Data() == nullptr) {\n      err_msg << \"Read file \" << file_path << \" failed.\";\n      MS_LOG(ERROR) << err_msg.str();\n      return Status(kMEInvalidInput, err_msg.str());\n    }\n    *graph = Graph(std::make_shared<Graph::GraphData>(data, kOM));\n    return kSuccess;\n  }\n\n  err_msg << \"Unsupported ModelType \" << model_type;\n  MS_LOG(ERROR) << err_msg.str();\n  return Status(kMEInvalidInput, err_msg.str());\n}\n\nStatus Serialization::Load(const std::vector<std::vector<char>> &files, ModelType model_type,\n                           std::vector<Graph> *graphs, const Key &dec_key, const std::vector<char> &dec_mode) {\n  std::stringstream err_msg;\n  if (graphs == nullptr) {\n    MS_LOG(ERROR) << \"Output args graph is nullptr.\";\n    return Status(kMEInvalidInput, \"Output args graph is nullptr.\");\n  }\n\n  if (files.size() == 1) {\n    std::vector<Graph> result(files.size());\n    auto ret = Load(files[0], model_type, &result[0], dec_key, dec_mode);\n    *graphs = std::move(result);\n    return ret;\n  }\n\n  std::vector<std::string> files_path;\n  for (const auto &file : files) {\n    std::string file_path;\n    auto status = RealPath(CharToString(file), &file_path);\n    if (status != kSuccess) {\n      MS_LOG(ERROR) << status.GetErrDescription();\n      return status;\n    }\n    files_path.emplace_back(std::move(file_path));\n  }\n\n  if (model_type == kMindIR) {\n    if (dec_key.len > dec_key.max_key_len) {\n      err_msg << \"The key length exceeds maximum length: \" << dec_key.max_key_len;\n      MS_LOG(ERROR) << 
err_msg.str();\n      return Status(kMEInvalidInput, err_msg.str());\n    }\n    auto anf_graphs =\n      LoadMindIRs(files_path, false, dec_key.len == 0 ? nullptr : dec_key.key, dec_key.len, CharToString(dec_mode));\n    if (anf_graphs.size() != files_path.size()) {\n      err_msg << \"Load model failed, \" << files_path.size() << \" files got \" << anf_graphs.size() << \" graphs.\";\n      MS_LOG(ERROR) << err_msg.str();\n      return Status(kMEInvalidInput, err_msg.str());\n    }\n    std::vector<Graph> results;\n    for (size_t i = 0; i < anf_graphs.size(); ++i) {\n      if (anf_graphs[i] == nullptr) {\n        if (dec_key.len == 0 && IsCipherFile(files_path[i])) {\n          err_msg << \"Load model failed. The file \" << files_path[i] << \" be encrypted, please pass in correct key.\";\n        } else {\n          err_msg << \"Load model \" << files_path[i] << \" failed.\";\n        }\n        MS_LOG(ERROR) << err_msg.str();\n        return Status(kMEInvalidInput, err_msg.str());\n      }\n      results.emplace_back(std::make_shared<Graph::GraphData>(anf_graphs[i], kMindIR));\n    }\n\n    *graphs = std::move(results);\n    return kSuccess;\n  }\n\n  err_msg << \"Unsupported ModelType \" << model_type;\n  MS_LOG(ERROR) << err_msg.str();\n  return Status(kMEInvalidInput, err_msg.str());\n}\n\nStatus Serialization::SetParameters(const std::map<std::string, Buffer> &, Model *) {\n  MS_LOG(ERROR) << \"Unsupported feature.\";\n  return kMEFailed;\n}\n\nStatus Serialization::ExportModel(const Model &, ModelType, Buffer *) {\n  MS_LOG(ERROR) << \"Unsupported feature.\";\n  return kMEFailed;\n}\n\nStatus Serialization::ExportModel(const Model &, ModelType, const std::vector<char> &, QuantizationType, bool,\n                                  const std::vector<std::vector<char>> &output_tensor_name) {\n  MS_LOG(ERROR) << \"Unsupported feature.\";\n  return kMEFailed;\n}\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/status.cc",
    "content": "/**\n * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).\n *\n * Copyright 2019 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"include/api/status.h\"\n#ifndef ENABLE_ANDROID\n#include <thread>\n#endif\n#include <map>\n#include <sstream>\n\nnamespace mindspore {\nstruct Status::Data {\n  enum StatusCode status_code = kSuccess;\n  std::string status_msg;\n  int line_of_code = -1;\n  std::string file_name;\n  std::string err_description;\n};\n\nStatus::Status() : data_(std::make_shared<Data>()) {}\n\nStatus::Status(enum StatusCode status_code, const std::vector<char> &status_msg) : data_(std::make_shared<Data>()) {\n  if (data_ == nullptr) {\n    return;\n  }\n\n  data_->status_msg = CharToString(status_msg);\n  data_->status_code = status_code;\n}\n\nStatus::Status(enum StatusCode code, int line_of_code, const char *file_name, const std::vector<char> &extra)\n    : data_(std::make_shared<Data>()) {\n  if (data_ == nullptr) {\n    return;\n  }\n  data_->status_code = code;\n  data_->line_of_code = line_of_code;\n  if (file_name != nullptr) {\n    data_->file_name = file_name;\n  }\n  data_->err_description = CharToString(extra);\n\n  std::ostringstream ss;\n#ifndef ENABLE_ANDROID\n  ss << \"Thread ID \" << std::this_thread::get_id() << \" \" << CodeAsString(code) << \". 
\";\n  if (!data_->err_description.empty()) {\n    ss << data_->err_description;\n  }\n  ss << \"\\n\";\n#endif\n\n  ss << \"Line of code : \" << line_of_code << \"\\n\";\n  if (file_name != nullptr) {\n    ss << \"File         : \" << file_name << \"\\n\";\n  }\n  data_->status_msg = ss.str();\n}\n\nenum StatusCode Status::StatusCode() const {\n  if (data_ == nullptr) {\n    return kSuccess;\n  }\n  return data_->status_code;\n}\n\nstd::vector<char> Status::ToCString() const {\n  if (data_ == nullptr) {\n    return std::vector<char>();\n  }\n  return StringToChar(data_->status_msg);\n}\n\nint Status::GetLineOfCode() const {\n  if (data_ == nullptr) {\n    return -1;\n  }\n  return data_->line_of_code;\n}\n\nstd::vector<char> Status::GetErrDescriptionChar() const {\n  if (data_ == nullptr) {\n    return std::vector<char>();\n  }\n  if (data_->err_description.empty()) {\n    return ToCString();\n  }\n  return StringToChar(data_->err_description);\n}\n\nstd::vector<char> Status::CodeAsCString(enum StatusCode c) {\n  static std::map<enum StatusCode, std::string> info_map = {{kSuccess, \"No error occurs.\"},\n                                                            // Core\n                                                            {kCoreFailed, \"Common error code.\"},\n                                                            // MD\n                                                            {kMDOutOfMemory, \"Out of memory\"},\n                                                            {kMDShapeMisMatch, \"Shape is incorrect\"},\n                                                            {kMDInterrupted, \"Interrupted system call\"},\n                                                            {kMDNoSpace, \"No space left on device\"},\n                                                            {kMDPyFuncException, \"Exception thrown from PyFunc\"},\n                                                            {kMDDuplicateKey, \"Duplicate key\"},\n                                                            
{kMDPythonInterpreterFailure, \"\"},\n                                                            {kMDTDTPushFailure, \"Unexpected error\"},\n                                                            {kMDFileNotExist, \"Unexpected error\"},\n                                                            {kMDProfilingError, \"Error encountered while profiling\"},\n                                                            {kMDBoundingBoxOutOfBounds, \"Unexpected error\"},\n                                                            {kMDBoundingBoxInvalidShape, \"Unexpected error\"},\n                                                            {kMDSyntaxError, \"Syntax error\"},\n                                                            {kMDTimeOut, \"Unexpected error\"},\n                                                            {kMDBuddySpaceFull, \"BuddySpace full\"},\n                                                            {kMDNetWorkError, \"Network error\"},\n                                                            {kMDNotImplementedYet, \"Unexpected error\"},\n                                                            {kMDUnexpectedError, \"Unexpected error\"},\n                                                            // ME\n                                                            {kMEFailed, \"Common error code.\"},\n                                                            {kMEInvalidInput, \"Invalid input.\"},\n                                                            // MC\n                                                            {kMCFailed, \"Common error code.\"},\n                                                            {kMCDeviceError, \"Device error.\"},\n                                                            {kMCInvalidInput, \"Invalid input.\"},\n                                                            {kMCInvalidArgs, \"Invalid arguments.\"},\n                                                            // Lite\n  
                                                          {kLiteError, \"Common error code.\"},\n                                                            {kLiteNullptr, \"NULL pointer returned.\"},\n                                                            {kLiteParamInvalid, \"Invalid parameter.\"},\n                                                            {kLiteNoChange, \"No change.\"},\n                                                            {kLiteSuccessExit, \"No error but exit.\"},\n                                                            {kLiteMemoryFailed, \"Fail to create memory.\"},\n                                                            {kLiteNotSupport, \"Fail to support.\"},\n                                                            {kLiteThreadPoolError, \"Thread pool error.\"},\n                                                            {kLiteOutOfTensorRange, \"Failed to check range.\"},\n                                                            {kLiteInputTensorError, \"Failed to check input tensor.\"},\n                                                            {kLiteReentrantError, \"Exist executor running.\"},\n                                                            {kLiteGraphFileError, \"Failed to verify graph file.\"},\n                                                            {kLiteNotFindOp, \"Failed to find operator.\"},\n                                                            {kLiteInvalidOpName, \"Invalid operator name.\"},\n                                                            {kLiteInvalidOpAttr, \"Invalid operator attr.\"},\n                                                            {kLiteOpExecuteFailure, \"Failed to execution operator.\"},\n                                                            {kLiteFormatError, \"Failed to checking tensor format.\"},\n                                                            {kLiteInferError, \"Failed to infer shape.\"},\n                          
                                  {kLiteInferInvalid, \"Invalid infer shape before runtime.\"},\n                                                            {kLiteInputParamInvalid, \"Invalid input param by user.\"}};\n  auto iter = info_map.find(c);\n  return StringToChar(iter == info_map.end() ? \"Unknown error\" : iter->second);\n}\n\nstd::ostream &operator<<(std::ostream &os, const Status &s) {\n  os << s.ToString();\n  return os;\n}\n\nstd::vector<char> Status::SetErrDescription(const std::vector<char> &err_description) {\n  if (data_ == nullptr) {\n    return std::vector<char>();\n  }\n  data_->err_description = CharToString(err_description);\n  std::ostringstream ss;\n#ifndef ENABLE_ANDROID\n  ss << \"Thread ID \" << std::this_thread::get_id() << \" \" << CodeAsString(data_->status_code) << \". \";\n  if (!data_->err_description.empty()) {\n    ss << data_->err_description;\n  }\n  ss << \"\\n\";\n#endif\n\n  if (data_->line_of_code > 0 && !data_->file_name.empty()) {\n    ss << \"Line of code : \" << data_->line_of_code << \"\\n\";\n    ss << \"File         : \" << data_->file_name << \"\\n\";\n  }\n  data_->status_msg = ss.str();\n  return StringToChar(data_->status_msg);\n}\n\nbool Status::operator==(const Status &other) const {\n  if (data_ == nullptr && other.data_ == nullptr) {\n    return true;\n  }\n\n  if (data_ == nullptr || other.data_ == nullptr) {\n    return false;\n  }\n\n  return data_->status_code == other.data_->status_code;\n}\n\nbool Status::operator==(enum StatusCode other_code) const { return StatusCode() == other_code; }\nbool Status::operator!=(const Status &other) const { return !operator==(other); }\nbool Status::operator!=(enum StatusCode other_code) const { return !operator==(other_code); }\n\nStatus::operator bool() const { return (StatusCode() == kSuccess); }\nStatus::operator int() const { return static_cast<int>(StatusCode()); }\n\nStatus Status::OK() { return StatusCode::kSuccess; }\nbool Status::IsOk() const { return 
(StatusCode() == StatusCode::kSuccess); }\nbool Status::IsError() const { return !IsOk(); }\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/cxx_api/types.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"include/api/types.h\"\n#include <fstream>\n#include <numeric>\n#include \"securec/include/securec.h\"\n#include \"utils/utils.h\"\n\nnamespace mindspore {\nclass Buffer::Impl {\n public:\n  Impl() : data_() {}\n  ~Impl() = default;\n  Impl(const void *data, size_t data_len) {\n    if (data != nullptr) {\n      (void)SetData(data, data_len);\n    } else {\n      ResizeData(data_len);\n    }\n  }\n\n  const void *Data() const { return data_.data(); }\n  void *MutableData() { return data_.data(); }\n  size_t DataSize() const { return data_.size(); }\n\n  bool ResizeData(size_t data_len) {\n    data_.resize(data_len);\n    return true;\n  }\n\n  bool SetData(const void *data, size_t data_len) {\n    ResizeData(data_len);\n    if (DataSize() != data_len) {\n      MS_LOG(ERROR) << \"Set data failed, tensor current data size \" << DataSize() << \" not match data len \" << data_len;\n      return false;\n    }\n\n    if (data == nullptr) {\n      return data_len == 0;\n    }\n\n    if (MutableData() == nullptr) {\n      MS_LOG(ERROR) << \"Set data failed, data len \" << data_len;\n      return false;\n    }\n\n    auto ret = memcpy_s(MutableData(), DataSize(), data, data_len);\n    if (ret != 0) {\n      MS_LOG(ERROR) << \"Set data memcpy_s failed, ret = \" << ret;\n      return false;\n    }\n    return true;\n  }\n\n 
protected:\n  std::vector<uint8_t> data_;\n};\n\nclass TensorDefaultImpl : public MSTensor::Impl {\n public:\n  TensorDefaultImpl() : buffer_(), name_(), type_(DataType::kTypeUnknown), shape_() {}\n  ~TensorDefaultImpl() override = default;\n  TensorDefaultImpl(const std::string &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,\n                    size_t data_len)\n      : buffer_(data, data_len), name_(name), type_(type), shape_(shape) {}\n\n  const std::string &Name() const override { return name_; }\n  enum DataType DataType() const override { return type_; }\n  const std::vector<int64_t> &Shape() const override { return shape_; }\n\n  std::shared_ptr<const void> Data() const override {\n    return std::shared_ptr<const void>(buffer_.Data(), [](const void *) {});\n  }\n\n  void *MutableData() override { return buffer_.MutableData(); }\n  size_t DataSize() const override { return buffer_.DataSize(); }\n\n  bool IsDevice() const override { return false; }\n\n  std::shared_ptr<Impl> Clone() const override {\n    return std::make_shared<TensorDefaultImpl>(name_, type_, shape_, buffer_.Data(), buffer_.DataSize());\n  }\n\n private:\n  Buffer buffer_;\n  std::string name_;\n  enum DataType type_;\n  std::vector<int64_t> shape_;\n};\n\nclass TensorReferenceImpl : public MSTensor::Impl {\n public:\n  TensorReferenceImpl()\n      : data_(nullptr), data_size_(0), name_(), type_(DataType::kTypeUnknown), shape_(), is_device_(false) {}\n  ~TensorReferenceImpl() override = default;\n  TensorReferenceImpl(const std::string &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,\n                      size_t data_len, bool is_device)\n      : data_(data), data_size_(data_len), name_(name), type_(type), shape_(shape), is_device_(is_device) {}\n\n  const std::string &Name() const override { return name_; }\n  enum DataType DataType() const override { return type_; }\n  const std::vector<int64_t> &Shape() const override { 
return shape_; }\n\n  std::shared_ptr<const void> Data() const override {\n    return std::shared_ptr<const void>(data_, [](const void *) {});\n  }\n\n  void *MutableData() override { return const_cast<void *>(data_); }\n  size_t DataSize() const override { return data_size_; }\n\n  bool IsDevice() const override { return is_device_; }\n\n  std::shared_ptr<Impl> Clone() const override {\n    return std::make_shared<TensorReferenceImpl>(name_, type_, shape_, data_, data_size_, is_device_);\n  }\n\n protected:\n  const void *data_;\n  size_t data_size_;\n  std::string name_;\n  enum DataType type_;\n  std::vector<int64_t> shape_;\n  bool is_device_;\n};\n\nMSTensor *MSTensor::CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,\n                                 const void *data, size_t data_len) noexcept {\n  std::string name_str = CharToString(name);\n  try {\n    std::shared_ptr<Impl> impl = std::make_shared<TensorDefaultImpl>(name_str, type, shape, data, data_len);\n    MSTensor *ret = new MSTensor(impl);\n    return ret;\n  } catch (const std::bad_alloc &) {\n    MS_LOG(ERROR) << \"Malloc memory failed.\";\n    return nullptr;\n  } catch (...) {\n    MS_LOG(ERROR) << \"Unknown error occurred.\";\n    return nullptr;\n  }\n}\n\nMSTensor *MSTensor::CreateRefTensor(const std::vector<char> &name, enum DataType type,\n                                    const std::vector<int64_t> &shape, const void *data, size_t data_len,\n                                    bool) noexcept {\n  std::string name_str = CharToString(name);\n  try {\n    std::shared_ptr<Impl> impl = std::make_shared<TensorReferenceImpl>(name_str, type, shape, data, data_len, false);\n    MSTensor *ret = new MSTensor(impl);\n    return ret;\n  } catch (const std::bad_alloc &) {\n    MS_LOG(ERROR) << \"Malloc memory failed.\";\n    return nullptr;\n  } catch (...) 
{\n    MS_LOG(ERROR) << \"Unknown error occurred.\";\n    return nullptr;\n  }\n}\n\nMSTensor MSTensor::CreateDeviceTensor(const std::vector<char> &name, enum DataType type,\n                                      const std::vector<int64_t> &shape, void *data, size_t data_len) noexcept {\n  std::string name_str = CharToString(name);\n  try {\n    std::shared_ptr<Impl> impl = std::make_shared<TensorReferenceImpl>(name_str, type, shape, data, data_len, true);\n    return MSTensor(impl);\n  } catch (const std::bad_alloc &) {\n    MS_LOG(ERROR) << \"Malloc memory failed.\";\n    return MSTensor(nullptr);\n  } catch (...) {\n    MS_LOG(ERROR) << \"Unknown error occurred.\";\n    return MSTensor(nullptr);\n  }\n}\n\nMSTensor *MSTensor::CharStringsToTensor(const std::vector<char> &name, const std::vector<std::vector<char>> &str) {\n  // num(4 bytes) + offset1(4 bytes) + offset2(4 bytes) + ... + data1(str1.len) + data2(str2.len) + ...\n  // str1.len() = offset2 - offset1\n  // data1.begin() = start + offset1\n  size_t mem_size = 0;\n  mem_size += sizeof(int32_t);  // for num\n  for (const auto &s : str) {\n    mem_size += sizeof(int32_t);  // for offset\n    mem_size += s.size();         // for data\n  }\n\n  auto tensor = CreateTensor(name, DataType::kObjectTypeString, {static_cast<int64_t>(mem_size)}, nullptr, mem_size);\n  if (tensor == nullptr) {\n    MS_LOG(ERROR) << \"Create tensor failed.\";\n    return nullptr;\n  }\n\n  int32_t *data = reinterpret_cast<int32_t *>(tensor->MutableData());\n  if (data == nullptr) {\n    MS_LOG(ERROR) << \"Create tensor failed.\";\n    DestroyTensorPtr(tensor);\n    return nullptr;\n  }\n  uint8_t *cur_data = reinterpret_cast<uint8_t *>(data + 1 + str.size());\n  *reinterpret_cast<int32_t *>(data) = str.size();\n  for (size_t i = 0; i < str.size(); ++i) {\n    int32_t offset = (cur_data - reinterpret_cast<uint8_t *>(data));\n    data[i + 1] = offset;\n    if (str[i].empty()) {\n      continue;\n    }\n    auto ret = 
memcpy_s(reinterpret_cast<void *>(cur_data), str[i].size(), str[i].data(), str[i].size());\n    if (ret != 0) {\n      MS_LOG(ERROR) << \"memcpy_s failed, ret = \" << ret;\n      DestroyTensorPtr(tensor);\n      return nullptr;\n    }\n    cur_data += str[i].size();\n  }\n\n  return tensor;\n}\n\nstd::vector<std::vector<char>> MSTensor::TensorToStringChars(const MSTensor &tensor) {\n  if (tensor == nullptr || tensor.DataType() != DataType::kObjectTypeString || tensor.DataSize() < 4) {\n    MS_LOG(ERROR) << \"Invalid tensor.\";\n    return {};\n  }\n\n  std::vector<std::vector<char>> strings;\n  auto host_data = tensor.Data();\n  const int32_t *data = reinterpret_cast<const int32_t *>(host_data.get());\n  int32_t str_num = data[0];\n  if (str_num == 0) {\n    return {};\n  }\n  if (str_num < 0) {\n    MS_LOG(ERROR) << \"str num \" << str_num << \" cannot be negative.\";\n    return {};\n  }\n\n  if (tensor.DataSize() < (str_num + 1) * sizeof(int32_t)) {\n    MS_LOG(ERROR) << \"Invalid tensor data size \" << tensor.DataSize() << \", need \"\n                  << IntToSize(str_num + 1) * sizeof(int32_t) << \" at least for \" << str_num << \" strings.\";\n    return {};\n  }\n  for (size_t i = 0; i < static_cast<size_t>(str_num); ++i) {\n    strings.push_back({});\n    auto &str = strings[i];\n    int32_t str_len;\n    int32_t offset = data[i + 1];\n    if (i + 1 != static_cast<size_t>(str_num)) {\n      str_len = data[i + 1 + 1] - offset;\n    } else {\n      str_len = tensor.DataSize() - offset;\n    }\n\n    if (str_len == 0) {\n      continue;\n    }\n\n    if (str_len < 0) {\n      MS_LOG(ERROR) << \"str \" << i << \" len \" << str_len << \" cannot be negative.\";\n      return {};\n    }\n\n    str.resize(str_len);\n    const uint8_t *cur_data = reinterpret_cast<const uint8_t *>(data) + offset;\n    auto ret = memcpy_s(reinterpret_cast<void *>(str.data()), str.size(), cur_data, str_len);\n    if (ret != 0) {\n      MS_LOG(ERROR) << \"memcpy_s failed, ret = \" << 
ret;\n      return {};\n    }\n  }\n\n  return strings;\n}\n\nvoid MSTensor::DestroyTensorPtr(MSTensor *tensor) noexcept {\n  if (tensor != nullptr) {\n    delete tensor;\n  }\n}\n\nMSTensor::MSTensor() : impl_(std::make_shared<TensorDefaultImpl>()) {}\nMSTensor::MSTensor(std::nullptr_t) : impl_(nullptr) {}\nMSTensor::MSTensor(const std::shared_ptr<Impl> &impl) : impl_(impl) { MS_EXCEPTION_IF_NULL(impl); }\nMSTensor::MSTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,\n                   const void *data, size_t data_len)\n    : impl_(std::make_shared<TensorDefaultImpl>(CharToString(name), type, shape, data, data_len)) {}\nMSTensor::~MSTensor() = default;\n\nbool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; }\n\nbool MSTensor::operator!=(std::nullptr_t) const { return impl_ != nullptr; }\n\nMSTensor *MSTensor::Clone() const {\n  MS_EXCEPTION_IF_NULL(impl_);\n  try {\n    MSTensor *ret = new MSTensor();\n    ret->impl_ = impl_->Clone();\n    return ret;\n  } catch (const std::bad_alloc &) {\n    MS_LOG(ERROR) << \"Malloc memory failed.\";\n    return nullptr;\n  } catch (...) 
{\n    MS_LOG(ERROR) << \"Unknown error occurred.\";\n    return nullptr;\n  }\n}\n\nstd::vector<char> MSTensor::CharName() const {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return StringToChar(impl_->Name());\n}\n\nenum DataType MSTensor::DataType() const {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return impl_->DataType();\n}\n\nconst std::vector<int64_t> &MSTensor::Shape() const {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return impl_->Shape();\n}\n\nint64_t MSTensor::ElementNum() const {\n  MS_EXCEPTION_IF_NULL(impl_);\n  const auto &shape = impl_->Shape();\n  if (shape.empty()) {\n    // element number of scalar is 1\n    return 1;\n  }\n  return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int64_t>());\n}\n\nstd::shared_ptr<const void> MSTensor::Data() const {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return impl_->Data();\n}\n\nvoid *MSTensor::MutableData() {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return impl_->MutableData();\n}\n\nsize_t MSTensor::DataSize() const {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return impl_->DataSize();\n}\n\nbool MSTensor::IsDevice() const {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return impl_->IsDevice();\n}\n\nvoid MSTensor::SetShape(const std::vector<int64_t> &) { MS_LOG_EXCEPTION << \"Invalid implement.\"; }\n\nvoid MSTensor::SetDataType(enum DataType) { MS_LOG_EXCEPTION << \"Invalid implement.\"; }\n\nvoid MSTensor::SetTensorName(const std::vector<char> &) { MS_LOG_EXCEPTION << \"Invalid implement.\"; }\n\nvoid MSTensor::SetAllocator(std::shared_ptr<Allocator>) { MS_LOG_EXCEPTION << \"Invalid implement.\"; }\n\nstd::shared_ptr<Allocator> MSTensor::allocator() const { MS_LOG_EXCEPTION << \"Invalid implement.\"; }\n\nvoid MSTensor::SetFormat(mindspore::Format) { MS_LOG_EXCEPTION << \"Invalid implement.\"; }\n\nmindspore::Format MSTensor::format() const { MS_LOG_EXCEPTION << \"Invalid implement.\"; }\n\nvoid MSTensor::SetData(void *, bool) { MS_LOG_EXCEPTION << \"Invalid implement.\"; }\n\nstd::vector<QuantParam> MSTensor::QuantParams() const { 
MS_LOG_EXCEPTION << \"Invalid implement.\"; }\n\nvoid MSTensor::SetQuantParams(std::vector<QuantParam>) { MS_LOG_EXCEPTION << \"Invalid implement.\"; }\n\nBuffer::Buffer() : impl_(std::make_shared<Impl>()) {}\nBuffer::Buffer(const void *data, size_t data_len) : impl_(std::make_shared<Impl>(data, data_len)) {}\nBuffer::~Buffer() = default;\n\nBuffer Buffer::Clone() const {\n  MS_EXCEPTION_IF_NULL(impl_);\n  Buffer ret;\n  ret.impl_ = std::make_shared<Impl>(*impl_);\n  return ret;\n}\n\nconst void *Buffer::Data() const {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return impl_->Data();\n}\n\nvoid *Buffer::MutableData() {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return impl_->MutableData();\n}\n\nsize_t Buffer::DataSize() const {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return impl_->DataSize();\n}\n\nbool Buffer::ResizeData(size_t data_len) {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return impl_->ResizeData(data_len);\n}\n\nbool Buffer::SetData(const void *data, size_t data_len) {\n  MS_EXCEPTION_IF_NULL(impl_);\n  return impl_->SetData(data, data_len);\n}\n\nstd::vector<char> CharVersion() { return {}; }\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/graph_impl_stub.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"stub/graph_impl_stub.h\"\n\nnamespace mindspore {\n\nvoid GraphImplStubAdd::Init(const std::vector<int64_t> &add_shape) {\n  auto element_cnt = [add_shape]() -> size_t {\n    size_t element_num = 1;\n    for (auto dim : add_shape) {\n      if (dim <= 0) {\n        return 0;\n      }\n      element_num *= dim;\n    }\n    return element_num;\n  };\n  auto ele_size = element_cnt() * sizeof(float);\n  inputs_.clear();\n  for (size_t i = 0; i < input_count; i++) {\n    MSTensor tensor_x =\n      MSTensor(\"x\" + std::to_string(1), mindspore::DataType::kNumberTypeFloat32, add_shape, nullptr, ele_size);\n    inputs_.push_back(tensor_x);\n  }\n  outputs_.clear();\n  for (size_t i = 0; i < output_count; i++) {\n    MSTensor tensor_y =\n      MSTensor(\"x\" + std::to_string(1), mindspore::DataType::kNumberTypeFloat32, add_shape, nullptr, ele_size);\n    outputs_.push_back(tensor_y);\n  }\n}\n\n// y=x1+x2+x3+x4, y=x1-x2-x3-x4\n// y2=y1+1\nStatus GraphImplStubAdd::Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) {\n  auto file_name = graph_->graph_data_->GetFuncGraph()->file_name_;\n  MS_LOG_INFO << \"exec model file ------------------- \" << file_name;\n  if (inputs.size() != inputs_.size()) {\n    return mindspore::kCoreFailed;\n  }\n  for (size_t i = 0; i < inputs.size(); i++) {\n    if 
(inputs[i].DataSize() != inputs_[i].DataSize()) {\n      return mindspore::kCoreFailed;\n    }\n    if (inputs_[i].DataSize() != 0 && inputs[i].Data() == nullptr) {\n      return mindspore::kCoreFailed;\n    }\n  }\n  auto item_count = outputs_[0].DataSize() / sizeof(float);\n\n  auto get_output_tensor = [this](size_t index) -> MSTensor {\n    MSTensor *output_ptr = outputs_[index].Clone();\n    MSTensor output = *output_ptr;\n    mindspore::MSTensor::DestroyTensorPtr(output_ptr);\n    return output;\n  };\n  auto output = get_output_tensor(0);\n  auto y = reinterpret_cast<float *>(output.MutableData());\n  auto x0 = reinterpret_cast<const float *>(inputs[0].Data().get());\n  for (size_t i = 0; i < item_count; i++) {\n    y[i] = x0[i];\n  }\n  for (size_t k = 1; k < input_count; k++) {\n    auto xk = reinterpret_cast<const float *>(inputs[k].Data().get());\n    for (size_t i = 0; i < item_count; i++) {\n      if (sub_) {\n        y[i] = y[i] - xk[i];\n      } else {\n        y[i] = y[i] + xk[i];\n      }\n    }\n  }\n  outputs->push_back(output);\n  for (size_t k = 1; k < output_count; k++) {\n    auto output_k = get_output_tensor(k);\n    auto yk = reinterpret_cast<float *>(output_k.MutableData());\n    for (size_t i = 0; i < item_count; i++) {\n      yk[i] = y[i] + k;\n    }\n    outputs->push_back(output_k);\n  }\n  return mindspore::kSuccess;\n}\n\nStatus GraphImplStubAdd::Load(uint32_t device_id) {\n  LoadInner();\n  auto status = CheckContext();\n  if (!status.IsOk()) {\n    return status;\n  }\n  if (input_count == 0 || output_count == 0) {\n    MS_LOG_ERROR << \"Invalid input count or output count, input count: \" << input_count\n                 << \", output count: \" << output_count;\n    return kCoreFailed;\n  }\n  MS_LOG_INFO << \"input count: \" << input_count << \", output count: \" << output_count;\n  Init({2, 2});\n  return kSuccess;\n}\n\nStatus GraphImplStubAdd::CheckContext() {\n  auto file_name = 
graph_->graph_data_->GetFuncGraph()->file_name_;\n  bool enable_lite = false;\n  if (file_name.find(\"lite\") != std::string::npos) {\n    enable_lite = true;\n  }\n  auto device_info_list = graph_context_->MutableDeviceInfo();\n  if (!enable_lite && device_info_list.size() > 1) {\n    return kCoreFailed;\n  }\n  auto beg = file_name.find('@');\n  if (beg == std::string::npos) {\n    return kSuccess;\n  }\n  auto device_beg = file_name.find('_', beg);\n  std::stringstream ss(file_name.substr(device_beg + 1));\n  std::vector<std::string> device_list;\n\n  std::string device_info;\n  while (std::getline(ss, device_info, '_')) {\n    device_list.push_back(device_info);\n  }\n\n  if (device_list.size() != device_info_list.size()) {\n    return kCoreFailed;\n  }\n  std::map<std::string, mindspore::DeviceType> device_type_map{\n    {\"cpu\", kCPU}, {\"gpu\", kGPU}, {\"ascend\", kAscend}};\n  for (size_t i = 0; i < device_list.size(); ++i) {\n    if (device_type_map[device_list[i]] != device_info_list[i]->GetDeviceType()) {\n      return kCoreFailed;\n    }\n  }\n  return kSuccess;\n}\n\nvoid GraphImplStubAdd::LoadInner() {\n  auto file_name = graph_->graph_data_->GetFuncGraph()->file_name_;\n  MS_LOG_INFO << \"model file ------------------- \" << file_name;\n  auto beg = file_name.find(\"tensor_add\");  // tensor_add_2_2.mindir or tensor_sub_2_2.mindir\n  if (beg == std::string::npos) {\n    beg = file_name.find(\"tensor_sub\");\n    if (beg == std::string::npos) {\n      return;\n    }\n    sub_ = true;\n  }\n  beg += std::string(\"tensor_add\").size();\n  auto input_beg = file_name.find(\"_\", beg);\n  if (input_beg == std::string::npos) {\n    return;\n  }\n  auto output_beg = file_name.find(\"_\", input_beg + 1);\n  if (output_beg == std::string::npos) {\n    return;\n  }\n  auto dot_beg = file_name.find(\".mindir\", output_beg + 1);\n  if (dot_beg == std::string::npos) {\n    return;\n  }\n  input_count = std::stoi(file_name.substr(input_beg + 1, output_beg));\n  
output_count = std::stoi(file_name.substr(output_beg + 1, dot_beg));\n}\n\nstd::vector<MSTensor> GraphImplStubAdd::GetInputs() { return inputs_; }\n\nstd::vector<MSTensor> GraphImplStubAdd::GetOutputs() { return outputs_; }\n\nbool GraphImplStubAdd::CheckDeviceSupport(mindspore::DeviceType device_type) {\n  if (device_type == kCPU) {\n    const char *value = ::getenv(\"SERVING_ENABLE_CPU_DEVICE\");\n    if (value == nullptr || std::string(value) != \"1\") {\n      return false;\n    }\n  } else if (device_type == kGPU) {\n    const char *value = ::getenv(\"SERVING_ENABLE_GPU_DEVICE\");\n    if (value == nullptr || std::string(value) != \"1\") {\n      return false;\n    }\n  } else if (device_type == kAscend || device_type == kAscend310 || device_type == kAscend910) {\n    const char *value = ::getenv(\"SERVING_ENABLE_GPU_DEVICE\");\n    if (value == nullptr || std::string(value) != \"1\") {\n      return true;\n    }\n  }\n  return true;\n}\n\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/graph_impl_stub.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_SERVING_GRAPH_IMPL_STUB_H\n#define MINDSPORE_SERVING_GRAPH_IMPL_STUB_H\n\n#include <functional>\n#include <map>\n#include <string>\n#include <vector>\n#include <memory>\n#include <utility>\n#include \"include/api/status.h\"\n#include \"include/api/graph.h\"\n#include \"cxx_api/graph/graph_impl.h\"\n#include \"cxx_api/model/model_impl.h\"\n\nnamespace mindspore {\nclass GraphImplStubAdd : public GraphCell::GraphImpl {\n public:\n  GraphImplStubAdd() = default;\n  ~GraphImplStubAdd() = default;\n\n  Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override;\n  Status Load(uint32_t device_id) override;\n\n  std::vector<MSTensor> GetInputs() override;\n  std::vector<MSTensor> GetOutputs() override;\n  bool CheckDeviceSupport(mindspore::DeviceType device_type) override;\n private:\n  std::vector<MSTensor> inputs_;\n  std::vector<MSTensor> outputs_;\n  uint64_t input_count = 2;\n  uint64_t output_count = 1;\n  bool sub_ = false;  // add or sub op\n\n  void Init(const std::vector<int64_t> &add_shape);\n  void LoadInner();\n  Status CheckContext();\n};\n\n}  // namespace mindspore\n\n#endif  // MINDSPORE_SERVING_GRAPH_IMPL_STUB_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/allocator.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_INCLUDE_API_ALLOCATOR_H\n#define MINDSPORE_INCLUDE_API_ALLOCATOR_H\n\n#include <memory>\n#include \"include/api/types.h\"\n\nnamespace mindspore {\n/// \\brief Allocator defined a memory pool for malloc memory and free memory dynamically.\nclass MS_API Allocator {\n public:\n  /// \\brief Destructor of MindSpore Allocator.\n  virtual ~Allocator() = default;\n\n  /// \\brief Method to request memory.\n  ///\n  /// \\param[in] size Define the memory size to request.\n  virtual void *Malloc(size_t size) = 0;\n\n  /// \\brief Method to request memory.\n  ///\n  /// \\param[in] weight Defines the width of memory to request\n  /// \\param[in] height Defines the height of memory to request\n  /// \\param[in] type Defines the data type of memory to request\n  virtual void *Malloc(size_t weight, size_t height, DataType type) {\n    return nullptr;\n  }\n\n  /// \\brief Method to free memory.\n  ///\n  /// \\param[in] ptr Define the pointer of a certain memory.\n  virtual void Free(void *ptr) = 0;\n\n  /// \\brief Reference count of a certain memory.\n  ///\n  /// \\param[in] ptr Define the pointer of a certain memory.\n  ///\n  /// \\return Reference count of a certain memory currently.\n  virtual int RefCount(void *ptr) = 0;\n\n  /// \\brief Set reference count of a certain memory.\n  ///\n  /// \\param[in] ptr Define the 
pointer of a certain memory.\n  /// \\param[in] ref_count Define the reference count to set.\n  ///\n  /// \\return Reference count of a certain memory after setting.\n  virtual int SetRefCount(void *ptr, int ref_count) = 0;\n\n  /// \\brief Decrease the reference count of a certain memory.\n  ///\n  /// \\param[in] ptr Define the pointer of a certain memory.\n  /// \\param[in] ref_count Define the reference count to reduce.\n  ///\n  /// \\return Reference count of a certain memory after decreasing.\n  virtual int DecRefCount(void *ptr, int ref_count) = 0;\n\n  /// \\brief Increase the reference count of a certain memory.\n  ///\n  /// \\param[in] ptr Define the pointer of a certain memory.\n  /// \\param[in] ref_count Define the reference count to increase.\n  ///\n  /// \\return Reference count of a certain memory after increasing.\n  virtual int IncRefCount(void *ptr, int ref_count) = 0;\n\n  /// \\brief Static method to create an allocator.\n  ///\n  /// \\return Smart pointer of an allocator.\n  static std::shared_ptr<Allocator> Create();\n\n  /// \\brief Prepare a certain memory.\n  ///\n  /// \\param[in] ptr Define the pointer of a certain memory to prepare.\n  ///\n  /// \\return Pointer of ready memory.\n  virtual void *Prepare(void *ptr) { return ptr; }\n\n protected:\n  // memory aligned bytes\n  size_t aligned_size_ = 32;\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_ALLOCATOR_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/callback/callback.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_CALLBACK_CALLBACK_H\n#define MINDSPORE_INCLUDE_API_CALLBACK_CALLBACK_H\n\n#include <cstddef>\n#include <string>\n#include <vector>\n#include <memory>\n#include <utility>\n#include \"include/api/data_type.h\"\n#include \"include/api/dual_abi_helper.h\"\n\nnamespace mindspore {\nclass Model;\nclass ModelImpl;\nclass CallbackImpl;\n\nusing GraphPoint = std::pair<int, float>;\n\nstruct TrainCallBackData {\n  TrainCallBackData(bool train_mode, int epoch, int step, Model *model): train_mode_(train_mode), epoch_(epoch),\n                    step_(step), model_(model) {}\n\n  bool train_mode_;       /**< training mode of LiteSession object */\n  unsigned int epoch_;    /**< the current training epoch (starts at 0) */\n  unsigned int step_ = 0; /**< the current step within the epoch */\n  Model *model_;  /**< pointer to the Model object */\n};\n\nenum CallbackRetValue : uint32_t {\n  kContinue = 0,\n  kStopTraining = 1,\n  kExit = 2,\n  kUnknownRetValue = 0xFFFFFFFF\n};\n\nclass TrainCallBack {\n public:\n  virtual ~TrainCallBack() = default;\n\n  /// \\brief This method is called once before the network executing\n  ///\n  /// \\param[in] cb_data info about current execution\n  virtual void Begin(const TrainCallBackData &cb_data) {}\n\n  /// \\brief This method is called once following the network execution\n  
///\n  /// \\param[in] cb_data info about current execution\n  virtual void End(const TrainCallBackData &cb_data) {}\n\n  /// \\brief This method is called at the beginning of each epoch\n  ///\n  /// \\param[in] cb_data info about current execution\n  virtual void EpochBegin(const TrainCallBackData &cb_data) {}\n\n  /// \\brief This method is called after the run of each epoch\n  ///\n  /// \\param[in] cb_data info about current execution\n  ///\n  /// \\return indication if to continue in the train loop:\n  ///         RET_CONTINUE -- continue training\n  ///         RET_STOP_TRAINING -- stop training (e.g., due to achieved accuracy)\n  ///         RET_EXIT -- Exit training (due to error of some sort)\n  virtual CallbackRetValue EpochEnd(const TrainCallBackData &cb_data) { return kContinue; }\n\n  /// \\brief This method is called at the beginning of each step\n  ///\n  /// \\param[in] cb_data info about current execution\n  virtual void StepBegin(const TrainCallBackData &cb_data) {}\n\n  /// \\brief This method is called after each step is run\n  ///\n  /// \\param[in] cb_data info about current execution\n  virtual void StepEnd(const TrainCallBackData &cb_data) {}\n\n protected:\n  friend class Model;\n  friend class ModelImpl;\n  CallbackImpl* callback_impl_ = nullptr;\n};\n\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_CALLBACK_CALLBACK_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/callback/ckpt_saver.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_CALLBACK_CKPT_SAVER_H\n#define MINDSPORE_INCLUDE_API_CALLBACK_CKPT_SAVER_H\n\n#include <cstddef>\n#include <string>\n#include <vector>\n#include <memory>\n#include \"include/api/callback/callback.h\"\n#include \"include/api/dual_abi_helper.h\"\n\nnamespace mindspore {\n\nclass CkptSaver: public TrainCallBack {\n public:\n  inline CkptSaver(int save_every_n, const std::string &filename_prefix);\n  virtual ~CkptSaver();\n\n private:\n  CkptSaver(int save_every_n, const std::vector<char> &filename_prefix);\n};\n\nCkptSaver::CkptSaver(int save_every_n, const std::string &filename_prefix)\n    : CkptSaver(save_every_n, StringToChar(filename_prefix)) {}\n\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_CALLBACK_CKPT_SAVER_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/callback/loss_monitor.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_CALLBACK_LOSS_MONITOR_H\n#define MINDSPORE_INCLUDE_API_CALLBACK_LOSS_MONITOR_H\n\n#include <cstddef>\n#include <vector>\n#include <utility>\n#include \"include/api/callback/callback.h\"\n\nnamespace mindspore {\n\nclass LossMonitor: public TrainCallBack {\n public:\n  explicit LossMonitor(int print_every_n_steps = INT_MAX);\n  virtual ~LossMonitor();\n  const std::vector<GraphPoint> &GetLossPoints();\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_CALLBACK_LOSS_MONITOR_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/callback/lr_scheduler.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_CALLBACK_LR_SCHEDULER_H\n#define MINDSPORE_INCLUDE_API_CALLBACK_LR_SCHEDULER_H\n\n#include <cstddef>\n#include <string>\n#include <vector>\n#include <memory>\n#include \"include/api/callback/callback.h\"\n\nnamespace mindspore {\n\nconstexpr int DONT_UPDATE_LR = 0;\nconstexpr int UPDATE_LR = 1;\n\nusing LR_Lambda = std::function<int(float *lr, int epoch, void *cb_data)>;\n\n/// \\brief Multiply the LR by a factor of gamma every epoch\nint MultiplicativeLRLambda(float *lr, int epoch, void *multiplication);\n\n/// \\brief Multiply the LR by a factor of gamma every step_size\nint StepLRLambda(float *lr, int epoch, void *step_size);\nstruct StepLRLambda {\n  StepLRLambda(int step, float g) : step_size(step), gamma(g) {}\n\n  int step_size;  // period of LR decay\n  float gamma;    // LR decay factor\n};\n\nclass LRScheduler: public TrainCallBack {\n public:\n  explicit LRScheduler(LR_Lambda lambda_func, void *lr_cb_data = nullptr, int step = 1);\n  virtual ~LRScheduler();\n};\n\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_CALLBACK_LR_SCHEDULER_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/callback/time_monitor.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_CALLBACK_TIME_MONITOR_H\n#define MINDSPORE_INCLUDE_API_CALLBACK_TIME_MONITOR_H\n\n#include <cstddef>\n#include <string>\n#include <vector>\n#include <memory>\n#include \"include/api/callback/callback.h\"\n\nnamespace mindspore {\n\nclass TimeMonitor: public TrainCallBack {\n public:\n  virtual ~TimeMonitor() = default;\n  void EpochBegin(const TrainCallBackData &cb_data) override;\n  CallbackRetValue EpochEnd(const TrainCallBackData &cb_data) override;\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_CALLBACK_TIME_MONITOR_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/callback/train_accuracy.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_CALLBACK_TRAIN_ACCURACY_H\n#define MINDSPORE_INCLUDE_API_CALLBACK_TRAIN_ACCURACY_H\n\n#include <cstddef>\n#include <string>\n#include <vector>\n#include <memory>\n#include <utility>\n#include \"include/api/callback/callback.h\"\n#include \"include/api/metrics/accuracy.h\"\n\nnamespace mindspore {\n\nclass TrainAccuracy: public TrainCallBack {\n public:\n  explicit TrainAccuracy(int print_every_n = INT_MAX,\n                         int accuracy_metrics = METRICS_CLASSIFICATION,\n                         const std::vector<int> &input_indexes = {1},\n                         const std::vector<int> &output_indexes = {0});\n  virtual ~TrainAccuracy();\n  const std::vector<GraphPoint> &GetAccuracyPoints();\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_CALLBACK_TRAIN_ACCURACY_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/cell.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_CELL_H\n#define MINDSPORE_INCLUDE_API_CELL_H\n#include <string>\n#include <vector>\n#include <map>\n#include <memory>\n#include \"include/api/status.h\"\n#include \"include/api/types.h\"\n#include \"include/api/graph.h\"\n\nnamespace mindspore {\nclass InputAndOutput;\nclass Context;\nusing Input = InputAndOutput;\nusing Output = InputAndOutput;\n\nclass MS_API CellBase {\n public:\n  CellBase() = default;\n  virtual ~CellBase() = default;\n  virtual std::vector<Output> Construct(const std::vector<Input> &inputs) { return {}; }\n  virtual std::shared_ptr<CellBase> Clone() const = 0;\n  virtual Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) { return kSuccess; }\n  std::vector<Output> operator()(const std::vector<Input> &inputs) const;\n};\n\ntemplate <class T>\nclass MS_API Cell : public CellBase {\n public:\n  virtual ~Cell() = default;\n  std::shared_ptr<CellBase> Clone() const override { return std::make_shared<T>(static_cast<const T &>(*this)); }\n};\n\nclass MS_API ParameterCell final : public Cell<ParameterCell> {\n public:\n  ParameterCell() = default;\n  ~ParameterCell() override = default;\n\n  ParameterCell(const ParameterCell &);\n  ParameterCell &operator=(const ParameterCell &);\n\n  ParameterCell(ParameterCell &&);\n  ParameterCell &operator=(ParameterCell 
&&);\n\n  explicit ParameterCell(const MSTensor &);\n  ParameterCell &operator=(const MSTensor &);\n\n  explicit ParameterCell(MSTensor &&);\n  ParameterCell &operator=(MSTensor &&);\n\n  MSTensor GetTensor() const { return tensor_; }\n\n private:\n  MSTensor tensor_;\n};\n\nclass MS_API OpCellBase : public CellBase {\n public:\n  explicit OpCellBase(const std::string &name) : name_(name) {}\n  ~OpCellBase() override = default;\n  const std::string &GetOpType() const { return name_; }\n\n protected:\n  std::string name_;\n};\n\ntemplate <class T>\nclass MS_API OpCell : public OpCellBase, public std::enable_shared_from_this<T> {\n public:\n  explicit OpCell(const std::string &name) : OpCellBase(name) {}\n  ~OpCell() override = default;\n  std::shared_ptr<CellBase> Clone() const override { return std::make_shared<T>(static_cast<const T &>(*this)); }\n};\n\nclass MS_API GraphCell final : public Cell<GraphCell> {\n public:\n  class GraphImpl;\n\n  GraphCell() = default;\n  ~GraphCell() override = default;\n\n  explicit GraphCell(const Graph &);\n  explicit GraphCell(Graph &&);\n  explicit GraphCell(const std::shared_ptr<Graph> &);\n\n  void SetContext(const std::shared_ptr<Context> &context);\n  const std::shared_ptr<Graph> &GetGraph() const { return graph_; }\n  Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override;\n  std::vector<MSTensor> GetInputs();\n  std::vector<MSTensor> GetOutputs();\n  Status Load(uint32_t device_id);\n\n private:\n  friend class Model;\n\n  std::shared_ptr<Graph> graph_;\n  std::shared_ptr<GraphImpl> executor_;\n};\n\nclass MS_API InputAndOutput {\n public:\n  InputAndOutput();\n  ~InputAndOutput() = default;\n\n  // no explicit\n  InputAndOutput(const MSTensor &);  // NOLINT(runtime/explicit)\n  InputAndOutput(MSTensor &&);       // NOLINT(runtime/explicit)\n\n  InputAndOutput(const std::shared_ptr<CellBase> &, const std::vector<InputAndOutput> &, int32_t index);\n\n  int32_t GetIndex() const { return 
index_; }\n  void SetIndex(int32_t index) { index_ = index; }\n\n private:\n  std::shared_ptr<CellBase> cell_;\n  std::vector<InputAndOutput> prev_;\n  int32_t index_;\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_CELL_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/cfg.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_CFG_H\n#define MINDSPORE_INCLUDE_API_CFG_H\n\n#include <cstddef>\n#include <string>\n#include <vector>\n#include <memory>\n#include \"include/api/data_type.h\"\n#include \"include/api/dual_abi_helper.h\"\n#include \"include/api/types.h\"\n\nnamespace mindspore {\n\nclass MixPrecisionCfg {\n public:\n  MixPrecisionCfg() {\n    this->dynamic_loss_scale_ = false;\n    this->loss_scale_ = 128.0f;\n    this->num_of_not_nan_iter_th_ = 1000;\n  }\n\n  ~MixPrecisionCfg() = default;\n\n  bool dynamic_loss_scale_ = false;   /**< Enable\\disable dynamic loss scale during mix precision training */\n  float loss_scale_;                  /**< Initial loss scale factor  */\n  uint32_t num_of_not_nan_iter_th_;   /**< a threshold for modifying loss scale when dynamic loss scale is enabled */\n  bool is_raw_mix_precision_ = false; /**< Is mix precision model export from mindspore  */\n};\n\nclass TrainCfg {\n public:\n  TrainCfg() { this->loss_name_ = \"_loss_fn\"; }\n\n  ~TrainCfg() = default;\n\n  OptimizationLevel optimization_level_ = kO0;\n  std::string loss_name_;             /**< Set part of the name that identify a loss kernel */\n  MixPrecisionCfg mix_precision_cfg_; /**< Mix precision configuration */\n  bool accumulate_gradients_ = false;\n};\n\n}  // namespace mindspore\n#endif  // 
MINDSPORE_INCLUDE_API_CFG_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/context.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_CONTEXT_H\n#define MINDSPORE_INCLUDE_API_CONTEXT_H\n\n#include <string>\n#include <memory>\n#include <vector>\n#include <map>\n#include \"include/api/types.h\"\n#include \"include/api/dual_abi_helper.h\"\n\nnamespace mindspore {\nenum DeviceType {\n  kCPU = 0,\n  kGPU,\n  kKirinNPU,\n  kAscend,\n  kAscend910,\n  kAscend310,\n  // add new type here\n  kInvalidDeviceType = 100,\n};\n\nclass Allocator;\nclass Delegate;\nclass DeviceInfoContext;\n\n/// \\brief Context is used to store environment variables during execution.\nclass MS_API Context {\n public:\n  struct Data;\n  Context();\n  ~Context() = default;\n\n  /// \\brief Set the number of threads at runtime. Only valid for Lite.\n  ///\n  /// \\param[in] thread_num the number of threads at runtime.\n  void SetThreadNum(int32_t thread_num);\n\n  /// \\brief Get the current thread number setting. Only valid for Lite.\n  ///\n  /// \\return The current thread number setting.\n  int32_t GetThreadNum() const;\n\n  /// \\brief Set the thread affinity to CPU cores. Only valid for Lite.\n  ///\n  /// \\param[in] mode: 0: no affinities, 1: big cores first, 2: little cores first\n  void SetThreadAffinity(int mode);\n\n  /// \\brief Get the thread affinity of CPU cores. Only valid for Lite.\n  ///\n  /// \\return Thread affinity to CPU cores. 
0: no affinities, 1: big cores first, 2: little cores first\n  int GetThreadAffinityMode() const;\n\n  /// \\brief Set the thread lists to CPU cores. Only valid for Lite.\n  ///\n  /// \\note If core_list and mode are set by SetThreadAffinity at the same time, the core_list is effective, but the\n  /// mode is not effective.\n  ///\n  /// \\param[in] core_list: a vector of thread core lists.\n  void SetThreadAffinity(const std::vector<int> &core_list);\n\n  /// \\brief Get the thread lists of CPU cores. Only valid for Lite.\n  ///\n  /// \\return core_list: a vector of thread core lists.\n  std::vector<int32_t> GetThreadAffinityCoreList() const;\n\n  /// \\brief Set the status whether to perform model inference or training in parallel. Only valid for Lite.\n  ///\n  /// \\param[in] is_parallel: true, parallel; false, not in parallel.\n  void SetEnableParallel(bool is_parallel);\n\n  /// \\brief Get the status whether to perform model inference or training in parallel. Only valid for Lite.\n  ///\n  /// \\return Bool value that indicates whether in parallel.\n  bool GetEnableParallel() const;\n\n  /// \\brief Set Delegate to access third-party AI framework. Only valid for Lite.\n  ///\n  /// \\param[in] Pointer to the custom delegate.\n  void SetDelegate(const std::shared_ptr<Delegate> &delegate);\n\n  /// \\brief Get the delegate of the third-party AI framework. Only valid for Lite.\n  ///\n  /// \\return Pointer to the custom delegate.\n  std::shared_ptr<Delegate> GetDelegate() const;\n\n  /// \\brief Set quant model to run as float model in multi device.\n  ///\n  /// \\param[in] float_mode: true, run as float model; false, not run as float model.\n  void SetMultiModalHW(bool float_mode);\n\n  /// \\brief Get the mode of the model run.\n  ///\n  /// \\return Bool value that indicates whether run as float model\n  bool GetMultiModalHW() const;\n\n  /// \\brief Get a mutable reference of DeviceInfoContext vector in this context. 
Only MindSpore Lite supports\n  /// heterogeneous scenarios with multiple members in the vector.\n  ///\n  /// \\return Mutable reference of DeviceInfoContext vector in this context.\n  std::vector<std::shared_ptr<DeviceInfoContext>> &MutableDeviceInfo();\n\n private:\n  std::shared_ptr<Data> data_;\n};\n\n/// \\brief DeviceInfoContext defines different device contexts.\nclass MS_API DeviceInfoContext : public std::enable_shared_from_this<DeviceInfoContext> {\n public:\n  struct Data;\n\n  DeviceInfoContext();\n  virtual ~DeviceInfoContext() = default;\n\n  /// \\brief Get the type of this DeviceInfoContext.\n  ///\n  /// \\return Type of this DeviceInfoContext.\n  virtual enum DeviceType GetDeviceType() const = 0;\n\n  /// \\brief A similar function to RTTI is provided when the -fno-rtti compilation option is turned on, which converts\n  /// DeviceInfoContext to a shared pointer of type T, and returns nullptr if the conversion fails.\n  ///\n  /// \\param T Type\n  /// \\return A pointer of type T after conversion. 
If the conversion fails, it will be nullptr.\n  template <class T>\n  std::shared_ptr<T> Cast() {\n    static_assert(std::is_base_of<DeviceInfoContext, T>::value, \"Wrong cast type.\");\n    if (GetDeviceType() != T().GetDeviceType()) {\n      return nullptr;\n    }\n\n    return std::static_pointer_cast<T>(shared_from_this());\n  }\n  /// \\brief obtain provider's name\n  ///\n  /// \\return provider's name.\n  inline std::string GetProvider() const;\n  /// \\brief set provider's name.\n  ///\n  /// \\param[in] provider define the provider's name.\n\n  inline void SetProvider(const std::string &provider);\n  /// \\brief obtain provider's device type.\n  ///\n  /// \\return provider's device type.\n\n  inline std::string GetProviderDevice() const;\n  /// \\brief set provider's device type.\n  ///\n  /// \\param[in] device define the provider's device type.EG: CPU.\n  inline void SetProviderDevice(const std::string &device);\n\n  /// \\brief set memory allocator.\n  ///\n  /// \\param[in] allocator define the memory allocator which can be defined by user.\n  void SetAllocator(const std::shared_ptr<Allocator> &allocator);\n\n  /// \\brief obtain memory allocator.\n  ///\n  /// \\return memory allocator.\n  std::shared_ptr<Allocator> GetAllocator() const;\n\n protected:\n  std::vector<char> GetProviderChar() const;\n  void SetProvider(const std::vector<char> &provider);\n  std::vector<char> GetProviderDeviceChar() const;\n  void SetProviderDevice(const std::vector<char> &device);\n\n  std::shared_ptr<Data> data_;\n};\n\nstd::string DeviceInfoContext::GetProvider() const { return CharToString(GetProviderChar()); }\nvoid DeviceInfoContext::SetProvider(const std::string &provider) { SetProvider(StringToChar(provider)); }\nstd::string DeviceInfoContext::GetProviderDevice() const { return CharToString(GetProviderDeviceChar()); }\nvoid DeviceInfoContext::SetProviderDevice(const std::string &device) { SetProviderDevice(StringToChar(device)); }\n\n/// \\brief Derived from 
DeviceInfoContext, The configuration of the model running on the CPU. This option is only valid\n/// for MindSpore Lite.\nclass MS_API CPUDeviceInfo : public DeviceInfoContext {\n public:\n  /// \\brief Get the type of this DeviceInfoContext.\n  ///\n  /// \\return Type of this DeviceInfoContext.\n  enum DeviceType GetDeviceType() const override { return DeviceType::kCPU; };\n\n  /// \\brief Set enables to perform the float16 inference\n  ///\n  /// \\param[in] is_fp16 Enable float16 inference or not.\n  void SetEnableFP16(bool is_fp16);\n\n  /// \\brief Get enables to perform the float16 inference\n  ///\n  /// \\return Whether enable float16 inference.\n  bool GetEnableFP16() const;\n};\n\n/// \\brief Derived from DeviceInfoContext, The configuration of the model running on the NPU. This option is only valid\n/// for MindSpore Lite.\nclass MS_API KirinNPUDeviceInfo : public DeviceInfoContext {\n public:\n  /// \\brief Get the type of this DeviceInfoContext.\n  ///\n  /// \\return Type of this DeviceInfoContext.\n  enum DeviceType GetDeviceType() const override { return DeviceType::kKirinNPU; };\n\n  /// \\brief Set the NPU frequency.\n  ///\n  /// \\param[in] frequency Can be set to 1 (low power consumption), 2 (balanced), 3 (high performance), 4 (extreme\n  /// performance), default as 3.\n  void SetFrequency(int frequency);\n\n  /// \\brief Get the NPU frequency.\n  ///\n  /// \\return NPU frequency\n  int GetFrequency() const;\n};\n\n/// \\brief Derived from DeviceInfoContext, The configuration of the model running on the GPU.\nclass MS_API GPUDeviceInfo : public DeviceInfoContext {\n public:\n  /// \\brief Get the type of this DeviceInfoContext.\n  ///\n  /// \\return Type of this DeviceInfoContext.\n  enum DeviceType GetDeviceType() const override { return DeviceType::kGPU; };\n\n  /// \\brief Set device id.\n  ///\n  /// \\param[in] device_id The device id.\n  void SetDeviceID(uint32_t device_id);\n\n  /// \\brief Get the device id.\n  ///\n  /// \\return 
The device id.\n  uint32_t GetDeviceID() const;\n\n  /// \\brief Get the distribution rank id.\n  ///\n  /// \\return The device id.\n  int GetRankID() const;\n\n  /// \\brief Get the distribution group size.\n  ///\n  /// \\return The device id.\n  int GetGroupSize() const;\n\n  /// \\brief Set the precision mode.\n  ///\n  /// \\param[in] precision_mode Optional \"origin\", \"fp16\". \"origin\" is set as default.\n  inline void SetPrecisionMode(const std::string &precision_mode);\n\n  /// \\brief Get the precision mode.\n  ///\n  /// \\return The precision mode.\n  inline std::string GetPrecisionMode() const;\n\n  /// \\brief Set enables to perform the float16 inference\n  ///\n  /// \\param[in] is_fp16 Enable float16 inference or not.\n  void SetEnableFP16(bool is_fp16);\n\n  /// \\brief Get enables to perform the float16 inference\n  ///\n  /// \\return Whether enable float16 inference.\n  bool GetEnableFP16() const;\n\n  /// \\brief Set enables to sharing mem with OpenGL\n  ///\n  /// \\param[in] is_enable_sharing_mem_with_gl Enable sharing OpenCL Memory with OpenGL or not.\n  void SetEnableGLTexture(bool is_enable_gl_texture);\n\n  /// \\brief Get enables to sharing mem with OpenGL\n  ///\n  /// \\return Whether enable sharing mem with OpenGL.\n  bool GetEnableGLTexture() const;\n\n  /// \\brief Set current OpenGL context\n  ///\n  /// \\param[in] gl_context Current OpenGL context.\n  void SetGLContext(void *gl_context);\n\n  /// \\brief Get current OpenGL context\n  ///\n  /// \\return the OpenCL context by OpenGL used.\n  void *GetGLContext() const;\n\n  /// \\brief Set current OpenGL display\n  ///\n  /// \\param[in] gl_display Current OpenGL display.\n  void SetGLDisplay(void *gl_display);\n\n  /// \\brief Get current OpenGL display\n  ///\n  /// \\return the OpenCL display by OpenGL used.\n  void *GetGLDisplay() const;\n\n private:\n  void SetPrecisionMode(const std::vector<char> &precision_mode);\n  std::vector<char> GetPrecisionModeChar() 
const;\n};\n\nvoid GPUDeviceInfo::SetPrecisionMode(const std::string &precision_mode) {\n  SetPrecisionMode(StringToChar(precision_mode));\n}\nstd::string GPUDeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }\n\n/// \\brief Derived from DeviceInfoContext, The configuration of the model running on the Ascend. This option is\n/// invalid for MindSpore Lite.\nclass MS_API AscendDeviceInfo : public DeviceInfoContext {\n public:\n  /// \\brief Get the type of this DeviceInfoContext.\n  ///\n  /// \\return Type of this DeviceInfoContext.\n  enum DeviceType GetDeviceType() const override { return DeviceType::kAscend; };\n\n  /// \\brief Set device id.\n  ///\n  /// \\param[in] device_id The device id.\n  void SetDeviceID(uint32_t device_id);\n\n  /// \\brief Get the device id.\n  ///\n  /// \\return The device id.\n  uint32_t GetDeviceID() const;\n\n  /// \\brief Set AIPP configuration file path.\n  ///\n  /// \\param[in] cfg_path AIPP configuration file path.\n  inline void SetInsertOpConfigPath(const std::string &cfg_path);\n\n  /// \\brief Get AIPP configuration file path.\n  ///\n  /// \\return AIPP configuration file path.\n  inline std::string GetInsertOpConfigPath() const;\n\n  /// \\brief Set format of model inputs.\n  ///\n  /// \\param[in] format Optional \"NCHW\", \"NHWC\", etc.\n  inline void SetInputFormat(const std::string &format);\n\n  /// \\brief Get format of model inputs.\n  ///\n  /// \\return The format of model inputs.\n  inline std::string GetInputFormat() const;\n\n  /// \\brief Set shape of model inputs.\n  ///\n  /// \\param[in] shape e.g. \"input_op_name1: 1,2,3,4;input_op_name2: 4,3,2,1\".\n  inline void SetInputShape(const std::string &shape);\n\n  /// \\brief Get shape of model inputs.\n  ///\n  /// \\return The shape of model inputs.\n  inline std::string GetInputShape() const;\n\n  /// \\brief Set shape of model inputs.\n  ///\n  /// \\param[in] shape e.g. 
{{1, {1,2,3,4}}, {2, {4,3,2,1}}} means the first input shape 1,2,3,4 and the second input\n  /// shape 4,3,2,1.\n  void SetInputShapeMap(const std::map<int, std::vector<int>> &shape);\n\n  /// \\brief Get shape of model inputs.\n  ///\n  /// \\return The shape of model inputs.\n  std::map<int, std::vector<int>> GetInputShapeMap() const;\n\n  void SetDynamicBatchSize(const std::vector<size_t> &dynamic_batch_size);\n  inline std::string GetDynamicBatchSize() const;\n\n  /// \\brief Set the dynamic image size of model inputs.\n  ///\n  /// \\param[in] image size hw e.g. \"66,88;32,64\" means h1:66,w1:88; h2:32,w2:64.\n  inline void SetDynamicImageSize(const std::string &dynamic_image_size);\n\n  /// \\brief Get dynamic image size of model inputs.\n  ///\n  /// \\return The image size of model inputs.\n  inline std::string GetDynamicImageSize() const;\n\n  /// \\brief Set type of model outputs.\n  ///\n  /// \\param[in] output_type FP32, UINT8 or FP16, default as FP32.\n  void SetOutputType(enum DataType output_type);\n\n  /// \\brief Get type of model outputs.\n  ///\n  /// \\return The set type of model outputs.\n  enum DataType GetOutputType() const;\n\n  /// \\brief Set precision mode of model.\n  ///\n  /// \\param[in] precision_mode Optional \"force_fp16\", \"allow_fp32_to_fp16\", \"must_keep_origin_dtype\" and\n  /// \"allow_mix_precision\", \"force_fp16\" is set as default\n  inline void SetPrecisionMode(const std::string &precision_mode);\n\n  /// \\brief Get precision mode of model.\n  ///\n  /// \\return The set type of model outputs\n  inline std::string GetPrecisionMode() const;\n\n  /// \\brief Set op select implementation mode.\n  ///\n  /// \\param[in] op_select_impl_mode Optional \"high_performance\" and \"high_precision\", \"high_performance\" is set as\n  /// default.\n  inline void SetOpSelectImplMode(const std::string &op_select_impl_mode);\n\n  /// \\brief Get op select implementation mode.\n  ///\n  /// \\return The set op select implementation 
mode.\n  inline std::string GetOpSelectImplMode() const;\n\n  inline void SetFusionSwitchConfigPath(const std::string &cfg_path);\n  inline std::string GetFusionSwitchConfigPath() const;\n\n  // Optional \"l1_optimize\", \"l2_optimize\", \"off_optimize\" or \"l1_and_l2_optimize\", default as \"l2_optimize\"\n  inline void SetBufferOptimizeMode(const std::string &buffer_optimize_mode);\n  inline std::string GetBufferOptimizeMode() const;\n\n private:\n  void SetInsertOpConfigPath(const std::vector<char> &cfg_path);\n  std::vector<char> GetInsertOpConfigPathChar() const;\n\n  void SetInputFormat(const std::vector<char> &format);\n  std::vector<char> GetInputFormatChar() const;\n\n  void SetInputShape(const std::vector<char> &shape);\n  std::vector<char> GetInputShapeChar() const;\n\n  std::vector<char> GetDynamicBatchSizeChar() const;\n\n  void SetDynamicImageSize(const std::vector<char> &dynamic_image_size);\n  std::vector<char> GetDynamicImageSizeChar() const;\n\n  void SetPrecisionMode(const std::vector<char> &precision_mode);\n  std::vector<char> GetPrecisionModeChar() const;\n\n  void SetOpSelectImplMode(const std::vector<char> &op_select_impl_mode);\n  std::vector<char> GetOpSelectImplModeChar() const;\n\n  void SetFusionSwitchConfigPath(const std::vector<char> &cfg_path);\n  std::vector<char> GetFusionSwitchConfigPathChar() const;\n\n  void SetBufferOptimizeMode(const std::vector<char> &buffer_optimize_mode);\n  std::vector<char> GetBufferOptimizeModeChar() const;\n};\n\nusing Ascend310DeviceInfo = AscendDeviceInfo;\nusing Ascend910DeviceInfo = AscendDeviceInfo;\nusing Ascend710DeviceInfo = AscendDeviceInfo;\n\nvoid AscendDeviceInfo::SetInsertOpConfigPath(const std::string &cfg_path) {\n  SetInsertOpConfigPath(StringToChar(cfg_path));\n}\nstd::string AscendDeviceInfo::GetInsertOpConfigPath() const { return CharToString(GetInsertOpConfigPathChar()); }\n\nvoid AscendDeviceInfo::SetInputFormat(const std::string &format) { SetInputFormat(StringToChar(format)); 
}\nstd::string AscendDeviceInfo::GetInputFormat() const { return CharToString(GetInputFormatChar()); }\n\nvoid AscendDeviceInfo::SetInputShape(const std::string &shape) { SetInputShape(StringToChar(shape)); }\nstd::string AscendDeviceInfo::GetInputShape() const { return CharToString(GetInputShapeChar()); }\n\nstd::string AscendDeviceInfo::GetDynamicBatchSize() const { return CharToString(GetDynamicBatchSizeChar()); }\n\nvoid AscendDeviceInfo::SetDynamicImageSize(const std::string &dynamic_image_size) {\n  SetDynamicImageSize(StringToChar(dynamic_image_size));\n}\n\nstd::string AscendDeviceInfo::GetDynamicImageSize() const { return CharToString(GetDynamicImageSizeChar()); }\n\nvoid AscendDeviceInfo::SetPrecisionMode(const std::string &precision_mode) {\n  SetPrecisionMode(StringToChar(precision_mode));\n}\nstd::string AscendDeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }\n\nvoid AscendDeviceInfo::SetOpSelectImplMode(const std::string &op_select_impl_mode) {\n  SetOpSelectImplMode(StringToChar(op_select_impl_mode));\n}\nstd::string AscendDeviceInfo::GetOpSelectImplMode() const { return CharToString(GetOpSelectImplModeChar()); }\n\nvoid AscendDeviceInfo::SetFusionSwitchConfigPath(const std::string &cfg_path) {\n  SetFusionSwitchConfigPath(StringToChar(cfg_path));\n}\nstd::string AscendDeviceInfo::GetFusionSwitchConfigPath() const {\n  return CharToString(GetFusionSwitchConfigPathChar());\n}\n\nvoid AscendDeviceInfo::SetBufferOptimizeMode(const std::string &buffer_optimize_mode) {\n  SetBufferOptimizeMode(StringToChar(buffer_optimize_mode));\n}\nstd::string AscendDeviceInfo::GetBufferOptimizeMode() const { return CharToString(GetBufferOptimizeModeChar()); }\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_CONTEXT_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/data_type.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_DATA_TYPE_H_\n#define MINDSPORE_INCLUDE_API_DATA_TYPE_H_\n\n#include <stdint.h>\n\nnamespace mindspore {\nenum class DataType : int {\n  kTypeUnknown = 0,\n  kObjectTypeString = 12,\n  kObjectTypeList = 13,\n  kObjectTypeTuple = 14,\n  kObjectTypeTensorType = 17,\n  kNumberTypeBegin = 29,\n  kNumberTypeBool = 30,\n  kNumberTypeInt8 = 32,\n  kNumberTypeInt16 = 33,\n  kNumberTypeInt32 = 34,\n  kNumberTypeInt64 = 35,\n  kNumberTypeUInt8 = 37,\n  kNumberTypeUInt16 = 38,\n  kNumberTypeUInt32 = 39,\n  kNumberTypeUInt64 = 40,\n  kNumberTypeFloat16 = 42,\n  kNumberTypeFloat32 = 43,\n  kNumberTypeFloat64 = 44,\n  kNumberTypeEnd = 46,\n  // add new enum here\n  kInvalidType = INT32_MAX,\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_DATA_TYPE_H_\n"
  },
  {
    "path": "tests/ut/stub/include/api/delegate.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_INCLUDE_API_DELEGATE_H\n#define MINDSPORE_INCLUDE_API_DELEGATE_H\n\n#include <map>\n#include <vector>\n#include <memory>\n#include \"schema/model_generated.h\"\n#include \"include/api/kernel.h\"\n#include \"include/api/status.h\"\n\nnamespace mindspore {\ntypedef enum {\n  SCHEMA_INVALID = -1, /**< invalid version */\n  SCHEMA_CUR,          /**< current version for ms model defined in model.fbs*/\n  SCHEMA_V0,           /**< previous version for ms model defined in model_v0.fbs*/\n} SchemaVersion;\n\nusing KernelIter = std::vector<kernel::Kernel *>::iterator;\n\ntemplate <class T>\nclass MS_API DelegateModel {\n public:\n  /// \\brief Constructor of MindSpore Lite DelegateModel.\n  DelegateModel(std::vector<kernel::Kernel *> *kernels, const std::vector<MSTensor> &inputs,\n                const std::vector<MSTensor> &outputs, const std::map<kernel::Kernel *, const T *> &primitives,\n                SchemaVersion version)\n      : kernels_(kernels), inputs_(inputs), outputs_(outputs), primitives_(primitives), version_(version) {}\n\n  /// \\brief Destructor of MindSpore Lite DelegateModel.\n  ~DelegateModel() = default;\n\n  /// \\brief Get Primitive of kernel::Kernel.\n  ///\n  /// \\param[in] a kernel in DelegateModel kernels vector.\n  ///\n  /// \\return The Primitive of The kernel.\n  const T 
*GetPrimitive(kernel::Kernel *kernel) const {\n    if (primitives_.find(kernel) != primitives_.end()) {\n      return primitives_.at(kernel);\n    } else {\n      return nullptr;\n    }\n  }\n\n  /// \\brief Get the begin iterator of the DelegateModel kernels vector.\n  ///\n  /// \\return The begin iterator of the DelegateModel kernels vector.\n  KernelIter BeginKernelIterator() { return kernels_->begin(); }\n\n  /// \\brief Get the end iterator of the DelegateModel kernels vector.\n  ///\n  /// \\return The end iterator of the DelegateModel kernels vector.\n  KernelIter EndKernelIterator() { return kernels_->end(); }\n\n  /// \\brief Replace the continuous kernel supported by the delegate with a delegate graph kernel.\n  ///\n  /// \\param[in] from Define the begin iterator of continuous kernel supported by the delegate.\n  /// \\param[in] end Define the end iterator of continuous kernel supported by the delegate.\n  ///\n  /// \\return The next iterator after graph_kernel, point to the next kernel that is not visited.\n  KernelIter Replace(KernelIter from, KernelIter end, kernel::Kernel *graph_kernel) {\n    size_t insert_index = from - BeginKernelIterator();\n    if (insert_index >= kernels_->size()) {\n      return BeginKernelIterator();\n    }\n    kernels_->erase(from, end);\n    kernels_->insert(BeginKernelIterator() + insert_index, graph_kernel);\n    return BeginKernelIterator() + insert_index + 1;\n  }\n\n  /// \\brief Get the input tensors of DelegateModel.\n  ///\n  /// \\return The input tensor vector of DelegateModel.\n  const std::vector<mindspore::MSTensor> &inputs() { return this->inputs_; }\n\n  /// \\brief Get the output tensors of DelegateModel.\n  ///\n  /// \\return The output tensor vector of DelegateModel.\n  const std::vector<mindspore::MSTensor> &outputs() { return this->outputs_; }\n\n  /// \\brief Get the ms model version.\n  ///\n  /// \\return The schema version for the primitives map.\n  SchemaVersion GetVersion() const { return 
version_; }\n\n protected:\n  std::vector<kernel::Kernel *> *kernels_;\n  const std::vector<mindspore::MSTensor> &inputs_;\n  const std::vector<mindspore::MSTensor> &outputs_;\n  const std::map<kernel::Kernel *, const T *> &primitives_;\n  SchemaVersion version_;\n};\n\nclass MS_API Delegate {\n public:\n  /// \\brief Constructor of MindSpore Lite Delegate.\n  Delegate() = default;\n\n  /// \\brief Destructor of MindSpore Lite Delegate.\n  virtual ~Delegate() = default;\n\n  /// \\brief Init delegate.\n  ///\n  /// \\note Init will be called in Model::Build.\n  ///\n  /// \\return Status. If Status is kLiteNotSupport, the program will return to the MindSpore Lite inner inference.\n  virtual Status Init() = 0;\n\n  /// \\brief Build delegate graph for MindSpore Lite model.\n  ///\n  /// \\note Build will be called in Model::Build.\n  ///\n  /// \\param[in] model Define the delegate model to be built.\n  virtual Status Build(DelegateModel<schema::Primitive> *model) = 0;\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_DELEGATE_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/dual_abi_helper.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_\n#define MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_\n\n#include <algorithm>\n#include <iterator>\n#include <map>\n#include <memory>\n#include <string>\n#include <set>\n#include <unordered_map>\n#include <utility>\n#include <vector>\n\nnamespace mindspore {\ninline std::vector<char> StringToChar(const std::string &s) { return std::vector<char>(s.begin(), s.end()); }\n\ninline std::string CharToString(const std::vector<char> &c) { return std::string(c.begin(), c.end()); }\n\ninline std::pair<std::vector<char>, int32_t> PairStringToChar(const std::pair<std::string, int32_t> &s) {\n  return std::pair<std::vector<char>, int32_t>(std::vector<char>(s.first.begin(), s.first.end()), s.second);\n}\n\ninline std::pair<std::string, int32_t> PairCharToString(const std::pair<std::vector<char>, int32_t> &c) {\n  return std::pair<std::string, int32_t>(std::string(c.first.begin(), c.first.end()), c.second);\n}\n\ninline std::vector<std::vector<char>> VectorStringToChar(const std::vector<std::string> &s) {\n  std::vector<std::vector<char>> ret;\n  std::transform(s.begin(), s.end(), std::back_inserter(ret),\n                 [](auto str) { return std::vector<char>(str.begin(), str.end()); });\n  return ret;\n}\n\ninline std::vector<std::string> VectorCharToString(const std::vector<std::vector<char>> &c) 
{\n  std::vector<std::string> ret;\n  std::transform(c.begin(), c.end(), std::back_inserter(ret),\n                 [](auto ch) { return std::string(ch.begin(), ch.end()); });\n  return ret;\n}\n\ninline std::set<std::vector<char>> SetStringToChar(const std::set<std::string> &s) {\n  std::set<std::vector<char>> ret;\n  std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()),\n                 [](auto str) { return std::vector<char>(str.begin(), str.end()); });\n  return ret;\n}\n\ninline std::set<std::string> SetCharToString(const std::set<std::vector<char>> &c) {\n  std::set<std::string> ret;\n  std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()),\n                 [](auto ch) { return std::string(ch.begin(), ch.end()); });\n  return ret;\n}\n\ninline std::map<std::vector<char>, int32_t> MapStringToChar(const std::map<std::string, int32_t> &s) {\n  std::map<std::vector<char>, int32_t> ret;\n  std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) {\n    return std::pair<std::vector<char>, int32_t>(std::vector<char>(str.first.begin(), str.first.end()), str.second);\n  });\n  return ret;\n}\n\ninline std::map<std::string, int32_t> MapCharToString(const std::map<std::vector<char>, int32_t> &c) {\n  std::map<std::string, int32_t> ret;\n  std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) {\n    return std::pair<std::string, int32_t>(std::string(ch.first.begin(), ch.first.end()), ch.second);\n  });\n  return ret;\n}\n\ninline std::map<std::vector<char>, std::vector<char>> UnorderedMapStringToChar(\n  const std::unordered_map<std::string, std::string> &s) {\n  std::map<std::vector<char>, std::vector<char>> ret;\n  std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) {\n    return std::pair<std::vector<char>, std::vector<char>>(std::vector<char>(str.first.begin(), str.first.end()),\n                                                           
std::vector<char>(str.second.begin(), str.second.end()));\n  });\n  return ret;\n}\n\ninline std::unordered_map<std::string, std::string> UnorderedMapCharToString(\n  const std::map<std::vector<char>, std::vector<char>> &c) {\n  std::unordered_map<std::string, std::string> ret;\n  std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) {\n    return std::pair<std::string, std::string>(std::string(ch.first.begin(), ch.first.end()),\n                                               std::string(ch.second.begin(), ch.second.end()));\n  });\n  return ret;\n}\n\ninline std::map<std::vector<char>, std::vector<char>> MapStringToVectorChar(\n  const std::map<std::string, std::string> &s) {\n  std::map<std::vector<char>, std::vector<char>> ret;\n  std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) {\n    return std::pair<std::vector<char>, std::vector<char>>(std::vector<char>(str.first.begin(), str.first.end()),\n                                                           std::vector<char>(str.second.begin(), str.second.end()));\n  });\n  return ret;\n}\n\ninline std::map<std::string, std::string> MapVectorCharToString(\n  const std::map<std::vector<char>, std::vector<char>> &c) {\n  std::map<std::string, std::string> ret;\n  std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) {\n    return std::pair<std::string, std::string>(std::string(ch.first.begin(), ch.first.end()),\n                                               std::string(ch.second.begin(), ch.second.end()));\n  });\n  return ret;\n}\n\ninline std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ClassIndexStringToChar(\n  const std::vector<std::pair<std::string, std::vector<int32_t>>> &s) {\n  std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ret;\n  std::transform(s.begin(), s.end(), std::back_inserter(ret), [](auto str) {\n    return std::pair<std::vector<char>, 
std::vector<int32_t>>(std::vector<char>(str.first.begin(), str.first.end()),\n                                                              str.second);\n  });\n  return ret;\n}\n\ninline std::vector<std::pair<std::string, std::vector<int32_t>>> ClassIndexCharToString(\n  const std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> &c) {\n  std::vector<std::pair<std::string, std::vector<int32_t>>> ret;\n  std::transform(c.begin(), c.end(), std::back_inserter(ret), [](auto ch) {\n    return std::pair<std::string, std::vector<int32_t>>(std::string(ch.first.begin(), ch.first.end()), ch.second);\n  });\n  return ret;\n}\n\ninline std::vector<std::pair<std::vector<char>, int64_t>> PairStringInt64ToPairCharInt64(\n  const std::vector<std::pair<std::string, int64_t>> &s) {\n  std::vector<std::pair<std::vector<char>, int64_t>> ret;\n  std::transform(s.begin(), s.end(), std::back_inserter(ret), [](auto str) {\n    return std::pair<std::vector<char>, int64_t>(std::vector<char>(str.first.begin(), str.first.end()), str.second);\n  });\n  return ret;\n}\n\ntemplate <class T>\ninline std::map<std::vector<char>, T> PadInfoStringToChar(const std::map<std::string, T> &s_pad_info) {\n  std::map<std::vector<char>, T> ret;\n  std::transform(s_pad_info.begin(), s_pad_info.end(), std::inserter(ret, ret.begin()), [](auto str) {\n    return std::pair<std::vector<char>, T>(std::vector<char>(str.first.begin(), str.first.end()), str.second);\n  });\n  return ret;\n}\n\ntemplate <class T>\ninline std::map<std::string, T> PadInfoCharToString(const std::map<std::vector<char>, T> &c_pad_info) {\n  std::map<std::string, T> ret;\n  std::transform(c_pad_info.begin(), c_pad_info.end(), std::inserter(ret, ret.begin()), [](auto ch) {\n    return std::pair<std::string, T>(std::string(ch.first.begin(), ch.first.end()), ch.second);\n  });\n  return ret;\n}\n\ntemplate <class T>\ninline void TensorMapCharToString(const std::map<std::vector<char>, T> *c, std::unordered_map<std::string, T> *s) {\n  
if (c == nullptr || s == nullptr) {\n    return;\n  }\n  for (auto ch : *c) {\n    auto key = std::string(ch.first.begin(), ch.first.end());\n    auto val = ch.second;\n    s->insert(std::pair<std::string, T>(key, val));\n  }\n}\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_\n"
  },
  {
    "path": "tests/ut/stub/include/api/format.h",
    "content": "/**\n * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).\n *\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_INCLUDE_API_FORMAT_H\n#define MINDSPORE_INCLUDE_API_FORMAT_H\n\n#if __has_include(\"include/mindapi/base/format.h\")\n#include \"include/mindapi/base/format.h\"\n#else\n#include \"mindapi/base/format.h\"\n#endif\n\n#endif  // MINDSPORE_INCLUDE_API_FORMAT_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/graph.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_GRAPH_H\n#define MINDSPORE_INCLUDE_API_GRAPH_H\n\n#include <cstddef>\n#include <vector>\n#include <map>\n#include <memory>\n#include \"include/api/status.h\"\n#include \"include/api/types.h\"\n\nnamespace mindspore {\nclass MS_API Graph {\n public:\n  class GraphData;\n  Graph();\n  explicit Graph(const std::shared_ptr<GraphData> &graph_data);\n  explicit Graph(std::shared_ptr<GraphData> &&graph_data);\n  explicit Graph(std::nullptr_t);\n  ~Graph();\n\n  enum ModelType ModelType() const;\n  bool operator==(std::nullptr_t) const;\n  bool operator!=(std::nullptr_t) const;\n\n private:\n  friend class GraphCell;\n  friend class ModelImpl;\n  friend class GraphImplStubAdd;\n  std::shared_ptr<GraphData> graph_data_;\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_GRAPH_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/kernel.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_INCLUDE_API_KERNEL_H\n#define MINDSPORE_INCLUDE_API_KERNEL_H\n#include <vector>\n#include <string>\n#include <utility>\n#include <map>\n#include \"schema/model_generated.h\"\n#include \"include/api/types.h\"\n#include \"include/api/context.h\"\n\nnamespace mindspore::kernel {\n/// \\brief The Kernel class is used to define a MindSpore Kernel.\nclass MS_API Kernel {\n public:\n  Kernel() = default;\n  /// \\brief Constructor.\n  ///\n  /// \\param[in] inputs define the input tensors for kernel.\n  /// \\param[in] outputs define the output tensors for kernel.\n  /// \\param[in] primitive define the primitive of kernel generated by flatbuffers.\n  /// \\param[in] ctx define the context for kernel.\n  Kernel(const std::vector<mindspore::MSTensor> &inputs, const std::vector<mindspore::MSTensor> &outputs,\n         const schema::Primitive *primitive, const mindspore::Context *ctx)\n      : context_(ctx), inputs_(std::move(inputs)), outputs_(std::move(outputs)), primitive_(primitive) {\n    Initialize();\n  }\n  /// \\brief Destructor.\n  virtual ~Kernel() = default;\n  /// \\brief prepare for executing kernel.\n  ///\n  /// \\return result code.\n  virtual int Prepare() = 0;\n  /// \\brief execute the kernel.\n  ///\n  /// \\return result code.\n  virtual int Execute() = 0;\n  /// \\brief resize the kernel input shape, 
memory need to refresh.\n  ///\n  /// \\return result code.\n  virtual int ReSize() = 0;\n  /// \\brief set kernel's input tensors.\n  ///\n  /// \\param[in] in_tensors define the input tensors.\n  virtual void set_inputs(const std::vector<mindspore::MSTensor> &in_tensors) { this->inputs_ = in_tensors; }\n  /// \\brief set kernel's input tensor.\n  ///\n  /// \\param[in] in_tensor define the input tensor.\n  /// \\param[in] index define the index of the input tensor.\n  virtual void set_input(mindspore::MSTensor in_tensor, int index) { this->inputs_[index] = in_tensor; }\n  /// \\brief set kernel's output tensors.\n  ///\n  /// \\param[in] out_tensors define the output tensors.\n  virtual void set_outputs(const std::vector<mindspore::MSTensor> &out_tensors) { this->outputs_ = out_tensors; }\n  /// \\brief set kernel's output tensor.\n  ///\n  /// \\param[in] out_tensor define the output tensor.\n  /// \\param[in] index define the index of the output tensor.\n  virtual void set_output(mindspore::MSTensor out_tensor, int index) { this->outputs_[index] = out_tensor; }\n  /// \\brief obtain kernel's input tensors.\n  ///\n  /// \\return input tensors.\n  virtual const std::vector<mindspore::MSTensor> &inputs() { return this->inputs_; }\n  /// \\brief obtain kernel's output tensors.\n  ///\n  /// \\return output tensors.\n  virtual const std::vector<mindspore::MSTensor> &outputs() { return this->outputs_; }\n  /// \\brief obtain kernel's name.\n  ///\n  /// \\return kernel's name.\n  std::string name() const { return this->name_; }\n  /// \\brief set kernel's name.\n  ///\n  /// \\param[in] name define the kernel's name.\n  void set_name(const std::string &name) { this->name_ = name; }\n  /// \\brief obtain kernel's context.\n  ///\n  /// \\return kernel's context.\n  const mindspore::Context *context() const { return this->context_; }\n  /// \\brief obtain kernel's type.\n  ///\n  /// \\return kernel's type.\n  virtual schema::PrimitiveType type() const { return type_; 
}\n  /// \\brief obtain kernel's quant type.\n  ///\n  /// \\return kernel's quant type.\n  virtual schema::QuantType quant_type() const { return quant_type_; }\n  /// \\brief obtain the primitive of kernel generated by flatbuffers.\n  ///\n  /// \\return the primitive of kernel generated by flatbuffers.\n  const schema::Primitive *primitive() const { return this->primitive_; }\n\n  /// \\brief get kernel's attribute.\n  ///\n  /// \\param[in] key define the kernel's attribute key.\n  std::string GetAttr(const std::string &key) const {\n    auto iter = attrs_.find(key);\n    if (iter != attrs_.end()) {\n      return iter->second;\n    }\n    return \"\";\n  }\n\n  /// \\brief set kernel's config.\n  ///\n  /// \\param[in] config define the kernel's config.\n  void SetConfig(const std::map<std::string, std::map<std::string, std::string>> *config) {\n    config_ = config;\n  }\n  /// \\brief set kernel's config.\n  ///\n  /// \\param[in] config define the kernel's config.\n  std::map<std::string, std::string> GetConfig(const std::string &section) const {\n    if (config_ == nullptr) {\n      return std::map<std::string, std::string>();\n    }\n    auto iter = config_->find(section);\n    if (iter != config_->end()) {\n      return iter->second;\n    }\n    return std::map<std::string, std::string>();\n  }\n\n protected:\n  /// \\brief set kernel's attribute\n  ///\n  /// \\param[in] key define the kernel's attribute key.\n  /// \\param[in] value define the kernel's attribute value.\n  void SetAttr(const std::string &key, const std::string &value) { attrs_[key] = value; }\n\n  std::string name_;\n  const mindspore::Context *context_ = nullptr;\n  std::vector<mindspore::MSTensor> inputs_;\n  std::vector<mindspore::MSTensor> outputs_;\n  schema::PrimitiveType type_ = schema::PrimitiveType_NONE;\n  const schema::Primitive *primitive_ = nullptr;\n  std::map<std::string, std::string> attrs_;\n  const std::map<std::string, std::map<std::string, std::string>> *config_;\n  
schema::QuantType quant_type_ = schema::QuantType_QUANT_NONE;\n\n private:\n  void Initialize();\n};\n}  // namespace mindspore::kernel\n\n#endif  // MINDSPORE_INCLUDE_API_KERNEL_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/metrics/accuracy.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_METRICS_ACCURACY_H\n#define MINDSPORE_INCLUDE_API_METRICS_ACCURACY_H\n#include <vector>\n#include \"include/api/metrics/metrics.h\"\n\nnamespace mindspore {\n\nconstexpr int METRICS_CLASSIFICATION = 0;\nconstexpr int METRICS_MULTILABEL = 1;\n\nclass AccuracyMetrics : public Metrics {\n public:\n  explicit AccuracyMetrics(int accuracy_metrics = METRICS_CLASSIFICATION, const std::vector<int> &input_indexes = {1},\n                           const std::vector<int> &output_indexes = {0});\n  virtual ~AccuracyMetrics();\n  void Clear() override;\n  float Eval() override;\n};\n\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_METRICS_ACCURACY_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/metrics/metrics.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_METRICS_METRICS_H\n#define MINDSPORE_INCLUDE_API_METRICS_METRICS_H\n#include <vector>\n#include \"include/api/model.h\"\n\nnamespace mindspore {\n\nclass MetricsImpl;\nclass ModelImpl;\nclass MSTensor;\n\nclass Metrics {\n public:\n  virtual ~Metrics() = default;\n  virtual void Clear() {}\n  virtual float Eval() { return 0.0; }\n  virtual void Update(std::vector<MSTensor *> inputs, std::vector<MSTensor *> outputs) {}\n protected:\n  friend class Model;\n  friend class ModelImpl;\n  MetricsImpl* metrics_impl_;\n};\n\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_METRICS_METRICS_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/model.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_MODEL_H\n#define MINDSPORE_INCLUDE_API_MODEL_H\n\n#include <string>\n#include <vector>\n#include <map>\n#include <memory>\n#include <utility>\n#include \"include/api/status.h\"\n#include \"include/api/types.h\"\n#include \"include/api/graph.h\"\n#include \"include/api/context.h\"\n#include \"include/api/callback/callback.h\"\n#include \"include/api/cell.h\"\n#include \"include/api/cfg.h\"\n#include \"include/api/dual_abi_helper.h\"\n\nnamespace mindspore {\nclass ModelImpl;\nclass Metrics;\n\nnamespace dataset {\nclass Dataset;\n}  // namespace dataset\n/// \\brief The Model class is used to define a MindSpore model, facilitating computational graph management.\nclass MS_API Model {\n public:\n  Model();\n  ~Model();\n  Model(const Model &) = delete;\n  void operator=(const Model &) = delete;\n\n  /// \\brief Builds a model\n  ///\n  /// \\param[in] graph GraphCell is a derivative of Cell. Cell is not available currently. 
GraphCell can be constructed\n  /// from Graph, for example, model.Build(GraphCell(graph), context).\n  /// \\param[in] model_context A context used to store options during execution.\n  /// \\param[in] train_cfg A config used by training.\n  ///\n  /// \\return Status.\n  Status Build(GraphCell graph, const std::shared_ptr<Context> &model_context = nullptr,\n               const std::shared_ptr<TrainCfg> &train_cfg = nullptr);\n\n  /// \\brief Builds a Transfer Learning model where the backbone weights are fixed and the head weights are trainable\n  ///\n  /// \\param[in] backbone The static, non-learnable part of the graph\n  /// \\param[in] head The trainable part of the graph\n  /// \\param[in] context A context used to store options during execution\n  /// \\param[in] cfg A config used by training\n  ///\n  /// \\return Status\n  Status BuildTransferLearning(GraphCell backbone, GraphCell head, const std::shared_ptr<Context> &context,\n                               const std::shared_ptr<TrainCfg> &train_cfg = nullptr);\n\n  /// \\brief Resizes the shapes of inputs.\n  ///\n  /// \\param[in] inputs A vector that includes all input tensors in order.\n  /// \\param[in] dims Defines the new shapes of inputs, should be consistent with inputs.\n  ///\n  /// \\return Status.\n  Status Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims);\n\n  /// \\brief Change the size and or content of weight tensors\n  ///\n  /// \\param[in] new_weights a vector of tensors with new shapes and data to use in the model\n  ///            If data pointer is null, the data of the original tensors will be copied to the new ones\n  ///\n  /// \\return Status.\n  Status UpdateWeights(const std::vector<MSTensor> &new_weights);\n\n  /// \\brief Inference model.\n  ///\n  /// \\param[in] inputs A vector where model inputs are arranged in sequence.\n  /// \\param[out] outputs Which is a pointer to a vector. 
The model outputs are filled in the container in sequence.\n  /// \\param[in] before CallBack before predict.\n  /// \\param[in] after CallBack after predict.\n  ///\n  /// \\return Status.\n  Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs,\n                 const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr);\n\n  /// \\brief Train model by step.\n  ///\n  /// \\param[in] before CallBack before predict.\n  /// \\param[in] after CallBack after predict.\n  ///\n  /// \\return Status.\n  Status RunStep(const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr);\n\n  /// \\brief Inference model with preprocess in model.\n  ///\n  /// \\param[in] inputs A vector where model inputs are arranged in sequence.\n  /// \\param[out] outputs Which is a pointer to a vector. The model outputs are filled in the container in sequence.\n  /// \\param[in] whether to use data preprocess in model.\n  /// \\param[in] before CallBack before predict.\n  /// \\param[in] after CallBack after predict.\n  ///\n  /// \\return Status.\n  Status PredictWithPreprocess(const std::vector<std::vector<MSTensor>> &inputs, std::vector<MSTensor> *outputs,\n                               const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr);\n\n  /// \\brief Apply data preprocess if it exits in model.\n  ///\n  /// \\param[in] inputs A vector where model inputs are arranged in sequence.\n  /// \\param[out] outputs Which is a pointer to a vector. 
The model outputs are filled in the container in sequence.\n  ///\n  /// \\return Status.\n  Status Preprocess(const std::vector<std::vector<MSTensor>> &inputs, std::vector<MSTensor> *outputs);\n\n  /// \\brief Check if data preprocess exists in model.\n  /// \\return true if data preprocess exists.\n  bool HasPreprocess();\n\n  /// \\brief Load config file.\n  ///\n  /// \\param[in] config_path config file path.\n  ///\n  /// \\return Status.\n  inline Status LoadConfig(const std::string &config_path);\n\n  /// \\brief Update config.\n  ///\n  /// \\param[in] section define the config section.\n  /// \\param[in] config define the config will be updated.\n  ///\n  /// \\return Status.\n  inline Status UpdateConfig(const std::string &section, const std::pair<std::string, std::string> &config);\n\n  /// \\brief Obtains all input tensors of the model.\n  ///\n  /// \\return The vector that includes all input tensors.\n  std::vector<MSTensor> GetInputs();\n\n  /// \\brief Obtains the input tensor of the model by name.\n  ///\n  /// \\return The input tensor with the given name, if the name is not found, an invalid tensor is returned.\n  inline MSTensor GetInputByTensorName(const std::string &tensor_name);\n\n  /// \\brief Obtains all gradient tensors of the model.\n  ///\n  /// \\return The vector that includes all gradient tensors.\n  std::vector<MSTensor> GetGradients() const;\n\n  /// \\brief update gradient tensors of the model.\n  ///\n  /// \\param[in] inputs A vector new gradients.\n  /// \\return Status of operation\n  Status ApplyGradients(const std::vector<MSTensor> &gradients);\n\n  /// \\brief Obtains all weights tensors of the model.\n  ///\n  /// \\return The vector that includes all gradient tensors.\n  std::vector<MSTensor> GetFeatureMaps() const;\n\n  /// \\brief update weights tensors of the model.\n  ///\n  /// \\param[in] inputs A vector new weights.\n  /// \\return Status of operation\n  Status UpdateFeatureMaps(const std::vector<MSTensor> 
&new_weights);\n\n  /// \\brief Obtains optimizer params tensors of the model.\n  ///\n  /// \\return The vector that includes all params tensors.\n  std::vector<MSTensor> GetOptimizerParams() const;\n\n  /// \\brief update the optimizer parameters\n  ///\n  /// \\param[in] inputs A vector new optimizer params.\n  /// \\return Status of operation\n  Status SetOptimizerParams(const std::vector<MSTensor> &params);\n\n  /// \\brief Setup training with virtual batches\n  ///\n  /// \\param[in] virtual_batch_multiplier - virtual batch multiplier, use any number < 1 to disable\n  /// \\param[in] lr - learning rate to use for virtual batch, -1 for internal configuration\n  /// \\param[in] momentum - batch norm momentum to use for virtual batch, -1 for internal configuration\n  /// \\return Status of operation\n  Status SetupVirtualBatch(int virtual_batch_multiplier, float lr = -1.0f, float momentum = -1.0f);\n\n  /// \\brief Sets the Learning Rate of the training\n  ///\n  /// \\param[in] learning_rate to set\n  /// \\return Status of operation\n  Status SetLearningRate(float learning_rate);\n\n  /// \\brief Gets the Learning Rate of the optimizer\n  ///\n  /// \\return learning rate. 
0.0 if no optimizer was found\n  float GetLearningRate();\n\n  Status InitMetrics(std::vector<Metrics *> metrics);\n  std::vector<Metrics *> GetMetrics();\n\n  /// \\brief Obtains all output tensors of the model.\n  ///\n  /// \\return The vector that includes all output tensors.\n  std::vector<MSTensor> GetOutputs();\n\n  /// \\brief Obtains names of all output tensors of the model.\n  ///\n  /// \\return A vector that includes names of all output tensors.\n  inline std::vector<std::string> GetOutputTensorNames();\n\n  /// \\brief Obtains the output tensor of the model by name.\n  ///\n  /// \\return The output tensor with the given name, if the name is not found, an invalid tensor is returned.\n  inline MSTensor GetOutputByTensorName(const std::string &tensor_name);\n\n  /// \\brief Get output MSTensors of model by node name.\n  ///\n  /// \\param[in] node_name Define node name.\n  ///\n  /// \\note Deprecated, replace with GetOutputByTensorName\n  ///\n  /// \\return The vector of output MSTensor.\n  inline std::vector<MSTensor> GetOutputsByNodeName(const std::string &node_name);\n\n  /// \\brief Bind GLTexture2D object to cl Memory.\n  ///\n  /// \\param[in] inputGlTexture The input GLTexture id for Model.\n  /// \\param[in] outputGLTexture The output GLTexture id for Model.\n  ///\n  /// \\return Status of operation.\n\n  Status BindGLTexture2DMemory(const std::map<std::string, unsigned int> &inputGLTexture,\n                               std::map<std::string, unsigned int> *outputGLTexture);\n\n  /// \\brief Inference model.\n  ///\n  /// \\param[in] device_type Device type, options are kGPU, kAscend etc.\n  /// \\param[in] model_type The type of model file, options are ModelType::kMindIR, ModelType::kOM.\n  ///\n  /// \\return Is supported or not.\n  static bool CheckModelSupport(enum DeviceType device_type, ModelType model_type);\n\n  Status SetTrainMode(bool train);\n  bool GetTrainMode() const;\n  Status Train(int epochs, std::shared_ptr<dataset::Dataset> 
ds, std::vector<TrainCallBack *> cbs);\n  Status Evaluate(std::shared_ptr<dataset::Dataset> ds, std::vector<TrainCallBack *> cbs);\n\n  /// \\brief Build a model from model buffer so that it can run on a device. Only valid for Lite.\n  ///\n  /// \\param[in] model_data Define the buffer read from a model file.\n  /// \\param[in] data_size Define bytes number of model buffer.\n  /// \\param[in] model_type Define The type of model file. Options: ModelType::kMindIR, ModelType::kOM. Only\n  /// ModelType::kMindIR is valid for Lite.\n  /// \\param[in] model_context Define the context used to store options during execution.\n  ///\n  /// \\return Status.\n  Status Build(const void *model_data, size_t data_size, ModelType model_type,\n               const std::shared_ptr<Context> &model_context = nullptr);\n\n  /// \\brief Load and build a model from model buffer so that it can run on a device. Only valid for Lite.\n  ///\n  /// \\param[in] model_path Define the model path.\n  /// \\param[in] model_type Define The type of model file. Options: ModelType::kMindIR, ModelType::kOM. Only\n  /// ModelType::kMindIR is valid for Lite.\n  /// \\param[in] model_context Define the context used to store options during execution.\n  ///\n  /// \\return Status.\n  Status Build(const std::string &model_path, ModelType model_type,\n               const std::shared_ptr<Context> &model_context = nullptr);\n\n  /// \\brief Build a model from model buffer so that it can run on a device. Only valid for Lite.\n  ///\n  /// \\param[in] model_data Define the buffer read from a model file.\n  /// \\param[in] data_size Define bytes number of model buffer.\n  /// \\param[in] model_type Define The type of model file. Options: ModelType::kMindIR, ModelType::kOM. Only\n  /// ModelType::kMindIR is valid for Lite.\n  /// \\param[in] model_context Define the context used to store options during execution.\n  /// \\param[in] dec_key Define the key used to decrypt the ciphertext model. 
The key length is 16.\n  /// \\param[in] dec_mode Define the decryption mode. Options: AES-GCM.\n  /// \\param[in] cropto_lib_path Define the openssl library path.\n  ///\n  /// \\return Status.\n  Status Build(const void *model_data, size_t data_size, ModelType model_type,\n               const std::shared_ptr<Context> &model_context, const Key &dec_key, const std::string &dec_mode,\n               const std::string &cropto_lib_path);\n\n  /// \\brief Load and build a model from model buffer so that it can run on a device. Only valid for Lite.\n  ///\n  /// \\param[in] model_path Define the model path.\n  /// \\param[in] model_type Define The type of model file. Options: ModelType::kMindIR, ModelType::kOM. Only\n  /// ModelType::kMindIR is valid for Lite.\n  /// \\param[in] model_context Define the context used to store options during execution.\n  /// \\param[in] dec_key Define the key used to decrypt the ciphertext model. The key length is 16.\n  /// \\param[in] dec_mode Define the decryption mode. 
Options: AES-GCM.\n  /// \\param[in] cropto_lib_path Define the openssl library path.\n  ///\n  /// \\return Status.\n  Status Build(const std::string &model_path, ModelType model_type, const std::shared_ptr<Context> &model_context,\n               const Key &dec_key, const std::string &dec_mode, const std::string &cropto_lib_path);\n\n private:\n  friend class Serialization;\n  // api without std::string\n  MSTensor GetInputByTensorName(const std::vector<char> &tensor_name);\n  std::vector<std::vector<char>> GetOutputTensorNamesChar();\n  MSTensor GetOutputByTensorName(const std::vector<char> &tensor_name);\n  std::vector<MSTensor> GetOutputsByNodeName(const std::vector<char> &node_name);\n  Status LoadConfig(const std::vector<char> &config_path);\n  Status UpdateConfig(const std::vector<char> &section, const std::pair<std::vector<char>, std::vector<char>> &config);\n  Status Build(const std::vector<char> &model_path, ModelType model_type,\n               const std::shared_ptr<Context> &model_context);\n  Status Build(const std::vector<char> &model_path, ModelType model_type, const std::shared_ptr<Context> &model_context,\n               const Key &dec_key, const std::string &dec_mode, const std::vector<char> &cropto_lib_path);\n  std::shared_ptr<ModelImpl> impl_;\n};\n\nMSTensor Model::GetInputByTensorName(const std::string &tensor_name) {\n  return GetInputByTensorName(StringToChar(tensor_name));\n}\n\nstd::vector<std::string> Model::GetOutputTensorNames() { return VectorCharToString(GetOutputTensorNamesChar()); }\n\nMSTensor Model::GetOutputByTensorName(const std::string &tensor_name) {\n  return GetOutputByTensorName(StringToChar(tensor_name));\n}\n\nstd::vector<MSTensor> Model::GetOutputsByNodeName(const std::string &node_name) {\n  return GetOutputsByNodeName(StringToChar(node_name));\n}\n\nStatus Model::LoadConfig(const std::string &config_path) { return LoadConfig(StringToChar(config_path)); }\n\nStatus Model::UpdateConfig(const std::string &section, const 
std::pair<std::string, std::string> &config) {\n  std::pair<std::vector<char>, std::vector<char>> config_pair = {StringToChar(config.first),\n                                                                 StringToChar(config.second)};\n  return UpdateConfig(StringToChar(section), config_pair);\n}\n\ninline Status Model::Build(const std::string &model_path, ModelType model_type,\n                           const std::shared_ptr<Context> &model_context, const Key &dec_key,\n                           const std::string &dec_mode, const std::string &cropto_lib_path) {\n  return Build(StringToChar(model_path), model_type, model_context, dec_key, dec_mode, StringToChar(cropto_lib_path));\n}\n\ninline Status Model::Build(const std::string &model_path, ModelType model_type,\n                           const std::shared_ptr<Context> &model_context) {\n  return Build(StringToChar(model_path), model_type, model_context);\n}\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_MODEL_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/model_parallel_runner.h",
    "content": "/**\n * Copyright 2022 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_MODEL_PARALLEL_RUNNER_H\n#define MINDSPORE_INCLUDE_API_MODEL_PARALLEL_RUNNER_H\n#include <vector>\n#include <memory>\n#include <utility>\n#include <string>\n#include \"include/api/status.h\"\n#include \"include/api/context.h\"\nnamespace mindspore {\nstruct RunnerConfig {\n  std::shared_ptr<Context> context = nullptr;\n  int workers_num = 0;\n};\n\n/// \\brief The ModelParallelRunner class is used to define a MindSpore ModelParallelRunner, facilitating Model\n/// management.\nclass MS_API ModelParallelRunner {\n public:\n  ModelParallelRunner() = default;\n  ~ModelParallelRunner() = default;\n\n  /// \\brief build a model parallel runner from model path so that it can run on a device. 
Only valid for Lite.\n  ///\n  /// \\param[in] model_path Define the model path.\n  /// \\param[in] runner_config Define the config used to store options during model pool init.\n  ///\n  /// \\return Status.\n  Status Init(const std::string &model_path, const std::shared_ptr<RunnerConfig> &runner_config = nullptr);\n\n  /// \\brief Obtains all input tensors information of the model.\n  ///\n  /// \\return The vector that includes all input tensors.\n  std::vector<MSTensor> GetInputs();\n\n  /// \\brief Obtains all output tensors information of the model.\n  ///\n  /// \\return The vector that includes all output tensors.\n  std::vector<MSTensor> GetOutputs();\n\n  /// \\brief Inference ModelParallelRunner.\n  ///\n  /// \\param[in] inputs A vector where model inputs are arranged in sequence.\n  /// \\param[out] outputs Which is a pointer to a vector. The model outputs are filled in the container in sequence.\n  /// \\param[in] before CallBack before predict.\n  /// \\param[in] after CallBack after predict.\n  ///\n  /// \\return Status.\n  Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs,\n                 const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr);\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_MODEL_PARALLEL_RUNNER_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/ops/ops.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_OPS_OPS_H\n#define MINDSPORE_INCLUDE_API_OPS_OPS_H\n\n#include <string>\n#include <vector>\n#include <map>\n#include <memory>\n#include \"include/api/status.h\"\n#include \"include/api/types.h\"\n#include \"include/api/cell.h\"\n\nnamespace mindspore {\nstruct MS_API Conv2D : public OpCell<Conv2D> {\n  Conv2D() : OpCell(\"Conv2D\") {}\n  ~Conv2D() override = default;\n  std::vector<Output> Construct(const std::vector<Input> &inputs) override;\n  Conv2D(int out_channel, const std::vector<int> &kernel_size, int mode = 1, const std::string &pad_mode = \"valid\",\n         const std::vector<int> &pad = {0, 0, 0, 0}, const std::vector<int> &stride = {1, 1, 1, 1},\n         const std::vector<int> &dilation = {1, 1, 1, 1}, int group = 1);\n\n  Output operator()(const Input &, const Input &) const;\n\n  int out_channel;\n  std::vector<int> kernel_size;\n  int mode = 1;\n  std::string pad_mode = \"valid\";\n  std::vector<int> pad = {0, 0, 0, 0};\n  std::vector<int> stride = {1, 1, 1, 1};\n  std::vector<int> dilation = {1, 1, 1, 1};\n  int group = 1;\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_OPS_OPS_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/serialization.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_SERIALIZATION_H\n#define MINDSPORE_INCLUDE_API_SERIALIZATION_H\n\n#include <string>\n#include <vector>\n#include <map>\n#include <memory>\n#include \"include/api/status.h\"\n#include \"include/api/types.h\"\n#include \"include/api/model.h\"\n#include \"include/api/graph.h\"\n#include \"include/api/dual_abi_helper.h\"\n\nnamespace mindspore {\n/// \\brief The Serialization class is used to summarize methods for reading and writing model files.\nclass MS_API Serialization {\n public:\n  /// \\brief Loads a model file from memory buffer.\n  ///\n  /// \\param[in] model_data A buffer filled by model file.\n  /// \\param[in] data_size The size of the buffer.\n  /// \\param[in] model_type The Type of model file, options are ModelType::kMindIR, ModelType::kOM.\n  /// \\param[out] graph The output parameter, an object saves graph data.\n  /// \\param[in] dec_key The decryption key, key length is 16, 24, or 32.\n  /// \\param[in] dec_mode The decryption mode, optional options are AES-GCM, AES-CBC.\n  ///\n  /// \\return Status.\n  inline static Status Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph,\n                            const Key &dec_key = {}, const std::string &dec_mode = kDecModeAesGcm);\n\n  /// \\brief Loads a model file from path, is not supported on MindSpore 
Lite.\n  ///\n  /// \\param[in] file The path of model file.\n  /// \\param[in] model_type The Type of model file, options are ModelType::kMindIR, ModelType::kOM.\n  /// \\param[out] graph The output parameter, an object saves graph data.\n  /// \\param[in] dec_key The decryption key, key length is 16, 24, or 32.\n  /// \\param[in] dec_mode The decryption mode, optional options are AES-GCM, AES-CBC.\n  ///\n  /// \\return Status.\n  inline static Status Load(const std::string &file, ModelType model_type, Graph *graph, const Key &dec_key = {},\n                            const std::string &dec_mode = kDecModeAesGcm);\n\n  /// \\brief Load multiple models from multiple files, MindSpore Lite does not provide this feature.\n  ///\n  /// \\param[in] files The path of model files.\n  /// \\param[in] model_type The Type of model file, options are ModelType::kMindIR, ModelType::kOM.\n  /// \\param[out] graph The output parameter, an object saves graph data.\n  /// \\param[in] dec_key The decryption key, key length is 16, 24, or 32.\n  /// \\param[in] dec_mode The decryption mode, optional options are AES-GCM, AES-CBC.\n  ///\n  /// \\return Status.\n  inline static Status Load(const std::vector<std::string> &files, ModelType model_type, std::vector<Graph> *graphs,\n                            const Key &dec_key = {}, const std::string &dec_mode = kDecModeAesGcm);\n  static Status SetParameters(const std::map<std::string, Buffer> &parameters, Model *model);\n  static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data);\n  inline static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file,\n                                   QuantizationType quantization_type = kNoQuant, bool export_inference_only = true,\n                                   std::vector<std::string> output_tensor_name = {});\n\n private:\n  static Status Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph, const 
Key &dec_key,\n                     const std::vector<char> &dec_mode);\n  static Status Load(const std::vector<char> &file, ModelType model_type, Graph *graph);\n  static Status Load(const std::vector<char> &file, ModelType model_type, Graph *graph, const Key &dec_key,\n                     const std::vector<char> &dec_mode);\n  static Status Load(const std::vector<std::vector<char>> &files, ModelType model_type, std::vector<Graph> *graphs,\n                     const Key &dec_key, const std::vector<char> &dec_mode);\n  static Status ExportModel(const Model &model, ModelType model_type, const std::vector<char> &model_file,\n                            QuantizationType quantization_type, bool export_inference_only,\n                            const std::vector<std::vector<char>> &output_tensor_name);\n};\n\nStatus Serialization::Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph,\n                           const Key &dec_key, const std::string &dec_mode) {\n  return Load(model_data, data_size, model_type, graph, dec_key, StringToChar(dec_mode));\n}\n\nStatus Serialization::Load(const std::string &file, ModelType model_type, Graph *graph, const Key &dec_key,\n                           const std::string &dec_mode) {\n  return Load(StringToChar(file), model_type, graph, dec_key, StringToChar(dec_mode));\n}\n\nStatus Serialization::Load(const std::vector<std::string> &files, ModelType model_type, std::vector<Graph> *graphs,\n                           const Key &dec_key, const std::string &dec_mode) {\n  return Load(VectorStringToChar(files), model_type, graphs, dec_key, StringToChar(dec_mode));\n}\n\nStatus Serialization::ExportModel(const Model &model, ModelType model_type, const std::string &model_file,\n                                  QuantizationType quantization_type, bool export_inference_only,\n                                  std::vector<std::string> output_tensor_name) {\n  return ExportModel(model, model_type, 
StringToChar(model_file), quantization_type, export_inference_only,\n                     VectorStringToChar(output_tensor_name));\n}\n\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_SERIALIZATION_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/status.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_STATUS_H\n#define MINDSPORE_INCLUDE_API_STATUS_H\n\n#include <memory>\n#include <string>\n#include <vector>\n#include <ostream>\n#include <climits>\n#include \"include/api/dual_abi_helper.h\"\n#include \"include/api/types.h\"\n\nnamespace mindspore {\nenum CompCode : uint32_t {\n  kCore = 0x00000000u,\n  kMD = 0x10000000u,\n  kME = 0x20000000u,\n  kMC = 0x30000000u,\n  kLite = 0xF0000000u,\n};\n\nenum StatusCode : uint32_t {\n  kSuccess = 0,\n  // Core\n  kCoreFailed = kCore | 0x1,\n\n  // MD\n  kMDOutOfMemory = kMD | 1,\n  kMDShapeMisMatch = kMD | 2,\n  kMDInterrupted = kMD | 3,\n  kMDNoSpace = kMD | 4,\n  kMDPyFuncException = kMD | 5,\n  kMDDuplicateKey = kMD | 6,\n  kMDPythonInterpreterFailure = kMD | 7,\n  kMDTDTPushFailure = kMD | 8,\n  kMDFileNotExist = kMD | 9,\n  kMDProfilingError = kMD | 10,\n  kMDBoundingBoxOutOfBounds = kMD | 11,\n  kMDBoundingBoxInvalidShape = kMD | 12,\n  kMDSyntaxError = kMD | 13,\n  kMDTimeOut = kMD | 14,\n  kMDBuddySpaceFull = kMD | 15,\n  kMDNetWorkError = kMD | 16,\n  kMDNotImplementedYet = kMD | 17,\n  // Make this error code the last one. 
Add new error code above it.\n  kMDUnexpectedError = kMD | 127,\n\n  // ME\n  kMEFailed = kME | 0x1,\n  kMEInvalidInput = kME | 0x2,\n\n  // MC\n  kMCFailed = kMC | 0x1,\n  kMCDeviceError = kMC | 0x2,\n  kMCInvalidInput = kMC | 0x3,\n  kMCInvalidArgs = kMC | 0x4,\n\n  // Lite  // Common error code, range: [-1, -100)\n  kLiteError = kLite | (0x0FFFFFFF & -1),            /**< Common error code. */\n  kLiteNullptr = kLite | (0x0FFFFFFF & -2),          /**< NULL pointer returned.*/\n  kLiteParamInvalid = kLite | (0x0FFFFFFF & -3),     /**< Invalid parameter.*/\n  kLiteNoChange = kLite | (0x0FFFFFFF & -4),         /**< No change. */\n  kLiteSuccessExit = kLite | (0x0FFFFFFF & -5),      /**< No error but exit. */\n  kLiteMemoryFailed = kLite | (0x0FFFFFFF & -6),     /**< Fail to create memory. */\n  kLiteNotSupport = kLite | (0x0FFFFFFF & -7),       /**< Fail to support. */\n  kLiteThreadPoolError = kLite | (0x0FFFFFFF & -8),  /**< Error occur in thread pool. */\n  kLiteUninitializedObj = kLite | (0x0FFFFFFF & -9), /**< Object is not initialized. */\n  kLiteFileError = kLite | (0x0FFFFFFF & -10),       /**< Invalid file. */\n\n  // Executor error code, range: [-100,-200)\n  kLiteOutOfTensorRange = kLite | (0x0FFFFFFF & -100), /**< Failed to check range. */\n  kLiteInputTensorError = kLite | (0x0FFFFFFF & -101), /**< Failed to check input tensor. */\n  kLiteReentrantError = kLite | (0x0FFFFFFF & -102),   /**< Exist executor running. */\n\n  // Graph error code, range: [-200,-300)\n  kLiteGraphFileError = kLite | (0x0FFFFFFF & -200), /**< Failed to verify graph file. */\n\n  // Node error code, range: [-300,-400)\n  kLiteNotFindOp = kLite | (0x0FFFFFFF & -300),        /**< Failed to find operator. */\n  kLiteInvalidOpName = kLite | (0x0FFFFFFF & -301),    /**< Invalid operator name. */\n  kLiteInvalidOpAttr = kLite | (0x0FFFFFFF & -302),    /**< Invalid operator attr. */\n  kLiteOpExecuteFailure = kLite | (0x0FFFFFFF & -303), /**< Failed to execute operator. 
*/\n\n  // Tensor error code, range: [-400,-500)\n  kLiteFormatError = kLite | (0x0FFFFFFF & -400), /**< Failed to check tensor format. */\n\n  // InferShape error code, range: [-500,-600)\n  kLiteInferError = kLite | (0x0FFFFFFF & -500),   /**< Failed to infer shape. */\n  kLiteInferInvalid = kLite | (0x0FFFFFFF & -501), /**< Invalid infer shape before runtime. */\n\n  // User input param error code, range: [-600, -700)\n  kLiteInputParamInvalid = kLite | (0x0FFFFFFF & -600), /**< Invalid input param by user. */\n};\n\nclass MS_API Status {\n public:\n  Status();\n  inline Status(enum StatusCode status_code, const std::string &status_msg = \"\");  // NOLINT(runtime/explicit)\n  inline Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra = \"\");\n\n  ~Status() = default;\n\n  enum StatusCode StatusCode() const;\n  inline std::string ToString() const;\n\n  int GetLineOfCode() const;\n  inline std::string GetErrDescription() const;\n  inline std::string SetErrDescription(const std::string &err_description);\n\n  MS_API friend std::ostream &operator<<(std::ostream &os, const Status &s);\n\n  bool operator==(const Status &other) const;\n  bool operator==(enum StatusCode other_code) const;\n  bool operator!=(const Status &other) const;\n  bool operator!=(enum StatusCode other_code) const;\n\n  explicit operator bool() const;\n  explicit operator int() const;\n\n  static Status OK();\n\n  bool IsOk() const;\n\n  bool IsError() const;\n\n  static inline std::string CodeAsString(enum StatusCode c);\n\n private:\n  // api without std::string\n  Status(enum StatusCode status_code, const std::vector<char> &status_msg);\n  Status(const enum StatusCode code, int line_of_code, const char *file_name, const std::vector<char> &extra);\n  std::vector<char> ToCString() const;\n  std::vector<char> GetErrDescriptionChar() const;\n  std::vector<char> SetErrDescription(const std::vector<char> &err_description);\n  static std::vector<char> 
CodeAsCString(enum StatusCode c);\n\n  struct Data;\n  std::shared_ptr<Data> data_;\n};\n\nStatus::Status(enum StatusCode status_code, const std::string &status_msg)\n    : Status(status_code, StringToChar(status_msg)) {}\nStatus::Status(const enum StatusCode code, int line_of_code, const char *file_name, const std::string &extra)\n    : Status(code, line_of_code, file_name, StringToChar(extra)) {}\nstd::string Status::ToString() const { return CharToString(ToCString()); }\nstd::string Status::GetErrDescription() const { return CharToString(GetErrDescriptionChar()); }\nstd::string Status::SetErrDescription(const std::string &err_description) {\n  return CharToString(SetErrDescription(StringToChar(err_description)));\n}\nstd::string Status::CodeAsString(enum StatusCode c) { return CharToString(CodeAsCString(c)); }\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_STATUS_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/types.h",
"content": "/**\n * Copyright 2020-2022 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_TYPES_H\n#define MINDSPORE_INCLUDE_API_TYPES_H\n\n#include <cstddef>\n#include <string>\n#include <vector>\n#include <memory>\n#include <functional>\n#include \"include/api/data_type.h\"\n#include \"include/api/dual_abi_helper.h\"\n#include \"include/api/format.h\"\n#include \"include/api/visible.h\"\n\nnamespace mindspore {\nenum ModelType : uint32_t {\n  kMindIR = 0,\n  kAIR = 1,\n  kOM = 2,\n  kONNX = 3,\n  kMindIR_Lite = 4,\n  // insert new data type here\n  kUnknownType = 0xFFFFFFFF\n};\n\nenum QuantizationType : uint32_t { kNoQuant = 0, kWeightQuant = 1, kFullQuant = 2, kUnknownQuantType = 0xFFFFFFFF };\n\nenum OptimizationLevel : uint32_t {\n  kO0 = 0,    // Do not change\n  kO2 = 2,    // Cast network to float16, keep batchnorm and loss in float32,\n  kO3 = 3,    // Cast network to float16, including batchnorm\n  kAuto = 4,  // Choose optimization based on device\n  kOptimizationType = 0xFFFFFFFF\n};\n\nstruct QuantParam {\n  int bit_num;\n  double scale;\n  int32_t zero_point;\n  double min;\n  double max;\n};\n\nclass Allocator;\n/// \\brief The MSTensor class defines a tensor in MindSpore.\nclass MS_API MSTensor {\n public:\n  class Impl;\n  /// \\brief Creates a MSTensor object, whose data need to be copied before accessed by Model, must be used in pairs\n  /// with DestroyTensorPtr.\n  
///\n  /// \\param[in] name The name of the MSTensor.\n  /// \\param[in] type The data type of the MSTensor.\n  /// \\param[in] shape The shape of the MSTensor.\n  /// \\param[in] data The data pointer that points to allocated memory.\n  /// \\param[in] data_len The length of the memory, in bytes.\n  ///\n  /// \\return A pointer of MSTensor.\n  static inline MSTensor *CreateTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,\n                                       const void *data, size_t data_len) noexcept;\n\n  /// \\brief Creates a MSTensor object, whose data can be directly accessed by Model, must be used in pairs with\n  /// DestroyTensorPtr.\n  ///\n  /// \\param[in] name The name of the MSTensor.\n  /// \\param[in] type The data type of the MSTensor.\n  /// \\param[in] shape The shape of the MSTensor.\n  /// \\param[in] data The data pointer that points to allocated memory.\n  /// \\param[in] data_len The length of the memory, in bytes.\n  /// \\param[in] own_data Whether the data memory should be freed in MSTensor destruction.\n  ///\n  /// \\return A pointer of MSTensor.\n  static inline MSTensor *CreateRefTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,\n                                          const void *data, size_t data_len, bool own_data = true) noexcept;\n\n  /// \\brief Creates a MSTensor object, whose device data can be directly accessed by Model, must be used in pairs with\n  /// DestroyTensorPtr.\n  ///\n  /// \\param[in] name The name of the MSTensor.\n  /// \\param[in] type The data type of the MSTensor.\n  /// \\param[in] shape The shape of the MSTensor.\n  /// \\param[in] data The data pointer that points to device memory.\n  /// \\param[in] data_len The length of the memory, in bytes.\n  ///\n  /// \\return A pointer of MSTensor.\n  static inline MSTensor CreateDeviceTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,\n                             
               void *data, size_t data_len) noexcept;\n\n  /// \\brief Creates a MSTensor object from local file, must be used in pairs with DestroyTensorPtr.\n  ///\n  /// \\param[in] file Path of file to be read.\n  /// \\param[in] type The data type of the MSTensor.\n  /// \\param[in] shape The shape of the MSTensor.\n  ///\n  /// \\return A pointer of MSTensor.\n  static inline MSTensor *CreateTensorFromFile(const std::string &file, DataType type = DataType::kNumberTypeUInt8,\n                                               const std::vector<int64_t> &shape = {}) noexcept;\n\n  /// \\brief Create a string type MSTensor object whose data can be accessed by Model only after being copied, must be\n  /// used in pair with DestroyTensorPtr.\n  ///\n  /// \\param[in] name The name of the MSTensor.\n  /// \\param[in] str A vector container containing several strings.\n  ///\n  /// \\return A pointer of MSTensor.\n  static inline MSTensor *StringsToTensor(const std::string &name, const std::vector<std::string> &str);\n\n  /// \\brief Parse the string type MSTensor object into strings.\n  ///\n  /// \\param[in] tensor A MSTensor object.\n  ///\n  /// \\return A vector container containing several strings.\n  static inline std::vector<std::string> TensorToStrings(const MSTensor &tensor);\n\n  /// \\brief Destroy an object created by Clone, StringsToTensor, CreateRefTensor or CreateTensor. 
Do\n  /// not use it to destroy MSTensor from other sources.\n  ///\n  /// \\param[in] tensor A MSTensor object.\n  static void DestroyTensorPtr(MSTensor *tensor) noexcept;\n\n  MSTensor();\n  explicit MSTensor(const std::shared_ptr<Impl> &impl);\n  // if malloc data, user need to free after constructing MSTensor, else memory leak.\n  inline MSTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data,\n                  size_t data_len);\n  explicit MSTensor(std::nullptr_t);\n  ~MSTensor();\n\n  /// \\brief Obtains the name of the MSTensor.\n  ///\n  /// \\return The name of the MSTensor.\n  inline std::string Name() const;\n\n  /// \\brief Obtains the data type of the MSTensor.\n  ///\n  /// \\return The data type of the MSTensor.\n  enum DataType DataType() const;\n\n  /// \\brief Obtains the shape of the MSTensor.\n  ///\n  /// \\return The shape of the MSTensor.\n  const std::vector<int64_t> &Shape() const;\n\n  /// \\brief Obtains the number of elements of the MSTensor.\n  ///\n  /// \\return The number of elements of the MSTensor.\n  int64_t ElementNum() const;\n\n  /// \\brief Obtains a shared pointer to the copy of data of the MSTensor. The data can be read on host.\n  ///\n  /// \\return A shared pointer to the copy of data of the MSTensor.\n  std::shared_ptr<const void> Data() const;\n\n  /// \\brief Obtains the pointer to the data of the MSTensor. 
If the MSTensor is a device tensor, the data cannot be\n  /// accessed directly on host.\n  ///\n  /// \\return A pointer to the data of the MSTensor.\n  void *MutableData();\n\n  /// \\brief Obtains the length of the data of the MSTensor, in bytes.\n  ///\n  /// \\return The length of the data of the MSTensor, in bytes.\n  size_t DataSize() const;\n\n  /// \\brief Get whether the MSTensor data is const data\n  ///\n  /// \\return Const flag of MSTensor\n  bool IsConst() const;\n\n  /// \\brief Gets the boolean value that indicates whether the memory of MSTensor is on device.\n  ///\n  /// \\return The boolean value that indicates whether the memory of MSTensor is on device.\n  bool IsDevice() const;\n\n  /// \\brief Gets a deep copy of the MSTensor, must be used in pair with DestroyTensorPtr.\n  ///\n  /// \\return A pointer points to a deep copy of the MSTensor.\n  MSTensor *Clone() const;\n\n  /// \\brief Gets the boolean value that indicates whether the MSTensor is valid.\n  ///\n  /// \\return The boolean value that indicates whether the MSTensor is valid.\n  bool operator==(std::nullptr_t) const;\n\n  /// \\brief Gets the boolean value that indicates whether the MSTensor is valid.\n  ///\n  /// \\return The boolean value that indicates whether the MSTensor is valid.\n  bool operator!=(std::nullptr_t) const;\n\n  /// \\brief Get the boolean value that indicates whether the MSTensor equals tensor.\n  ///\n  /// \\param[in] another MSTensor.\n  ///\n  /// \\return The boolean value that indicates whether the MSTensor equals tensor.\n  bool operator==(const MSTensor &tensor) const;\n\n  /// \\brief Get the boolean value that indicates whether the MSTensor not equals tensor.\n  ///\n  /// \\param[in] another MSTensor.\n  ///\n  /// \\return The boolean value that indicates whether the MSTensor not equals tensor.\n  bool operator!=(const MSTensor &tensor) const;\n\n  /// \\brief Set the shape of for the MSTensor. 
Only valid for Lite.\n  ///\n  /// \\param[in] shape Shape of the MSTensor, a vector of int64_t.\n  void SetShape(const std::vector<int64_t> &shape);\n\n  /// \\brief Set the data type for the MSTensor. Only valid for Lite.\n  ///\n  /// \\param[in] data_type The data type of the MSTensor.\n  void SetDataType(enum DataType data_type);\n\n  /// \\brief Set the name for the MSTensor. Only valid for Lite.\n  ///\n  /// \\param[in] name The name of the MSTensor.\n  inline void SetTensorName(const std::string &name);\n\n  /// \\brief Set the Allocator for the MSTensor. Only valid for Lite.\n  ///\n  /// \\param[in] allocator A pointer to Allocator.\n  void SetAllocator(std::shared_ptr<Allocator> allocator);\n\n  /// \\brief Obtain the Allocator of the MSTensor. Only valid for Lite.\n  ///\n  /// \\return A pointer to Allocator.\n  std::shared_ptr<Allocator> allocator() const;\n\n  /// \\brief Set the format for the MSTensor. Only valid for Lite.\n  ///\n  /// \\param[in] format The format of the MSTensor.\n  void SetFormat(mindspore::Format format);\n\n  /// \\brief Obtain the format of the MSTensor. Only valid for Lite.\n  ///\n  /// \\return The format of the MSTensor.\n  mindspore::Format format() const;\n\n  /// \\brief Set the data for the MSTensor. Only valid for Lite.\n  ///\n  /// \\note Deprecated, this interface will be removed in the next iteration\n  ///\n  /// \\note A pointer to the data should be created by malloc interface\n  ///\n  /// \\note The memory pointed to origin data pointer of MSTensor needs to be managed by the user\n  ///\n  /// \\param[in] data A pointer to the data of the MSTensor.\n  /// \\param[in] own_data Whether the data memory should be freed in MSTensor destruction.\n  void SetData(void *data, bool own_data = true);\n\n  /// \\brief Set the device data address for the MSTensor. 
Only valid for Lite.\n  ///\n  /// \\note The memory pointed to origin data pointer of MSTensor needs to be managed by the user\n  ///\n  /// \\param[in] data A pointer to the device data of the MSTensor.\n  void SetDeviceData(void *data);\n\n  /// \\brief Get the device data address of the MSTensor set by SetDeviceData. Only valid for Lite.\n  ///\n  /// \\return A pointer to the device data of the MSTensor.\n  void *GetDeviceData();\n\n  /// \\brief Get the quantization parameters of the MSTensor. Only valid for Lite.\n  ///\n  /// \\return The quantization parameters of the MSTensor.\n  std::vector<QuantParam> QuantParams() const;\n\n  /// \\brief Set the quantization parameters for the MSTensor. Only valid for Lite.\n  ///\n  /// \\param[in] quant_params The quantization parameters of the MSTensor.\n  void SetQuantParams(std::vector<QuantParam> quant_params);\n\n  const std::shared_ptr<Impl> impl() const { return impl_; }\n\n private:\n  // api without std::string\n  static MSTensor *CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,\n                                const void *data, size_t data_len) noexcept;\n  static MSTensor *CreateRefTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,\n                                   const void *data, size_t data_len, bool own_data) noexcept;\n  static MSTensor CreateDeviceTensor(const std::vector<char> &name, enum DataType type,\n                                     const std::vector<int64_t> &shape, void *data, size_t data_len) noexcept;\n  static MSTensor *CreateTensorFromFile(const std::vector<char> &file, enum DataType type,\n                                        const std::vector<int64_t> &shape) noexcept;\n  static MSTensor *CharStringsToTensor(const std::vector<char> &name, const std::vector<std::vector<char>> &str);\n  static std::vector<std::vector<char>> TensorToStringChars(const MSTensor &tensor);\n\n  MSTensor(const 
std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,\n           size_t data_len);\n  std::vector<char> CharName() const;\n  void SetTensorName(const std::vector<char> &name);\n\n  friend class ModelImpl;\n  std::shared_ptr<Impl> impl_;\n};\n\nclass MS_API Buffer {\n public:\n  Buffer();\n  Buffer(const void *data, size_t data_len);\n  ~Buffer();\n\n  const void *Data() const;\n  void *MutableData();\n  size_t DataSize() const;\n\n  bool ResizeData(size_t data_len);\n  bool SetData(const void *data, size_t data_len);\n\n  Buffer Clone() const;\n\n private:\n  class Impl;\n  std::shared_ptr<Impl> impl_;\n};\n\nMSTensor *MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,\n                                 const void *data, size_t data_len) noexcept {\n  return CreateTensor(StringToChar(name), type, shape, data, data_len);\n}\n\nMSTensor *MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,\n                                    const void *data, size_t data_len, bool own_data) noexcept {\n  return CreateRefTensor(StringToChar(name), type, shape, data, data_len, own_data);\n}\n\nMSTensor MSTensor::CreateDeviceTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,\n                                      void *data, size_t data_len) noexcept {\n  return CreateDeviceTensor(StringToChar(name), type, shape, data, data_len);\n}\n\nMSTensor *MSTensor::CreateTensorFromFile(const std::string &file, enum DataType type,\n                                         const std::vector<int64_t> &shape) noexcept {\n  return CreateTensorFromFile(StringToChar(file), type, shape);\n}\n\nMSTensor *MSTensor::StringsToTensor(const std::string &name, const std::vector<std::string> &str) {\n  return CharStringsToTensor(StringToChar(name), VectorStringToChar(str));\n}\n\nstd::vector<std::string> 
MSTensor::TensorToStrings(const MSTensor &tensor) {\n  return VectorCharToString(TensorToStringChars(tensor));\n}\n\nMSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,\n                   size_t data_len)\n    : MSTensor(StringToChar(name), type, shape, data, data_len) {}\n\nstd::string MSTensor::Name() const { return CharToString(CharName()); }\n\nvoid MSTensor::SetTensorName(const std::string &name) { SetTensorName(StringToChar(name)); }\n\nusing Key = struct Key {\n  const size_t max_key_len = 32;\n  size_t len = 0;\n  unsigned char key[32] = {0};\n  Key() : len(0) {}\n  explicit Key(const char *dec_key, size_t key_len);\n};\n\nconstexpr char kDecModeAesGcm[] = \"AES-GCM\";\n\n/// \\brief CallBackParam defined input arguments for callBack function.\nstruct MSCallBackParam {\n  std::string node_name; /**< node name argument */\n  std::string node_type; /**< node type argument */\n  double execute_time;   /**< gpu execute time */\n};\n\n/// \\brief KernelCallBack defined the function pointer for callBack.\nusing MSKernelCallBack =\n  std::function<bool(const std::vector<MSTensor> & /* inputs */, const std::vector<MSTensor> & /* outputs */,\n                     const MSCallBackParam &opInfo)>;\n\nstd::vector<char> CharVersion();\ninline std::string Version() { return CharToString(CharVersion()); }\n\n}  // namespace mindspore\n#endif  // MINDSPORE_INCLUDE_API_TYPES_H\n"
  },
  {
    "path": "tests/ut/stub/include/api/visible.h",
    "content": "/**\n * Copyright 2022 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef MINDSPORE_INCLUDE_API_VISIBLE_H\n#define MINDSPORE_INCLUDE_API_VISIBLE_H\n\n#ifndef MS_API\n#ifdef _WIN32\n#define MS_API __declspec(dllexport)\n#else\n#define MS_API __attribute__((visibility(\"default\")))\n#endif  // _WIN32\n#endif\n\n#ifdef _MSC_VER\n#ifdef BUILDING_DATASET_DLL\n#define DATASET_API __declspec(dllexport)\n#else\n#define DATASET_API __declspec(dllimport)\n#endif\n#else\n#define DATASET_API __attribute__((visibility(\"default\")))\n#endif  // _MSC_VER\n\n#endif  // MINDSPORE_INCLUDE_API_VISIBLE_H\n"
  },
  {
    "path": "tests/ut/stub/include/mindapi/base/format.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_CORE_MINDAPI_BASE_FORMAT_H_\n#define MINDSPORE_CORE_MINDAPI_BASE_FORMAT_H_\n\n#include <cstdint>\n\nnamespace mindspore {\nenum Format : int64_t {\n  NCHW = 0,\n  NHWC = 1,\n  NHWC4 = 2,\n  HWKC = 3,\n  HWCK = 4,\n  KCHW = 5,\n  CKHW = 6,\n  KHWC = 7,\n  CHWK = 8,\n  HW = 9,\n  HW4 = 10,\n  NC = 11,\n  NC4 = 12,\n  NC4HW4 = 13,\n  NUM_OF_FORMAT = 14,\n  NCDHW = 15,\n  NWC = 16,\n  NCW = 17,\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_CORE_MINDAPI_BASE_FORMAT_H_\n"
  },
  {
    "path": "tests/ut/stub/include/mindapi/base/type_id.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_CORE_MINDAPI_BASE_TYPE_ID_H_\n#define MINDSPORE_CORE_MINDAPI_BASE_TYPE_ID_H_\n\nnamespace mindspore {\n/// \\brief TypeId defines data type identifiers.\nenum TypeId : int {\n  kTypeUnknown = 0,\n  //\n  // Meta types.\n  //\n  kMetaTypeBegin = kTypeUnknown,\n  kMetaTypeType,  // Type\n  kMetaTypeAnything,\n  kMetaTypeObject,\n  kMetaTypeTypeType,  // TypeType\n  kMetaTypeProblem,\n  kMetaTypeExternal,\n  kMetaTypeNone,\n  kMetaTypeNull,\n  kMetaTypeEllipsis,\n  kMetaTypeEnd,\n  //\n  // Object types\n  //\n  kObjectTypeBegin = kMetaTypeEnd,\n  kObjectTypeNumber,\n  kObjectTypeString,\n  kObjectTypeList,\n  kObjectTypeTuple,\n  kObjectTypeSlice,\n  kObjectTypeKeyword,\n  kObjectTypeTensorType,\n  kObjectTypeRowTensorType,\n  kObjectTypeSparseTensorType,\n  kObjectTypeUndeterminedType,\n  kObjectTypeClass,\n  kObjectTypeDictionary,\n  kObjectTypeFunction,\n  kObjectTypeJTagged,\n  kObjectTypeSymbolicKeyType,\n  kObjectTypeEnvType,\n  kObjectTypeRefKey,\n  kObjectTypeRef,\n  kObjectTypeEnd,\n  //\n  // Number Types\n  //\n  kNumberTypeBegin = kObjectTypeEnd,\n  kNumberTypeBool,\n  kNumberTypeInt,\n  kNumberTypeInt8,\n  kNumberTypeInt16,\n  kNumberTypeInt32,\n  kNumberTypeInt64,\n  kNumberTypeUInt,\n  kNumberTypeUInt8,\n  kNumberTypeUInt16,\n  kNumberTypeUInt32,\n  kNumberTypeUInt64,\n  kNumberTypeFloat,\n  
kNumberTypeFloat16,\n  kNumberTypeFloat32,\n  kNumberTypeFloat64,\n  kNumberTypeComplex,\n  kNumberTypeComplex64,\n  kNumberTypeComplex128,\n  kNumberTypeInt4,\n  kNumberTypeGLUInt,\n  kNumberTypeEnd,\n  //\n  // Monad Types\n  //\n  kMonadTypeBegin = kNumberTypeEnd,\n  kObjectTypeMonad,\n  kObjectTypeUMonad,\n  kObjectTypeIOMonad,\n  kMonadTypeEnd,\n  //\n  // Sparse Types\n  //\n  // Sparse types is placed at the end of enum,\n  // in order to keep fit with the type of existing model on the lite side.\n  kSparseTypeBegin = kMonadTypeEnd,\n  kObjectTypeCSRTensorType,\n  kSparseTypeEnd\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_CORE_MINDAPI_BASE_TYPE_ID_H_\n"
  },
  {
    "path": "tests/ut/stub/include/mindapi/base/types.h",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_CORE_MINDAPI_BASE_TYPES_H_\n#define MINDSPORE_CORE_MINDAPI_BASE_TYPES_H_\n\n#include <cstdint>\n\nnamespace mindspore {\nenum CoordinateTransformMode : int64_t {\n  ASYMMETRIC = 0,\n  ALIGN_CORNERS = 1,\n  HALF_PIXEL = 2,\n  CROP_AND_RESIZE = 3,\n};\n\nenum class ResizeMethod : int64_t {\n  UNKNOWN = -1,\n  LINEAR = 0,\n  NEAREST = 1,\n  CUBIC = 2,\n};\n\nenum class NearestMode : int64_t {\n  NORMAL = 0,\n  ROUND_HALF_DOWN = 1,\n  ROUND_HALF_UP = 2,\n  FLOOR = 3,\n  CEIL = 4,\n};\n\nenum RoundMode : int64_t {\n  FLOOR = 0,\n  CEIL = 1,\n};\n\nenum ActivationType : int64_t {\n  NO_ACTIVATION = 0,\n  RELU = 1,\n  SIGMOID = 2,\n  RELU6 = 3,\n  ELU = 4,\n  LEAKY_RELU = 5,\n  ABS = 6,\n  RELU1 = 7,\n  SOFTSIGN = 8,\n  SOFTPLUS = 9,\n  TANH = 10,\n  SELU = 11,\n  HSWISH = 12,\n  HSIGMOID = 13,\n  THRESHOLDRELU = 14,\n  LINEAR = 15,\n  HARD_TANH = 16,\n  SIGN = 17,\n  SWISH = 18,\n  GELU = 19,\n  GLU = 20,\n  UNKNOWN = 21,\n};\n\nenum ReduceMode : int64_t {\n  Reduce_Mean = 0,\n  Reduce_Max = 1,\n  Reduce_Min = 2,\n  Reduce_Prod = 3,\n  Reduce_Sum = 4,\n  Reduce_Sum_Square = 5,\n  Reduce_ASum = 6,\n  Reduce_All = 7,\n};\n\nenum EltwiseMode : int64_t {\n  PROD = 0,\n  SUM = 1,\n  MAXIMUM = 2,\n  ELTWISEMODE_UNKNOW = 3,\n};\n\nenum Reduction : int64_t {\n  REDUCTION_SUM = 0,\n  MEAN = 1,\n  NONE = 2,\n};\n\nenum PadMode : 
int64_t {\n  PAD = 0,\n  SAME = 1,\n  VALID = 2,\n};\n\nenum class LshProjectionType : int64_t {\n  UNKNOWN = 0,\n  SPARSE = 1,\n  DENSE = 2,\n};\n\nenum PaddingMode : int64_t {\n  CONSTANT = 0,\n  REFLECT = 1,\n  SYMMETRIC = 2,\n  MODE_RESERVED = 3,\n};\n}  // namespace mindspore\n#endif  // MINDSPORE_CORE_MINDAPI_BASE_TYPES_H_\n"
  },
  {
    "path": "tests/ut/stub/include/utils/log_adapter.cc",
    "content": "/**\n * Copyright 2019 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"utils/log_adapter.h\"\n\n#define google mindspore_serving_private\n\n#ifndef _MSC_VER\n#include <unistd.h>\n#include <sys/time.h>\n#endif\n#include <map>\n#include <iomanip>\n#include <thread>\n\n// namespace to support utils module definition\nnamespace mindspore {\n// set default log level to WARNING for all sub modules\nint g_ms_submodule_log_levels[NUM_SUBMODUES] = {INFO};\n\nstatic std::string GetProcName() {\n#if defined(__APPLE__) || defined(__FreeBSD__)\n  const std::string appname = getprogname();\n#elif defined(_GNU_SOURCE)\n  const std::string appname = program_invocation_name;\n#else\n  const std::string appname = \"?\";\n#endif\n  // sometimes, the app name is an absolute path, it is too long\n  std::string app_name(appname);\n  std::size_t pos = app_name.rfind(\"/\");\n  if (pos == std::string::npos) {\n    return app_name;\n  }\n  if (pos + 1 >= app_name.size()) {\n    return app_name;\n  }\n  return app_name.substr(pos + 1);\n}\n\nstatic std::string GetLogLevel(MsLogLevel level) {\n#define _TO_STRING(x) #x\n  static const char *const level_names[] = {\n    _TO_STRING(DEBUG),\n    _TO_STRING(INFO),\n    _TO_STRING(WARNING),\n    _TO_STRING(ERROR),\n  };\n#undef _TO_STRING\n  if (level > ERROR) {\n    level = ERROR;\n  }\n  return std::string(level_names[level]);\n}\n\n// convert MsLogLevel to corresponding 
glog level\nstatic int GetGlogLevel(MsLogLevel level) {\n  switch (level) {\n    case DEBUG:\n    case INFO:\n      return google::GLOG_INFO;\n    case WARNING:\n      return google::GLOG_WARNING;\n    case ERROR:\n    default:\n      return google::GLOG_ERROR;\n  }\n}\n\n// get threshold level\nstatic int GetThresholdLevel(const std::string &threshold) {\n  if (threshold.empty()) {\n    return google::GLOG_WARNING;\n  } else if (threshold == std::to_string(DEBUG) || threshold == std::to_string(INFO)) {\n    return google::GLOG_INFO;\n  } else if (threshold == std::to_string(WARNING)) {\n    return google::GLOG_WARNING;\n  } else if (threshold == std::to_string(ERROR)) {\n    return google::GLOG_ERROR;\n  } else {\n    return google::GLOG_WARNING;\n  }\n}\n\nvoid LogWriter::OutputLog(const std::ostringstream &msg) const {\n  auto submodule_name = GetSubModuleName(submodule_);\n  google::LogMessage(\"\", 0, GetGlogLevel(log_level_)).stream()\n#ifdef _MSC_VER\n    << \"[\" << GetLogLevel(log_level_) << \"] \" << submodule_name << \"(\"\n    << \",\" << std::hex\n#else\n    << \"[\" << GetLogLevel(log_level_) << \"] \" << submodule_name << \"(\" << getpid() << \",\" << std::hex\n#endif\n    << std::this_thread::get_id() << std::dec << \",\" << GetProcName() << \"):\" << GetTimeString() << \" \"\n    << \"[\" << location_.file_ << \":\" << location_.line_ << \"] \" << location_.func_ << \"] \" << msg.str() << std::endl;\n}\n\nvoid LogWriter::operator<(const LogStream &stream) const noexcept {\n  std::ostringstream msg;\n  msg << stream.sstream_->rdbuf();\n  OutputLog(msg);\n}\n\nvoid LogWriter::operator^(const LogStream &stream) const {\n  std::ostringstream msg;\n  msg << stream.sstream_->rdbuf();\n  std::ostringstream oss;\n  oss << location_.file_ << \":\" << location_.line_ << \" \" << location_.func_ << \"] \";\n  oss << msg.str();\n\n  thread_local bool running = false;\n  if (!running) {\n    running = true;\n    OutputLog(msg);\n    if (trace_provider_ != 
nullptr) {\n      trace_provider_(oss);\n    }\n    running = false;\n  }\n\n  if (exception_handler_ != nullptr) {\n    exception_handler_(exception_type_, oss.str());\n  }\n  throw std::runtime_error(oss.str());\n}\n\nstatic std::string GetEnv(const std::string &envvar) {\n  const char *value = ::getenv(envvar.c_str());\n\n  if (value == nullptr) {\n    return std::string();\n  }\n\n  return std::string(value);\n}\n\nenum class LogConfigToken : size_t {\n  INVALID,      // indicate invalid token\n  LEFT_BRACE,   // '{'\n  RIGHT_BRACE,  // '}'\n  VARIABLE,     // '[A-Za-z][A-Za-z0-9_]*'\n  NUMBER,       // [0-9]+\n  COMMA,        // ','\n  COLON,        // ':'\n  EOS,          // End Of String, '\\0'\n  NUM_LOG_CFG_TOKENS\n};\n\nstatic const char *g_tok_names[static_cast<size_t>(LogConfigToken::NUM_LOG_CFG_TOKENS)] = {\n  \"invalid\",        // indicate invalid token\n  \"{\",              // '{'\n  \"}\",              // '}'\n  \"variable\",       // '[A-Za-z][A-Za-z0-9_]*'\n  \"number\",         // [0-9]+\n  \",\",              // ','\n  \":\",              // ':'\n  \"end-of-string\",  // End Of String, '\\0'\n};\n\nstatic inline bool IsAlpha(char ch) { return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z'); }\n\nstatic inline bool IsDigit(char ch) { return ch >= '0' && ch <= '9'; }\n\nclass LogConfigLexer {\n public:\n  explicit LogConfigLexer(const std::string &text) : buffer_(text), cur_idx_(0) {}\n  ~LogConfigLexer() = default;\n\n  // skip white space, and return the first char after white space\n  char SkipWhiteSpace() {\n    while (cur_idx_ < buffer_.size()) {\n      char ch = buffer_[cur_idx_];\n      if (ch == ' ' || ch == '\\t') {\n        ++cur_idx_;\n        continue;\n      }\n      return ch;\n    }\n    return '\\0';\n  }\n\n  LogConfigToken GetNext(std::string *const ptr) {\n#ifdef DEBUG\n    std::string text;\n    auto tok = GetNextInner(&text);\n    MS_LOG(DEBUG) << \"Got token \" << tok << \" with value [\" << text << \"]\";\n    if (ptr 
!= nullptr) {\n      *ptr = text;\n    }\n    return tok;\n  }\n\n  LogConfigToken GetNextInner(std::string *ptr) {\n#endif\n    char ch = SkipWhiteSpace();\n    // clang-format off\n    static const std::map<char, LogConfigToken> single_char_map = {\n      {'{', LogConfigToken::LEFT_BRACE},\n      {'}', LogConfigToken::RIGHT_BRACE},\n      {',', LogConfigToken::COMMA},\n      {':', LogConfigToken::COLON},\n      {'\\0', LogConfigToken::EOS},\n    };\n    // clang-format on\n\n    auto iter = single_char_map.find(ch);\n    if (iter != single_char_map.end()) {\n      if (ptr != nullptr) {\n        *ptr = std::string() + ch;\n      }\n      ++cur_idx_;\n      return iter->second;\n    } else if (IsAlpha(ch)) {\n      std::ostringstream oss;\n      do {\n        oss << ch;\n        ch = buffer_[++cur_idx_];\n      } while (cur_idx_ < buffer_.size() && (IsAlpha(ch) || IsDigit(ch) || ch == '_'));\n      if (ptr != nullptr) {\n        *ptr = std::string(oss.str());\n      }\n      return LogConfigToken::VARIABLE;\n    } else if (IsDigit(ch)) {\n      std::ostringstream oss;\n      do {\n        oss << ch;\n        ch = buffer_[++cur_idx_];\n      } while (cur_idx_ < buffer_.size() && IsDigit(ch));\n      if (ptr != nullptr) {\n        *ptr = std::string(oss.str());\n      }\n      return LogConfigToken::NUMBER;\n    }\n    return LogConfigToken::INVALID;\n  }\n\n private:\n  std::string buffer_;\n  size_t cur_idx_;\n};\n\nclass LogConfigParser {\n public:\n  explicit LogConfigParser(const std::string &cfg) : lexer(cfg) {}\n  ~LogConfigParser() = default;\n\n  bool Expect(LogConfigToken expected, LogConfigToken tok) const {\n    if (expected != tok) {\n      MS_LOG(WARNING) << \"Parse submodule log configuration text error, expect `\"\n                      << g_tok_names[static_cast<size_t>(expected)] << \"`, but got `\"\n                      << g_tok_names[static_cast<size_t>(tok)] << \"`. 
The whole configuration will be ignored.\";\n      return false;\n    }\n    return true;\n  }\n\n  // The text of config MS_SUBMODULE_LOG_v is in the form {submodule1:log_level1,submodule2:log_level2,...}.\n  // Valid values of log levels are: 0 - debug, 1 - info, 2 - warning, 3 - error\n  // e.g. MS_SUBMODULE_LOG_v={PARSER:0, ANALYZER:2, PIPELINE:1}\n  std::map<std::string, std::string> Parse() {\n    std::map<std::string, std::string> log_levels;\n\n    bool flag_error = false;\n    std::string text;\n    auto tok = lexer.GetNext(&text);\n    // empty string\n    if (tok == LogConfigToken::EOS) {\n      return log_levels;\n    }\n\n    if (!Expect(LogConfigToken::LEFT_BRACE, tok)) {\n      return log_levels;\n    }\n\n    do {\n      std::string key, val;\n      tok = lexer.GetNext(&key);\n      if (!Expect(LogConfigToken::VARIABLE, tok)) {\n        flag_error = true;\n        break;\n      }\n\n      tok = lexer.GetNext(&text);\n      if (!Expect(LogConfigToken::COLON, tok)) {\n        flag_error = true;\n        break;\n      }\n\n      tok = lexer.GetNext(&val);\n      if (!Expect(LogConfigToken::NUMBER, tok)) {\n        flag_error = true;\n        break;\n      }\n\n      log_levels[key] = val;\n      tok = lexer.GetNext(&text);\n    } while (tok == LogConfigToken::COMMA);\n\n    if (!flag_error && !Expect(LogConfigToken::RIGHT_BRACE, tok)) {\n      flag_error = true;\n    }\n\n    if (flag_error) {\n      log_levels.clear();\n    }\n    return log_levels;\n  }\n\n private:\n  LogConfigLexer lexer;\n};\n\nbool ParseLogLevel(const std::string &str_level, MsLogLevel *ptr_level) {\n  constexpr char number_start = '0';\n  if (str_level.size() == 1) {\n    int ch = str_level.c_str()[0];\n    ch = ch - number_start;  // subtract ASCII code of '0', which is 48\n    if (ch >= DEBUG && ch <= ERROR) {\n      if (ptr_level != nullptr) {\n        *ptr_level = static_cast<MsLogLevel>(ch);\n      }\n      return true;\n    }\n  }\n  return false;\n}\n\nstatic MsLogLevel 
GetGlobalLogLevel() {\n  return static_cast<MsLogLevel>(FLAGS_v);\n}\n\nvoid InitSubModulesLogLevel() {\n  // initialize submodule's log level using global\n  auto global_log_level = GetGlobalLogLevel();\n  for (int i = 0; i < NUM_SUBMODUES; ++i) {\n    g_ms_submodule_log_levels[i] = global_log_level;\n  }\n\n  // set submodule's log level\n  auto submodule = GetEnv(\"MS_SUBMODULE_LOG_v\");\n  MS_LOG(DEBUG) << \"MS_SUBMODULE_LOG_v=`\" << submodule << \"`\";\n  LogConfigParser parser(submodule);\n  auto configs = parser.Parse();\n  for (const auto &cfg : configs) {\n    int mod_idx = -1;\n    for (int i = 0; i < NUM_SUBMODUES; ++i) {\n      if (cfg.first == GetSubModuleName(static_cast<SubModuleId>(i))) {\n        mod_idx = i;\n        break;\n      }\n    }\n    if (mod_idx < 0) {\n      MS_LOG(WARNING) << \"Undefined module name \" << cfg.first << \", ignore it\";\n      continue;\n    }\n    MsLogLevel submodule_log_level;\n    if (!ParseLogLevel(cfg.second, &submodule_log_level)) {\n      MS_LOG(WARNING) << \"Illegal log level value \" << cfg.second << \" for \" << cfg.first << \", ignore it.\";\n      continue;\n    }\n    g_ms_submodule_log_levels[mod_idx] = submodule_log_level;\n  }\n}\n}  // namespace mindspore\n\nextern \"C\" {\n#if defined(_WIN32) || defined(_WIN64)\n#ifdef _MSC_VER\nvoid common_log_init(void) {\n#else\n__attribute__((constructor)) void common_log_init(void) {\n#endif\n#else\nvoid common_log_init(void) {\n#endif\n  // Do not use glog predefined log prefix\n  FLAGS_log_prefix = false;\n  // Write log to files real-time\n  FLAGS_logbufsecs = 0;\n  // Set default log level to WARNING\n  if (mindspore::GetEnv(\"GLOG_v\").empty()) {\n    FLAGS_v = mindspore::WARNING;\n  }\n\n  // Set default log file mode to 0640\n  if (mindspore::GetEnv(\"GLOG_logfile_mode\").empty()) {\n    FLAGS_logfile_mode = 0640;\n  }\n  // Set default log file max size to 50 MB\n  FLAGS_max_log_size = 50;\n  std::string max_log_size = 
mindspore::GetEnv(\"GLOG_max_log_size\");\n  if (!max_log_size.empty()) {\n    FLAGS_max_log_size = std::stoi(max_log_size);\n  }\n  std::string logtostderr = mindspore::GetEnv(\"GLOG_logtostderr\");\n  // Default print log to screen\n  if (logtostderr.empty()) {\n    FLAGS_logtostderr = true;\n  } else if (logtostderr == \"0\") {\n    if (mindspore::GetEnv(\"GLOG_log_dir\").empty()) {\n      MS_LOG(ERROR) << \"`GLOG_log_dir` is empty, it must be set while 'logtostderr' equals to 0.\";\n      // Here can not throw exception and use python to catch, because the PYBIND11_MODULE is not yet been initialed.\n      exit(EXIT_FAILURE);\n    } else {\n      // Set log dir from GLOG_log_dir with RANK_ID or OMPI_COMM_WORLD_RANK.\n      std::string rank_id = mindspore::GetEnv(\"RANK_ID\");\n      std::string gpu_rank_id = mindspore::GetEnv(\"OMPI_COMM_WORLD_RANK\");\n      std::string rank = \"0\";\n      if ((!rank_id.empty() && gpu_rank_id.empty()) || (!rank_id.empty() && !gpu_rank_id.empty())) {\n        rank = rank_id;\n      } else if (rank_id.empty() && !gpu_rank_id.empty()) {\n        rank = gpu_rank_id;\n      }\n      FLAGS_log_dir = mindspore::GetEnv(\"GLOG_log_dir\") + \"/rank_\" + rank + \"/logs\";\n    }\n  }\n\n  // Default GLOG_stderrthreshold level to WARNING\n  auto threshold = mindspore::GetEnv(\"GLOG_stderrthreshold\");\n  FLAGS_stderrthreshold = mindspore::GetThresholdLevel(threshold);\n  mindspore::InitSubModulesLogLevel();\n}\n}\n#undef google\n"
  },
  {
    "path": "tests/ut/stub/include/utils/log_adapter.h",
    "content": "/**\n * Copyright 2019-2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_CORE_UTILS_LOG_ADAPTER_H_\n#define MINDSPORE_CORE_UTILS_LOG_ADAPTER_H_\n\n#include <stdarg.h>\n#include <stdint.h>\n#include <string>\n#include <sstream>\n#include <memory>\n#include <map>\n#include <thread>\n#include <functional>\n#include \"utils/visible.h\"\n#include \"utils/overload.h\"\n#include \"./securec.h\"\n\n#define google mindspore_serving_private\n#include \"glog/logging.h\"\n#undef google\n\n// NOTICE: when relative path of 'log_adapter.h' changed, macro 'LOG_HDR_FILE_REL_PATH' must be changed\n#define LOG_HDR_FILE_REL_PATH \"mindspore/core/utils/log_adapter.h\"\n\n// Get start index of file relative path in __FILE__\nstatic constexpr size_t GetRelPathPos() noexcept {\n  return sizeof(__FILE__) > sizeof(LOG_HDR_FILE_REL_PATH) ? sizeof(__FILE__) - sizeof(LOG_HDR_FILE_REL_PATH) : 0;\n}\n\nnamespace mindspore {\nMS_CORE_API extern std::map<void **, std::thread *> acl_handle_map;\n#define FILE_NAME                                                                             \\\n  (sizeof(__FILE__) > GetRelPathPos() ? 
static_cast<const char *>(__FILE__) + GetRelPathPos() \\\n                                      : static_cast<const char *>(__FILE__))\nenum ExceptionType {\n  NoExceptionType = 0,\n  UnknownError,\n  ArgumentError,\n  NotSupportError,\n  NotExistsError,\n  AlreadyExistsError,\n  UnavailableError,\n  DeviceProcessError,\n  AbortedError,\n  TimeOutError,\n  ResourceUnavailable,\n  NoPermissionError,\n  IndexError,\n  ValueError,\n  TypeError,\n  KeyError,\n  AttributeError,\n  NameError\n};\n\nstruct LocationInfo {\n  LocationInfo(const char *file, int line, const char *func) : file_(file), line_(line), func_(func) {}\n  ~LocationInfo() = default;\n\n  const char *file_;\n  int line_;\n  const char *func_;\n};\n\nclass LogStream {\n public:\n  LogStream() { sstream_ = std::make_shared<std::stringstream>(); }\n  ~LogStream() = default;\n\n  template <typename T>\n  LogStream &operator<<(const T &val) noexcept {\n    (*sstream_) << val;\n    return *this;\n  }\n\n  LogStream &operator<<(std::ostream &func(std::ostream &os)) noexcept {\n    (*sstream_) << func;\n    return *this;\n  }\n\n  friend class LogWriter;\n\n private:\n  std::shared_ptr<std::stringstream> sstream_;\n};\n\ntemplate <class T, typename std::enable_if<std::is_enum<T>::value, int>::type = 0>\nconstexpr std::ostream &operator<<(std::ostream &stream, const T &value) {\n  return stream << static_cast<typename std::underlying_type<T>::type>(value);\n}\n\nenum MsLogLevel : int { DEBUG = 0, INFO, WARNING, ERROR, EXCEPTION };\n\nenum SubModuleId : int {\n  SM_UNKNOWN = 0,        // unknown submodule\n  SM_CORE,               // core\n  SM_ANALYZER,           // static analyzer\n  SM_COMMON,             // common\n  SM_DEBUG,              // debug\n  SM_OFFLINE_DEBUG,      // offline debug\n  SM_DEVICE,             // device\n  SM_GE_ADPT,            // ge adapter\n  SM_IR,                 // IR\n  SM_KERNEL,             // kernel\n  SM_MD,                 // MindData\n  SM_ME,                 // 
MindExpression\n  SM_EXPRESS,            // EXPRESS_IR\n  SM_OPTIMIZER,          // optimzer\n  SM_PARALLEL,           // parallel\n  SM_PARSER,             // parser\n  SM_PIPELINE,           // ME pipeline\n  SM_PRE_ACT,            // pre-activate\n  SM_PYNATIVE,           // PyNative\n  SM_SESSION,            // session\n  SM_UTILS,              // utils\n  SM_VM,                 // VM\n  SM_PROFILER,           // profiler\n  SM_PS,                 // Parameter Server\n  SM_FL,                 // Federated Learning\n  SM_LITE,               // LITE\n  SM_ARMOUR,             // ARMOUR\n  SM_HCCL_ADPT,          // Hccl Adapter\n  SM_MINDQUANTUM,        // MindQuantum\n  SM_RUNTIME_FRAMEWORK,  // Runtime framework\n  SM_GE,                 // GraphEngine\n  NUM_SUBMODUES          // number of submodules\n};\n\n#ifndef SUBMODULE_ID\n#define SUBMODULE_ID mindspore::SubModuleId::SM_ME\n#endif\n\nMS_EXPORT const std::string GetSubModuleName(SubModuleId module_id);\n\nconst char *EnumStrForMsLogLevel(MsLogLevel level);\n\nMS_EXPORT std::string GetTimeString();\n\nMS_EXPORT extern int g_ms_submodule_log_levels[];\n\nclass LogWriter {\n public:\n  using ExceptionHandler = std::function<void(ExceptionType, const std::string &msg)>;\n  using TraceProvider = std::function<void(std::ostringstream &oss)>;\n\n  LogWriter(const LocationInfo &location, MsLogLevel log_level, SubModuleId submodule,\n            ExceptionType excp_type = NoExceptionType)\n      : location_(location), log_level_(log_level), submodule_(submodule), exception_type_(excp_type) {}\n  ~LogWriter() = default;\n\n  MS_CORE_API void operator<(const LogStream &stream) const noexcept;\n  MS_CORE_API void operator^(const LogStream &stream) const __attribute__((noreturn));\n\n  static void set_exception_handler(ExceptionHandler exception_handler) { exception_handler_ = exception_handler; }\n  static void set_trace_provider(TraceProvider trace_provider) { trace_provider_ = trace_provider; }\n  static TraceProvider 
trace_provider() { return trace_provider_; }\n\n private:\n  void OutputLog(const std::ostringstream &msg) const;\n\n  LocationInfo location_;\n  MsLogLevel log_level_;\n  SubModuleId submodule_;\n  ExceptionType exception_type_;\n\n  inline static ExceptionHandler exception_handler_ = nullptr;\n  inline static TraceProvider trace_provider_ = nullptr;\n};\n\n#define MSLOG_IF(level, condition, excp_type)                                                                       \\\n  static_cast<void>(0), !(condition)                                                                                \\\n                          ? void(0)                                                                                 \\\n                          : mindspore::LogWriter(mindspore::LocationInfo(FILE_NAME, __LINE__, __FUNCTION__), level, \\\n                                                 SUBMODULE_ID, excp_type) < mindspore::LogStream()\n#define MSLOG_THROW(excp_type)                                                                                         \\\n  mindspore::LogWriter(mindspore::LocationInfo(FILE_NAME, __LINE__, __FUNCTION__), mindspore::EXCEPTION, SUBMODULE_ID, \\\n                       excp_type) ^                                                                                    \\\n    mindspore::LogStream()\n\n#define IS_OUTPUT_ON(level) ((level) >= mindspore::g_ms_submodule_log_levels[SUBMODULE_ID])\n\n#define MS_LOG(level) MS_LOG_##level\n\n#define MS_LOG_DEBUG MSLOG_IF(mindspore::DEBUG, IS_OUTPUT_ON(mindspore::DEBUG), mindspore::NoExceptionType)\n#define MS_LOG_INFO MSLOG_IF(mindspore::INFO, IS_OUTPUT_ON(mindspore::INFO), mindspore::NoExceptionType)\n#define MS_LOG_WARNING MSLOG_IF(mindspore::WARNING, IS_OUTPUT_ON(mindspore::WARNING), mindspore::NoExceptionType)\n#define MS_LOG_ERROR MSLOG_IF(mindspore::ERROR, IS_OUTPUT_ON(mindspore::ERROR), mindspore::NoExceptionType)\n\n#define MS_LOG_EXCEPTION MSLOG_THROW(mindspore::NoExceptionType)\n#define 
MS_EXCEPTION(type) MSLOG_THROW(type)\n}  // namespace mindspore\n\n#define MS_EXCEPTION_IF_NULL(ptr)                                    \\\n  do {                                                               \\\n    if ((ptr) == nullptr) {                                          \\\n      MS_LOG(EXCEPTION) << \": The pointer[\" << #ptr << \"] is null.\"; \\\n    }                                                                \\\n  } while (0)\n\n#define MS_EXCEPTION_IF_ZERO(name, value)                   \\\n  do {                                                      \\\n    if (value == 0) {                                       \\\n      MS_LOG(EXCEPTION) << \": The \" << name << \" is zero.\"; \\\n    }                                                       \\\n  } while (0)\n\n#define MS_ERROR_IF_NULL(ptr)                                    \\\n  do {                                                           \\\n    if ((ptr) == nullptr) {                                      \\\n      MS_LOG(ERROR) << \": The pointer[\" << #ptr << \"] is null.\"; \\\n      return false;                                              \\\n    }                                                            \\\n  } while (0)\n\n#define MS_ERROR_IF_NULL_W_RET_VAL(ptr, val)                     \\\n  do {                                                           \\\n    if ((ptr) == nullptr) {                                      \\\n      MS_LOG(ERROR) << \": The pointer[\" << #ptr << \"] is null.\"; \\\n      return val;                                                \\\n    }                                                            \\\n  } while (0)\n\n#define MS_ERROR_IF_NULL_WO_RET_VAL(ptr)                         \\\n  do {                                                           \\\n    if ((ptr) == nullptr) {                                      \\\n      MS_LOG(ERROR) << \": The pointer[\" << #ptr << \"] is null.\"; \\\n      return;                                           
         \\\n    }                                                            \\\n  } while (0)\n\n#ifdef DEBUG\n#include <cassert>\n#define MS_ASSERT(f) assert(f)\n#else\n#define MS_ASSERT(f) ((void)0)\n#endif\n\n#endif  // MINDSPORE_CORE_UTILS_LOG_ADAPTER_H_\n"
  },
  {
    "path": "tests/ut/stub/include/utils/log_adapter_common.cc",
    "content": "/**\n * Copyright 2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef _MSC_VER\n#include <sys/time.h>\n#endif\n#include <string>\n#include <iomanip>\n#include <sstream>\n#include \"utils/log_adapter.h\"\n\nnamespace mindspore {\nstatic const std::vector<std::string> sub_module_names = {\n  \"UNKNOWN\",            // SM_UNKNOWN\n  \"CORE\",               // SM_CORE\n  \"ANALYZER\",           // SM_ANALYZER\n  \"COMMON\",             // SM_COMMON\n  \"DEBUG\",              // SM_DEBUG\n  \"OFFLINE_DEBUG\",      // SM_OFFLINE_DEBUG\n  \"DEVICE\",             // SM_DEVICE\n  \"GE_ADPT\",            // SM_GE_ADPT\n  \"IR\",                 // SM_IR\n  \"KERNEL\",             // SM_KERNEL\n  \"MD\",                 // SM_MD\n  \"ME\",                 // SM_ME\n  \"EXPRESS\",            // SM_EXPRESS\n  \"OPTIMIZER\",          // SM_OPTIMIZER\n  \"PARALLEL\",           // SM_PARALLEL\n  \"PARSER\",             // SM_PARSER\n  \"PIPELINE\",           // SM_PIPELINE\n  \"PRE_ACT\",            // SM_PRE_ACT\n  \"PYNATIVE\",           // SM_PYNATIVE\n  \"SESSION\",            // SM_SESSION\n  \"UTILS\",              // SM_UTILS\n  \"VM\",                 // SM_VM\n  \"PROFILER\",           // SM_PROFILER\n  \"PS\",                 // SM_PS\n  \"FL\",                 // SM_FL\n  \"LITE\",               // SM_LITE\n  \"ARMOUR\",             // SM_ARMOUR\n  \"HCCL_ADPT\",          // SM_HCCL_ADPT\n  
\"MINDQUANTUM\",        // SM_MINDQUANTUM\n  \"RUNTIME_FRAMEWORK\",  // SM_RUNTIME_FRAMEWORK\n  \"GE\",                 // SM_GE\n};\n\nconst std::string GetSubModuleName(SubModuleId module_id) {\n  return sub_module_names[static_cast<size_t>(module_id % NUM_SUBMODUES)];\n}\n\nstd::string GetTimeString() {\n#if defined(_WIN32) || defined(_WIN64)\n  time_t time_seconds = time(0);\n  struct tm now_time;\n  localtime_s(&now_time, &time_seconds);\n  constexpr int base_year = 1900;\n  std::stringstream ss;\n  ss << now_time.tm_year + base_year << \"-\" << now_time.tm_mon + 1 << \"-\" << now_time.tm_mday << \" \" << now_time.tm_hour\n     << \":\" << now_time.tm_min << \":\" << now_time.tm_sec;\n  return ss.str();\n#else\n  constexpr auto BUFLEN = 80;\n  char buf[BUFLEN] = {'\\0'};\n  struct timeval cur_time;\n  (void)gettimeofday(&cur_time, nullptr);\n\n  struct tm now;\n  constexpr int width = 3;\n  constexpr int64_t time_convert_unit = 1000;\n  (void)localtime_r(&cur_time.tv_sec, &now);\n  (void)strftime(buf, BUFLEN, \"%Y-%m-%d-%H:%M:%S\", &now);  // format date and time\n  std::stringstream ss;\n  ss << \".\" << std::setfill('0') << std::setw(width) << cur_time.tv_usec / time_convert_unit << \".\" << std::setfill('0')\n     << std::setw(width) << cur_time.tv_usec % time_convert_unit;\n  return std::string(buf) + ss.str();\n#endif\n}\n}  // namespace mindspore\n"
  },
  {
    "path": "tests/ut/stub/include/utils/overload.h",
    "content": "/**\n * Copyright 2019 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_CORE_UTILS_OVERLOAD_H_\n#define MINDSPORE_CORE_UTILS_OVERLOAD_H_\n\n#include <list>\n#include <utility>\n#include <vector>\n#include <iostream>\n#include <initializer_list>\n#include <unordered_map>\n#include <map>\n#include <memory>\n#include <string>\n\nnamespace mindspore {\ntemplate <typename T>\nstd::ostream &operator<<(std::ostream &out, const std::vector<T> &v) {\n  out << \"[const vector][\";\n  size_t last = v.size() - 1;\n  for (size_t i = 0; i < v.size(); ++i) {\n    out << v[i];\n    if (i != last) out << \", \";\n  }\n  out << \"]\";\n  return out;\n}\n\ntemplate <typename T>\nstd::ostream &operator<<(std::ostream &os, const std::list<T> &vec) {\n  bool begin = true;\n  os << \"[const list][\";\n  for (auto &item : vec) {\n    if (!begin) {\n      os << \", \";\n    } else {\n      begin = false;\n    }\n    os << item;\n  }\n  os << \"]\";\n\n  return os;\n}\n\ntemplate <typename T>\nstd::ostream &operator<<(std::ostream &os, const std::initializer_list<T> &vec) {\n  bool begin = true;\n  os << \"[\";\n  for (auto &item : vec) {\n    if (!begin) {\n      os << \", \";\n    } else {\n      begin = false;\n    }\n    os << item;\n  }\n  os << \"]\";\n\n  return os;\n}\n\ntemplate <typename T>\nbool operator==(const std::initializer_list<T> &lhs, const std::initializer_list<T> &rhs) {\n  if (lhs.size() 
!= rhs.size()) {\n    return false;\n  }\n  auto lit = lhs.begin();\n  auto rit = rhs.begin();\n  while (lit != lhs.end()) {\n    if (!(*lit == *rit)) {\n      return false;\n    }\n    lit++;\n    rit++;\n  }\n  return true;\n}\n\ntemplate <typename T1, typename T2>\nstd::ostream &operator<<(std::ostream &os, const std::pair<T1, T2> &pair) {\n  os << \"[const pair]\";\n\n  return os;\n}\n\ntemplate <typename T1, typename T2, typename T3>\nstd::ostream &operator<<(std::ostream &os, const std::unordered_map<T1, T2, T3> &map) {\n  os << \"[const unordered_map]\";\n  return os;\n}\n\ntemplate <typename T1, typename T2, typename T3>\nstd::ostream &operator<<(std::ostream &os, const std::map<T1, T2, T3> &map) {\n  os << \"[const map]\";\n  return os;\n}\n\ntemplate <typename T>\nstd::string ToString(const std::vector<T> &vec) {\n  std::ostringstream buffer;\n\n  buffer << vec;\n  return buffer.str();\n}\n\ntemplate <typename T1, typename T2>\nstd::string ToString(const std::unordered_map<T1, T2> &map) {\n  std::ostringstream buffer;\n\n  buffer << map;\n  return buffer.str();\n}\n\ntemplate <typename T1, typename T2>\nstd::string ToString(const std::map<T1, T2> &map) {\n  std::ostringstream buffer;\n\n  buffer << map;\n  return buffer.str();\n}\n}  // namespace mindspore\n\n#endif  // MINDSPORE_CORE_UTILS_OVERLOAD_H_\n"
  },
  {
    "path": "tests/ut/stub/include/utils/utils.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_STUB_SERVING_UTILS_H\n#define MINDSPORE_STUB_SERVING_UTILS_H\n\n#include <unistd.h>\n#include <memory>\n#include <atomic>\n#include <string>\n#include <vector>\n#include <set>\n#include <fstream>\n#include \"utils/log_adapter.h\"\n\nnamespace mindspore {\n\nclass FuncGraph {\n public:\n  explicit FuncGraph(const std::string &file_name) : file_name_(file_name) {}\n  const std::string file_name_;\n};\nusing FuncGraphPtr = std::shared_ptr<FuncGraph>;\n\nnamespace common {\nstatic inline const char *SafeCStr(const std::string &str) {\n  const int CACHED_STR_NUM = 1 << 8;\n  const int CACHED_STR_MASK = CACHED_STR_NUM - 1;\n  std::vector<std::string> STR_HOLDER(CACHED_STR_NUM);\n\n  static std::atomic<uint32_t> index{0};\n  uint32_t cur_index = index++;\n  cur_index = cur_index & CACHED_STR_MASK;\n  STR_HOLDER[cur_index] = str;\n  return STR_HOLDER[cur_index].c_str();\n}\n\nstatic inline bool DirOrFileExist(const std::string &file_path) {\n  int ret = access(file_path.c_str(), 0);\n  return ret != -1;\n}\n\n}  // namespace common\n\nstatic inline size_t IntToSize(int i) { return static_cast<size_t>(i); }\n\ntypedef unsigned char Byte;\n\nstatic inline std::unique_ptr<Byte[]> Decrypt(size_t *decrypt_len, const std::string &encrypt_data_path,\n                                              const Byte *key, const size_t 
key_len, const std::string &dec_mode) {\n  auto bytes = new Byte[10];\n  return std::unique_ptr<Byte[]>(bytes);\n}\n\nstatic inline std::unique_ptr<Byte[]> Decrypt(size_t *decrypt_len, const Byte *model_data, const size_t data_size,\n                                              const Byte *key, const size_t key_len, const std::string &dec_mode) {\n  auto bytes = new Byte[10];\n  return std::unique_ptr<Byte[]>(bytes);\n}\n\nstatic inline bool IsCipherFile(const std::string &file_path) { return false; }\n\nstatic inline bool IsCipherFile(const Byte *model_data) { return false; }\n\nstatic inline std::shared_ptr<FuncGraph> LoadMindIR(const std::string &file_name, bool is_lite,\n                                                    const unsigned char *dec_key, const size_t key_len,\n                                                    const std::string &dec_mode) {\n  std::ifstream ifs(file_name);\n  if (!ifs.good()) {\n    MS_LOG(ERROR) << \"File: \" << file_name << \" is not exist\";\n    return nullptr;\n  }\n  if (!ifs.is_open()) {\n    MS_LOG(ERROR) << \"File: \" << file_name << \"open failed\";\n    return nullptr;\n  }\n  return std::make_shared<FuncGraph>(file_name);\n}\n\nstatic inline std::vector<std::shared_ptr<FuncGraph>> LoadMindIRs(\n  const std::vector<std::string> file_names, bool is_lite = false, const unsigned char *dec_key = nullptr,\n  const size_t key_len = 0, const std::string &dec_mode = std::string(\"AES-GCM\")) {\n  std::vector<std::shared_ptr<FuncGraph>> graphs;\n  for (auto &file_name : file_names) {\n    std::ifstream ifs(file_name);\n    if (!ifs.good()) {\n      MS_LOG(ERROR) << \"File: \" << file_name << \" is not exist\";\n      return {};\n    }\n    if (!ifs.is_open()) {\n      MS_LOG(ERROR) << \"File: \" << file_name << \"open failed\";\n      return {};\n    }\n    graphs.push_back(std::make_shared<FuncGraph>(file_name));\n  }\n  return graphs;\n}\n\nstatic inline std::shared_ptr<FuncGraph> ConvertStreamToFuncGraph(const char *buf, 
const size_t buf_size,\n                                                                  bool is_lite = false) {\n  return std::make_shared<FuncGraph>(\"\");\n}\n\nclass MSTensor::Impl {\n public:\n  Impl() = default;\n  virtual ~Impl() = default;\n\n  virtual const std::string &Name() const = 0;\n  virtual enum DataType DataType() const = 0;\n  virtual const std::vector<int64_t> &Shape() const = 0;\n\n  virtual std::shared_ptr<const void> Data() const = 0;\n  virtual void *MutableData() = 0;\n  virtual size_t DataSize() const = 0;\n\n  virtual bool IsDevice() const = 0;\n\n  virtual std::shared_ptr<Impl> Clone() const = 0;\n};\n\n}  // namespace mindspore\n\n#endif  // MINDSPORE_STUB_SERVING_UTILS_H\n"
  },
  {
    "path": "tests/ut/stub/include/utils/visible.h",
    "content": "/**\n * Copyright 2019-2021 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MINDSPORE_CORE_UTILS_VISIBLE_H_\n#define MINDSPORE_CORE_UTILS_VISIBLE_H_\n\n#if (defined(_WIN32) || defined(__WIN32__) || defined(WIN32) || defined(__CYGWIN__))\n#ifdef BUILDING_DLL\n#define MS_CORE_API __declspec(dllexport)\n#define MS_EXPORT __declspec(dllexport)\n#else\n#define MS_CORE_API __declspec(dllimport)\n#define MS_EXPORT __declspec(dllimport)\n#endif\n#define MS_LOCAL\n#else\n#define MS_CORE_API __attribute__((visibility(\"default\")))\n#define MS_EXPORT __attribute__((visibility(\"default\")))\n#define MS_LOCAL __attribute__((visibility(\"hidden\")))\n#endif\n\n#endif  // MINDSPORE_CORE_UTILS_VISIBLE_H_\n"
  },
  {
    "path": "tests/ut/stub/stub_inference.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include <memory>\n#include \"worker/inference/inference.h\"\n#include \"worker/inference/mindspore_model_wrap.h\"\n\nnamespace mindspore::serving {\n\nInferenceLoader::InferenceLoader() {}\nInferenceLoader::~InferenceLoader() {}\n\nstd::string ModelContext::AsString() const {\n  std::map<std::string, std::string> output_map;\n  if (thread_num > -1) {\n    output_map[\"thread num\"] = AsStringHelper::AsString(thread_num);\n  }\n  if (!thread_affinity_core_list.empty()) {\n    output_map[\"thread affinity core list\"] = AsStringHelper::AsString(thread_affinity_core_list);\n  }\n  if (enable_parallel > -1) {\n    output_map[\"enable parallel\"] = AsStringHelper::AsString(enable_parallel);\n  }\n  if (!device_list.empty()) {\n    output_map[\"device infos\"] = AsStringHelper::AsString(device_list);\n  }\n  return AsStringHelper::AsString(output_map);\n}\n\nInferenceLoader &InferenceLoader::Instance() {\n  static InferenceLoader inference;\n  return inference;\n}\n\nstd::shared_ptr<InferenceBase> InferenceLoader::CreateMindSporeInfer() {\n  return std::make_shared<MindSporeModelWrap>();\n}\n\nStatus InferenceLoader::LoadMindSporeModelWrap() { return SUCCESS; }\n\nbool InferenceLoader::GetEnableLite() const { return enable_lite_; }\n\nDeviceType InferenceLoader::GetSupportDeviceType(DeviceType device_type, ModelType model_type) {\n  auto 
mindspore_infer = CreateMindSporeInfer();\n  if (mindspore_infer == nullptr) {\n    MSI_LOG_ERROR << \"Create MindSpore infer failed\";\n    return kDeviceTypeNotSpecified;\n  }\n  std::vector<ModelType> check_model_types;\n  if (model_type == kUnknownType) {\n    check_model_types = {kMindIR, kMindIR_Lite, kOM};\n  } else {\n    check_model_types = {model_type};\n  }\n  for (auto &model_type_item : check_model_types) {\n    if (device_type == kDeviceTypeNotSpecified) {\n      auto device_list = {kDeviceTypeAscend, kDeviceTypeGpu, kDeviceTypeCpu};\n      for (auto item : device_list) {\n        if (mindspore_infer->CheckModelSupport(item, model_type_item)) {\n          return item;\n        }\n      }\n    } else {\n      if (mindspore_infer->CheckModelSupport(device_type, model_type_item)) {\n        return device_type;\n      }\n    }\n  }\n  return kDeviceTypeNotSpecified;\n}\n\nbool InferenceLoader::SupportReuseDevice() {\n  auto mindspore_infer = CreateMindSporeInfer();\n  if (mindspore_infer == nullptr) {\n    MSI_LOG_ERROR << \"Create MindSpore infer failed\";\n    return false;\n  }\n  return mindspore_infer->SupportReuseDevice();\n}\n\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "tests/ut/stub/stub_postprocess.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/stage_function.h\"\n#include \"mindspore_serving/ccsrc/common/tensor.h\"\n\nnamespace mindspore::serving {\n\nclass StubCastFp32toInt32Postprocess : public CppStageFunctionBase {\n public:\n  Status Call(const std::string &postprocess_name, const InstanceData &input, InstanceData *output) override {\n    MSI_EXCEPTION_IF_NULL(output);\n    auto x1 = input[0];\n    if (x1->data_type() != kMSI_Float32) {\n      return INFER_STATUS_LOG_ERROR(FAILED) << \"Postprocess failed: Input data type invalid \" << x1->data_type();\n    }\n    auto y1 = std::make_shared<Tensor>();\n    y1->set_data_type(kMSI_Int32);\n    y1->resize_data(x1->data_size());\n    y1->set_shape(x1->shape());\n    output->push_back(y1);\n\n    auto x1_data = reinterpret_cast<const float *>(x1->data());\n    auto y1_data = reinterpret_cast<int32_t *>(y1->mutable_data());\n    for (size_t i = 0; i < y1->data_size() / 4; i++) {\n      y1_data[i] = static_cast<int32_t>(x1_data[i]);\n    }\n    return SUCCESS;\n  }\n\n  size_t GetInputsCount(const std::string &postprocess_name) const override { return 1; }\n\n  size_t GetOutputsCount(const std::string &postprocess_name) const override { return 1; }\n};\n\nREGISTER_STAGE_FUNCTION(StubCastFp32toInt32Postprocess, \"stub_postprocess_cast_fp32_to_int32_cpp\")\n\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "tests/ut/stub/stub_preprocess.cc",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"worker/stage_function.h\"\n#include \"mindspore_serving/ccsrc/common/tensor.h\"\n\nnamespace mindspore::serving {\n\nclass StubCastInt32toFp32Preprocess : public CppStageFunctionBase {\n public:\n  Status Call(const std::string &postprocess_name, const InstanceData &input, InstanceData *output) override {\n    MSI_EXCEPTION_IF_NULL(output);\n    auto x1 = input[0];\n    auto x2 = input[1];\n    if (x1->data_type() != kMSI_Int32 || x2->data_type() != kMSI_Int32) {\n      return INFER_STATUS_LOG_ERROR(FAILED)\n             << \"Call failed: Input data type invalid \" << x1->data_type() << \", \" << x2->data_type();\n    }\n\n    auto y1 = std::make_shared<Tensor>();\n    y1->set_data_type(serving::kMSI_Float32);\n    y1->resize_data(x1->data_size());\n    y1->set_shape(x1->shape());\n    output->push_back(y1);\n\n    auto y2 = std::make_shared<Tensor>();\n    y2->set_data_type(serving::kMSI_Float32);\n    y2->resize_data(x2->data_size());\n    y2->set_shape(x2->shape());\n    output->push_back(y2);\n\n    auto x1_data = reinterpret_cast<const int32_t *>(x1->data());\n    auto y1_data = reinterpret_cast<float *>(y1->mutable_data());\n    for (size_t i = 0; i < y1->data_size() / 4; i++) {\n      y1_data[i] = static_cast<float>(x1_data[i]);\n    }\n\n    auto x2_data = reinterpret_cast<const int32_t *>(x2->data());\n    auto 
y2_data = reinterpret_cast<float *>(y2->mutable_data());\n    for (size_t i = 0; i < y2->data_size() / 4; i++) {\n      y2_data[i] = static_cast<float>(x2_data[i]);\n    }\n    return SUCCESS;\n  }\n\n  size_t GetInputsCount(const std::string &postprocess_name) const override { return 2; }\n\n  size_t GetOutputsCount(const std::string &postprocess_name) const override { return 2; }\n};\n\nREGISTER_STAGE_FUNCTION(StubCastInt32toFp32Preprocess, \"stub_preprocess_cast_int32_to_fp32_cpp\")\n\n}  // namespace mindspore::serving\n"
  },
  {
    "path": "third_party/patch/c-ares/CVE-2021-3672.patch",
    "content": "diff -Npur c-ares-1.15.0/ares_expand_name.c c-ares-1.15.0-new/ares_expand_name.c\n--- c-ares-1.15.0/ares_expand_name.c\t2017-07-03 17:04:19.000000000 +0800\n+++ c-ares-1.15.0-new/ares_expand_name.c\t2021-08-21 22:48:24.650973166 +0800\n@@ -38,6 +38,26 @@\n static int name_length(const unsigned char *encoded, const unsigned char *abuf,\n                        int alen);\n \n+/* Reserved characters for names that need to be escaped */\n+static int is_reservedch(int ch)\n+{\n+  switch (ch) {\n+    case '\"':\n+    case '.':\n+    case ';':\n+    case '\\\\':\n+    case '(':\n+    case ')':\n+    case '@':\n+    case '$':\n+      return 1;\n+    default:\n+      break;\n+  }\n+\n+  return 0;\n+}\n+\n /* Expand an RFC1035-encoded domain name given by encoded.  The\n  * containing message is given by abuf and alen.  The result given by\n  * *s, which is set to a NUL-terminated allocated buffer.  *enclen is\n@@ -113,18 +133,37 @@ int ares_expand_name(const unsigned char\n         }\n       else\n         {\n-          len = *p;\n+          int name_len = *p;\n+          len = name_len;\n           p++;\n+\n           while (len--)\n             {\n-              if (*p == '.' 
|| *p == '\\\\')\n-                *q++ = '\\\\';\n-              *q++ = *p;\n+              /* Output as \\DDD for consistency with RFC1035 5.1, except\n+               * for the special case of a root name response  */\n+              if (!isprint(*p) && !(name_len == 1 && *p == 0))\n+                {\n+\n+                  *q++ = '\\\\';\n+                  *q++ = '0' + *p / 100;\n+                  *q++ = '0' + (*p % 100) / 10;\n+                  *q++ = '0' + (*p % 10);\n+                }\n+              else if (is_reservedch(*p))\n+                {\n+                  *q++ = '\\\\';\n+                  *q++ = *p;\n+                }\n+              else\n+                {\n+                  *q++ = *p;\n+                }\n               p++;\n             }\n           *q++ = '.';\n         }\n-    }\n+     }\n+\n   if (!indir)\n     *enclen = aresx_uztosl(p + 1U - encoded);\n \n@@ -171,15 +210,29 @@ static int name_length(const unsigned ch\n         }\n       else if (top == 0x00)\n         {\n-          offset = *encoded;\n+          int name_len = *encoded;\n+          offset = name_len;\n           if (encoded + offset + 1 >= abuf + alen)\n             return -1;\n           encoded++;\n+\n           while (offset--)\n             {\n-              n += (*encoded == '.' || *encoded == '\\\\') ? 2 : 1;\n+              if (!isprint(*encoded) && !(name_len == 1 && *encoded == 0))\n+                {\n+                  n += 4;\n+                }\n+              else if (is_reservedch(*encoded))\n+                {\n+                  n += 2;\n+                }\n+              else\n+                {\n+                  n += 1;\n+                }\n               encoded++;\n             }\n+\n           n++;\n         }\n       else\n"
  },
  {
    "path": "third_party/patch/glog/glog.patch001",
    "content": "diff -Npur glog/CMakeLists.txt glog-patch/CMakeLists.txt\n--- glog/CMakeLists.txt\t2019-03-22 10:51:46.000000000 +0800\n+++ glog-patch/CMakeLists.txt\t2021-04-01 10:32:25.753140500 +0800\n@@ -64,7 +64,6 @@ check_include_file (dlfcn.h HAVE_DLFCN_H\n check_include_file (execinfo.h HAVE_EXECINFO_H)\n check_include_file (glob.h HAVE_GLOB_H)\n check_include_file (inttypes.h HAVE_INTTYPES_H)\n-check_include_file (libunwind.h HAVE_LIBUNWIND_H)\n check_include_file (memory.h HAVE_MEMORY_H)\n check_include_file (pwd.h HAVE_PWD_H)\n check_include_file (stdint.h HAVE_STDINT_H)\n@@ -80,7 +79,6 @@ check_include_file (syscall.h HAVE_SYSCA\n check_include_file (syslog.h HAVE_SYSLOG_H)\n check_include_file (ucontext.h HAVE_UCONTEXT_H)\n check_include_file (unistd.h HAVE_UNISTD_H)\n-check_include_file (unwind.h HAVE_UNWIND_H)\n check_include_file (pwd.h HAVE_PWD_H)\n \n check_include_file_cxx (\"ext/hash_map\" HAVE_EXT_HASH_MAP)\n@@ -116,12 +114,8 @@ check_cxx_compiler_flag (-Wunnamed-type-\n # snprintf as an inline function\n check_symbol_exists (snprintf stdio.h HAVE_SNPRINTF)\n \n-check_library_exists (unwind get_static_proc_name \"\" HAVE_LIB_UNWIND)\n check_library_exists (dbghelp UnDecorateSymbolName \"\" HAVE_DBGHELP)\n \n-find_library (UNWIND_LIBRARY NAMES unwind DOC \"unwind library\")\n-mark_as_advanced (UNWIND_LIBRARY)\n-\n check_c_source_compiles (\"\n #include <stdlib.h>\n static void foo(void) __attribute__ ((unused));\n@@ -470,10 +464,7 @@ add_library (glog\n add_library(glog::glog ALIAS glog)\n \n set_target_properties (glog PROPERTIES POSITION_INDEPENDENT_CODE ON)\n-\n-if (UNWIND_LIBRARY)\n-  target_link_libraries (glog PUBLIC ${UNWIND_LIBRARY})\n-endif (UNWIND_LIBRARY)\n+set_target_properties (glog PROPERTIES OUTPUT_NAME mindspore_serving_glog)\n \n if (HAVE_DBGHELP)\n    target_link_libraries (glog PUBLIC dbghelp)\n"
  },
  {
    "path": "third_party/patch/grpc/grpc.patch001",
    "content": "diff -Npur grpc/..rej grpc-patch/..rej\n--- grpc/..rej\t1970-01-01 08:00:00.000000000 +0800\n+++ grpc-patch/..rej\t2021-04-22 21:00:17.343178600 +0800\n@@ -0,0 +1,22 @@\n+--- CMakeLists.txt\t2020-02-27 03:12:33.000000000 +0800\n++++ CMakeLists.txt\t2021-04-07 21:27:12.317207600 +0800\n+@@ -12992,7 +12992,7 @@ if(gRPC_BUILD_CODEGEN AND gRPC_BUILD_GRP\n+ add_executable(grpc_cpp_plugin\n+   src/compiler/cpp_plugin.cc\n+ )\n+-\n++set_target_properties(grpc_cpp_plugin PROPERTIES INSTALL_RPATH $ORIGIN/../lib)\n+ target_include_directories(grpc_cpp_plugin\n+   PRIVATE\n+     ${CMAKE_CURRENT_SOURCE_DIR}\n+--- cmake/cares.cmake\t2020-02-27 03:12:33.000000000 +0800\n++++ cmake/cares.cmake\t2021-04-10 14:22:35.895725700 +0800\n+@@ -39,7 +39,7 @@ if(gRPC_CARES_PROVIDER STREQUAL \"module\"\n+     set(gRPC_INSTALL FALSE)\n+   endif()\n+ elseif(gRPC_CARES_PROVIDER STREQUAL \"package\")\n+-  find_package(c-ares 1.13.0 REQUIRED)\n++  find_package(c-ares REQUIRED) # cmake 3.19+ cannot find cares 1.15.0\n+   if(TARGET c-ares::cares)\n+     set(_gRPC_CARES_LIBRARIES c-ares::cares)\n+   endif()\ndiff -Npur grpc/.rej grpc-patch/.rej\n--- grpc/.rej\t1970-01-01 08:00:00.000000000 +0800\n+++ grpc-patch/.rej\t2021-04-22 21:03:38.192349100 +0800\n@@ -0,0 +1,22 @@\n+--- grpc/CMakeLists.txt\t2020-02-27 03:12:33.000000000 +0800\n++++ grpc-patch/CMakeLists.txt\t2021-04-07 21:27:12.317207600 +0800\n+@@ -12992,7 +12992,7 @@ if(gRPC_BUILD_CODEGEN AND gRPC_BUILD_GRP\n+ add_executable(grpc_cpp_plugin\n+   src/compiler/cpp_plugin.cc\n+ )\n+-\n++set_target_properties(grpc_cpp_plugin PROPERTIES INSTALL_RPATH $ORIGIN/../lib)\n+ target_include_directories(grpc_cpp_plugin\n+   PRIVATE\n+     ${CMAKE_CURRENT_SOURCE_DIR}\n+--- grpc/cmake/cares.cmake\t2020-02-27 03:12:33.000000000 +0800\n++++ grpc-patch/cmake/cares.cmake\t2021-04-10 14:22:35.895725700 +0800\n+@@ -39,7 +39,7 @@ if(gRPC_CARES_PROVIDER STREQUAL \"module\"\n+     set(gRPC_INSTALL FALSE)\n+   endif()\n+ elseif(gRPC_CARES_PROVIDER 
STREQUAL \"package\")\n+-  find_package(c-ares 1.13.0 REQUIRED)\n++  find_package(c-ares REQUIRED) # cmake 3.19+ cannot find cares 1.15.0\n+   if(TARGET c-ares::cares)\n+     set(_gRPC_CARES_LIBRARIES c-ares::cares)\n+   endif()\ndiff -Npur grpc/CMakeLists.txt grpc-patch/CMakeLists.txt\n--- grpc/CMakeLists.txt\t2020-02-27 03:12:33.000000000 +0800\n+++ grpc-patch/CMakeLists.txt\t2021-04-22 21:15:04.458188400 +0800\n@@ -936,6 +936,8 @@ set_target_properties(address_sorting PR\n   SOVERSION ${gRPC_CORE_SOVERSION}\n )\n\n+set_target_properties(address_sorting PROPERTIES OUTPUT_NAME mindspore_serving_address_sorting)\n+\n if(WIN32 AND MSVC)\n   set_target_properties(address_sorting PROPERTIES COMPILE_PDB_NAME \"address_sorting\"\n     COMPILE_PDB_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}\"\n@@ -1404,6 +1406,8 @@ set_target_properties(gpr PROPERTIES\n   SOVERSION ${gRPC_CORE_SOVERSION}\n )\n\n+set_target_properties(gpr PROPERTIES OUTPUT_NAME mindspore_serving_gpr)\n+\n if(WIN32 AND MSVC)\n   set_target_properties(gpr PROPERTIES COMPILE_PDB_NAME \"gpr\"\n     COMPILE_PDB_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}\"\n@@ -1869,6 +1873,8 @@ set_target_properties(grpc PROPERTIES\n   SOVERSION ${gRPC_CORE_SOVERSION}\n )\n\n+set_target_properties(grpc PROPERTIES OUTPUT_NAME mindspore_serving_grpc)\n+\n if(WIN32 AND MSVC)\n   set_target_properties(grpc PROPERTIES COMPILE_PDB_NAME \"grpc\"\n     COMPILE_PDB_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}\"\n@@ -3696,6 +3702,8 @@ set_target_properties(grpc++ PROPERTIES\n   SOVERSION ${gRPC_CPP_SOVERSION}\n )\n\n+set_target_properties(grpc++ PROPERTIES OUTPUT_NAME mindspore_serving_grpc++)\n+\n if(WIN32 AND MSVC)\n   set_target_properties(grpc++ PROPERTIES COMPILE_PDB_NAME \"grpc++\"\n     COMPILE_PDB_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}\"\n@@ -4279,6 +4287,8 @@ set_target_properties(grpc++_reflection\n   SOVERSION ${gRPC_CPP_SOVERSION}\n )\n\n+set_target_properties(grpc++_reflection PROPERTIES OUTPUT_NAME 
mindspore_serving_grpc++_reflection)\n+\n if(WIN32 AND MSVC)\n   set_target_properties(grpc++_reflection PROPERTIES COMPILE_PDB_NAME \"grpc++_reflection\"\n     COMPILE_PDB_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}\"\n@@ -5896,6 +5906,8 @@ set_target_properties(upb PROPERTIES\n   SOVERSION ${gRPC_CORE_SOVERSION}\n )\n\n+set_target_properties(upb PROPERTIES OUTPUT_NAME mindspore_serving_upb)\n+\n if(WIN32 AND MSVC)\n   set_target_properties(upb PROPERTIES COMPILE_PDB_NAME \"upb\"\n     COMPILE_PDB_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}\"\n@@ -12992,7 +13004,7 @@ if(gRPC_BUILD_CODEGEN AND gRPC_BUILD_GRP\n add_executable(grpc_cpp_plugin\n   src/compiler/cpp_plugin.cc\n )\n-\n+set_target_properties(grpc_cpp_plugin PROPERTIES INSTALL_RPATH $ORIGIN/../lib)\n target_include_directories(grpc_cpp_plugin\n   PRIVATE\n     ${CMAKE_CURRENT_SOURCE_DIR}\n@@ -13251,6 +13263,8 @@ add_executable(grpc_python_plugin\n   src/compiler/python_plugin.cc\n )\n\n+set_target_properties(grpc_python_plugin PROPERTIES INSTALL_RPATH $ORIGIN/../lib)\n+\n target_include_directories(grpc_python_plugin\n   PRIVATE\n     ${CMAKE_CURRENT_SOURCE_DIR}\ndiff -Npur grpc/cmake/cares.cmake grpc-patch/cmake/cares.cmake\n--- grpc/cmake/cares.cmake\t2020-02-27 03:12:33.000000000 +0800\n+++ grpc-patch/cmake/cares.cmake\t2021-04-22 21:05:06.398251400 +0800\n@@ -39,7 +39,7 @@ if(gRPC_CARES_PROVIDER STREQUAL \"module\"\n     set(gRPC_INSTALL FALSE)\n   endif()\n elseif(gRPC_CARES_PROVIDER STREQUAL \"package\")\n-  find_package(c-ares 1.13.0 REQUIRED)\n+  find_package(c-ares REQUIRED) # cmake 3.19+ cannot find cares 1.15.0\n   if(TARGET c-ares::cares)\n     set(_gRPC_CARES_LIBRARIES c-ares::cares)\n   endif()\n"
  },
  {
    "path": "third_party/patch/libevent/libevent.patch001",
    "content": "diff -Npur libevent/CMakeLists.txt libevent-modify/CMakeLists.txt\n--- libevent/CMakeLists.txt\t2020-07-05 20:02:46.000000000 +0800\n+++ libevent-modify/CMakeLists.txt\t2021-04-19 16:36:57.982307500 +0800\n@@ -852,7 +852,7 @@ if (NOT EVENT__DISABLE_OPENSSL)\n \n     list(APPEND SRC_OPENSSL bufferevent_openssl.c)\n     list(APPEND HDR_PUBLIC include/event2/bufferevent_ssl.h)\n-    list(APPEND LIB_APPS ${OPENSSL_LIBRARIES})\n+    list(APPEND LIB_APPS ${OPENSSL_LIBRARIES} -ldl)\n endif()\n \n if (NOT EVENT__DISABLE_THREAD_SUPPORT)\ndiff -Npur libevent/cmake/AddEventLibrary.cmake libevent-modify/cmake/AddEventLibrary.cmake\n--- libevent/cmake/AddEventLibrary.cmake\t2020-07-05 20:02:46.000000000 +0800\n+++ libevent-modify/cmake/AddEventLibrary.cmake\t2021-04-19 16:36:57.982307500 +0800\n@@ -153,1 +153,0 @@\n-                INSTALL_NAME_DIR \"${CMAKE_INSTALL_PREFIX}/lib\"\n"
  },
  {
    "path": "third_party/patch/openssl/CVE-2021-3711.patch",
    "content": "diff --git a/crypto/sm2/sm2_crypt.c b/crypto/sm2/sm2_crypt.c\nindex ef505f6441..1188abfc6b 100644\n--- a/crypto/sm2/sm2_crypt.c\n+++ b/crypto/sm2/sm2_crypt.c\n@@ -61,29 +61,20 @@ static size_t ec_field_size(const EC_GROUP *group)\n     return field_size;\n }\n \n-int sm2_plaintext_size(const EC_KEY *key, const EVP_MD *digest, size_t msg_len,\n-                       size_t *pt_size)\n+int sm2_plaintext_size(const unsigned char *ct, size_t ct_size, size_t *pt_size)\n {\n-    const size_t field_size = ec_field_size(EC_KEY_get0_group(key));\n-    const int md_size = EVP_MD_size(digest);\n-    size_t overhead;\n+    struct SM2_Ciphertext_st *sm2_ctext = NULL;\n \n-    if (md_size < 0) {\n-        SM2err(SM2_F_SM2_PLAINTEXT_SIZE, SM2_R_INVALID_DIGEST);\n-        return 0;\n-    }\n-    if (field_size == 0) {\n-        SM2err(SM2_F_SM2_PLAINTEXT_SIZE, SM2_R_INVALID_FIELD);\n-        return 0;\n-    }\n+    sm2_ctext = d2i_SM2_Ciphertext(NULL, &ct, ct_size);\n \n-    overhead = 10 + 2 * field_size + (size_t)md_size;\n-    if (msg_len <= overhead) {\n+    if (sm2_ctext == NULL) {\n         SM2err(SM2_F_SM2_PLAINTEXT_SIZE, SM2_R_INVALID_ENCODING);\n         return 0;\n     }\n \n-    *pt_size = msg_len - overhead;\n+    *pt_size = sm2_ctext->C2->length;\n+    SM2_Ciphertext_free(sm2_ctext);\n+\n     return 1;\n }\n \ndiff --git a/crypto/sm2/sm2_pmeth.c b/crypto/sm2/sm2_pmeth.c\nindex b42a14c32f..27025fbf3a 100644\n--- a/crypto/sm2/sm2_pmeth.c\n+++ b/crypto/sm2/sm2_pmeth.c\n@@ -151,7 +151,7 @@ static int pkey_sm2_decrypt(EVP_PKEY_CTX *ctx,\n     const EVP_MD *md = (dctx->md == NULL) ? 
EVP_sm3() : dctx->md;\n \n     if (out == NULL) {\n-        if (!sm2_plaintext_size(ec, md, inlen, outlen))\n+        if (!sm2_plaintext_size(in, inlen, outlen))\n             return -1;\n         else\n             return 1;\ndiff --git a/include/crypto/sm2.h b/include/crypto/sm2.h\nindex 76ee80baff..50851a83ce 100644\n--- a/include/crypto/sm2.h\n+++ b/include/crypto/sm2.h\n@@ -60,8 +60,7 @@ int sm2_verify(const unsigned char *dgst, int dgstlen,\n int sm2_ciphertext_size(const EC_KEY *key, const EVP_MD *digest, size_t msg_len,\n                         size_t *ct_size);\n \n-int sm2_plaintext_size(const EC_KEY *key, const EVP_MD *digest, size_t msg_len,\n-                       size_t *pt_size);\n+int sm2_plaintext_size(const unsigned char *ct, size_t ct_size, size_t *pt_size);\n \n int sm2_encrypt(const EC_KEY *key,\n                 const EVP_MD *digest,\ndiff --git a/test/sm2_internal_test.c b/test/sm2_internal_test.c\nindex 2bb73947ff..41827bb82f 100644\n--- a/test/sm2_internal_test.c\n+++ b/test/sm2_internal_test.c\n@@ -185,7 +185,7 @@ static int test_sm2_crypt(const EC_GROUP *group,\n     if (!TEST_mem_eq(ctext, ctext_len, expected, ctext_len))\n         goto done;\n \n-    if (!TEST_true(sm2_plaintext_size(key, digest, ctext_len, &ptext_len))\n+    if (!TEST_true(sm2_plaintext_size(ctext, ctext_len, &ptext_len))\n             || !TEST_int_eq(ptext_len, msg_len))\n         goto done;\n \n"
  },
  {
    "path": "third_party/patch/openssl/CVE-2021-3712.patch",
    "content": "diff --git a/crypto/ec/ec_asn1.c b/crypto/ec/ec_asn1.c\nindex 7b7c75ce84..e497a25909 100644\n--- a/crypto/ec/ec_asn1.c\n+++ b/crypto/ec/ec_asn1.c\n@@ -761,7 +761,10 @@ EC_GROUP *EC_GROUP_new_from_ecparameters(const ECPARAMETERS *params)\n         ret->seed_len = params->curve->seed->length;\n     }\n \n-    if (!params->order || !params->base || !params->base->data) {\n+    if (params->order == NULL\n+            || params->base == NULL\n+            || params->base->data == NULL\n+            || params->base->length == 0) {\n         ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);\n         goto err;\n     }\n\n"
  },
  {
    "path": "third_party/patch/openssl/CVE-2021-4160.patch",
    "content": "diff --git a/crypto/bn/asm/mips.pl b/crypto/bn/asm/mips.pl\nindex 95cb227dc5..91b7aac6e7 100644\n--- a/crypto/bn/asm/mips.pl\n+++ b/crypto/bn/asm/mips.pl\n@@ -1986,6 +1986,8 @@ $code.=<<___;\n \tsltu\t$at,$c_2,$t_1\n \t$ADDU\t$c_3,$t_2,$at\n \t$ST\t$c_2,$BNSZ($a0)\n+\tsltu\t$at,$c_3,$t_2\n+\t$ADDU\t$c_1,$at\n \tmflo\t($t_1,$a_2,$a_0)\n \tmfhi\t($t_2,$a_2,$a_0)\n ___\n@@ -2196,6 +2198,8 @@ $code.=<<___;\n \tsltu\t$at,$c_2,$t_1\n \t$ADDU\t$c_3,$t_2,$at\n \t$ST\t$c_2,$BNSZ($a0)\n+\tsltu\t$at,$c_3,$t_2\n+\t$ADDU\t$c_1,$at\n \tmflo\t($t_1,$a_2,$a_0)\n \tmfhi\t($t_2,$a_2,$a_0)\n ___\ndiff --git a/test/bntest.c b/test/bntest.c\nindex 87e5c4065b..fa9fc07cef 100644\n--- a/test/bntest.c\n+++ b/test/bntest.c\n@@ -630,6 +630,51 @@ static int test_modexp_mont5(void)\n     if (!TEST_BN_eq(c, d))\n         goto err;\n\n+    /*\n+     * Regression test for overflow bug in bn_sqr_comba4/8 for\n+     * mips-linux-gnu and mipsel-linux-gnu 32bit targets.\n+     */\n+    {\n+        static const char *ehex[] = {\n+            \"95564994a96c45954227b845a1e99cb939d5a1da99ee91acc962396ae999a9ee\",\n+            \"38603790448f2f7694c242a875f0cad0aae658eba085f312d2febbbd128dd2b5\",\n+            \"8f7d1149f03724215d704344d0d62c587ae3c5939cba4b9b5f3dc5e8e911ef9a\",\n+            \"5ce1a5a749a4989d0d8368f6e1f8cdf3a362a6c97fb02047ff152b480a4ad985\",\n+            \"2d45efdf0770542992afca6a0590d52930434bba96017afbc9f99e112950a8b1\",\n+            \"a359473ec376f329bdae6a19f503be6d4be7393c4e43468831234e27e3838680\",\n+            \"b949390d2e416a3f9759e5349ab4c253f6f29f819a6fe4cbfd27ada34903300e\",\n+            \"da021f62839f5878a36f1bc3085375b00fd5fa3e68d316c0fdace87a97558465\",\n+            NULL};\n+        static const char *phex[] = {\n+            \"f95dc0f980fbd22e90caa5a387cc4a369f3f830d50dd321c40db8c09a7e1a241\",\n+            \"a536e096622d3280c0c1ba849c1f4a79bf490f60006d081e8cf69960189f0d31\",\n+            
\"2cd9e17073a3fba7881b21474a13b334116cb2f5dbf3189a6de3515d0840f053\",\n+            \"c776d3982d391b6d04d642dda5cc6d1640174c09875addb70595658f89efb439\",\n+            \"dc6fbd55f903aadd307982d3f659207f265e1ec6271b274521b7a5e28e8fd7a5\",\n+            \"5df089292820477802a43cf5b6b94e999e8c9944ddebb0d0e95a60f88cb7e813\",\n+            \"ba110d20e1024774107dd02949031864923b3cb8c3f7250d6d1287b0a40db6a4\",\n+            \"7bd5a469518eb65aa207ddc47d8c6e5fc8e0c105be8fc1d4b57b2e27540471d5\",\n+            NULL};\n+        static const char *mhex[] = {\n+            \"fef15d5ce4625f1bccfbba49fc8439c72bf8202af039a2259678941b60bb4a8f\",\n+            \"2987e965d58fd8cf86a856674d519763d0e1211cc9f8596971050d56d9b35db3\",\n+            \"785866cfbca17cfdbed6060be3629d894f924a89fdc1efc624f80d41a22f1900\",\n+            \"9503fcc3824ef62ccb9208430c26f2d8ceb2c63488ec4c07437aa4c96c43dd8b\",\n+            \"9289ed00a712ff66ee195dc71f5e4ead02172b63c543d69baf495f5fd63ba7bc\",\n+            \"c633bd309c016e37736da92129d0b053d4ab28d21ad7d8b6fab2a8bbdc8ee647\",\n+            \"d2fbcf2cf426cf892e6f5639e0252993965dfb73ccd277407014ea784aaa280c\",\n+            \"b7b03972bc8b0baa72360bdb44b82415b86b2f260f877791cd33ba8f2d65229b\",\n+            NULL};\n+\n+        if (!TEST_true(parse_bigBN(&e, ehex))\n+                || !TEST_true(parse_bigBN(&p, phex))\n+                || !TEST_true(parse_bigBN(&m, mhex))\n+                || !TEST_true(BN_mod_exp_mont_consttime(d, e, p, m, ctx, NULL))\n+                || !TEST_true(BN_mod_exp_simple(a, e, p, m, ctx))\n+                || !TEST_BN_eq(a, d))\n+            goto err;\n+    }\n+\n     /* Zero input */\n     if (!TEST_true(BN_bntest_rand(p, 1024, 0, 0)))\n         goto err;"
  },
  {
    "path": "third_party/patch/openssl/CVE-2022-0778.patch",
    "content": "diff --git a/crypto/bn/bn_sqrt.c b/crypto/bn/bn_sqrt.c\nindex 1723d5ded5..53b0f55985 100644\n--- a/crypto/bn/bn_sqrt.c\n+++ b/crypto/bn/bn_sqrt.c\n@@ -14,7 +14,8 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)\n /*\n  * Returns 'ret' such that ret^2 == a (mod p), using the Tonelli/Shanks\n  * algorithm (cf. Henri Cohen, \"A Course in Algebraic Computational Number\n- * Theory\", algorithm 1.5.1). 'p' must be prime!\n+ * Theory\", algorithm 1.5.1). 'p' must be prime, otherwise an error or\n+ * an incorrect \"result\" will be returned.\n  */\n {\n     BIGNUM *ret = in;\n@@ -301,18 +302,23 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)\n             goto vrfy;\n         }\n \n-        /* find smallest  i  such that  b^(2^i) = 1 */\n-        i = 1;\n-        if (!BN_mod_sqr(t, b, p, ctx))\n-            goto end;\n-        while (!BN_is_one(t)) {\n-            i++;\n-            if (i == e) {\n-                BNerr(BN_F_BN_MOD_SQRT, BN_R_NOT_A_SQUARE);\n-                goto end;\n+        /* Find the smallest i, 0 < i < e, such that b^(2^i) = 1. */\n+        for (i = 1; i < e; i++) {\n+            if (i == 1) {\n+                if (!BN_mod_sqr(t, b, p, ctx))\n+                    goto end;\n+\n+            } else {\n+                if (!BN_mod_mul(t, t, t, p, ctx))\n+                    goto end;\n             }\n-            if (!BN_mod_mul(t, t, t, p, ctx))\n-                goto end;\n+            if (BN_is_one(t))\n+                break;\n+        }\n+        /* If not found, a is not a square or p is not prime. */\n+        if (i >= e) {\n+            BNerr(BN_F_BN_MOD_SQRT, BN_R_NOT_A_SQUARE);\n+            goto end;\n         }\n \n         /* t := y^2^(e - i - 1) */"
  },
  {
    "path": "third_party/patch/openssl/CVE-2022-1292.patch",
    "content": "diff --git a/tools/c_rehash.in b/tools/c_rehash.in\nindex fa7c6c9fef..83c1cc80e0 100644\n--- a/tools/c_rehash.in\n+++ b/tools/c_rehash.in\n@@ -152,6 +152,23 @@ sub check_file {\n \treturn ($is_cert, $is_crl);\n }\n\n+sub compute_hash {\n+    my $fh;\n+    if ( $^O eq \"VMS\" ) {\n+        # VMS uses the open through shell\n+        # The file names are safe there and list form is unsupported\n+        if (!open($fh, \"-|\", join(' ', @_))) {\n+            print STDERR \"Cannot compute hash on '$fname'\\n\";\n+            return;\n+        }\n+    } else {\n+        if (!open($fh, \"-|\", @_)) {\n+            print STDERR \"Cannot compute hash on '$fname'\\n\";\n+            return;\n+        }\n+    }\n+    return (<$fh>, <$fh>);\n+}\n\n # Link a certificate to its subject name hash value, each hash is of\n # the form <hash>.<n> where n is an integer. If the hash value already exists\n@@ -161,10 +178,12 @@ sub check_file {\n\n sub link_hash_cert {\n \t\tmy $fname = $_[0];\n-\t\t$fname =~ s/\\\"/\\\\\\\"/g;\n-\t\tmy ($hash, $fprint) = `\"$openssl\" x509 $x509hash -fingerprint -noout -in \"$fname\"`;\n+\t\tmy ($hash, $fprint) = compute_hash($openssl, \"x509\", $x509hash,\n+\t\t\t\t\t\t   \"-fingerprint\", \"-noout\",\n+\t\t\t\t\t\t   \"-in\", $fname);\n \t\tchomp $hash;\n \t\tchomp $fprint;\n+\t\treturn if !$hash;\n \t\t$fprint =~ s/^.*=//;\n \t\t$fprint =~ tr/://d;\n \t\tmy $suffix = 0;\n@@ -202,10 +221,12 @@ sub link_hash_cert {\n\n sub link_hash_crl {\n \t\tmy $fname = $_[0];\n-\t\t$fname =~ s/'/'\\\\''/g;\n-\t\tmy ($hash, $fprint) = `\"$openssl\" crl $crlhash -fingerprint -noout -in '$fname'`;\n+\t\tmy ($hash, $fprint) = compute_hash($openssl, \"crl\", $crlhash,\n+\t\t\t\t\t\t   \"-fingerprint\", \"-noout\",\n+\t\t\t\t\t\t   \"-in\", $fname);\n \t\tchomp $hash;\n \t\tchomp $fprint;\n+\t\treturn if !$hash;\n \t\t$fprint =~ s/^.*=//;\n \t\t$fprint =~ tr/://d;\n \t\tmy $suffix = 0;"
  },
  {
    "path": "third_party/patch/openssl/CVE-2022-2068.patch",
    "content": "diff --git a/tools/c_rehash.in b/tools/c_rehash.in\nindex cfd18f5da1..9d2a6f6db7 100644\n--- a/tools/c_rehash.in\n+++ b/tools/c_rehash.in\n@@ -104,52 +104,78 @@ foreach (@dirlist) {\n }\n exit($errorcount);\n\n+sub copy_file {\n+    my ($src_fname, $dst_fname) = @_;\n+\n+    if (open(my $in, \"<\", $src_fname)) {\n+        if (open(my $out, \">\", $dst_fname)) {\n+            print $out $_ while (<$in>);\n+            close $out;\n+        } else {\n+            warn \"Cannot open $dst_fname for write, $!\";\n+        }\n+        close $in;\n+    } else {\n+        warn \"Cannot open $src_fname for read, $!\";\n+    }\n+}\n+\n sub hash_dir {\n-\tmy %hashlist;\n-\tprint \"Doing $_[0]\\n\";\n-\tchdir $_[0];\n-\topendir(DIR, \".\");\n-\tmy @flist = sort readdir(DIR);\n-\tclosedir DIR;\n-\tif ( $removelinks ) {\n-\t\t# Delete any existing symbolic links\n-\t\tforeach (grep {/^[\\da-f]+\\.r{0,1}\\d+$/} @flist) {\n-\t\t\tif (-l $_) {\n-\t\t\t\tprint \"unlink $_\" if $verbose;\n-\t\t\t\tunlink $_ || warn \"Can't unlink $_, $!\\n\";\n-\t\t\t}\n-\t\t}\n-\t}\n-\tFILE: foreach $fname (grep {/\\.(pem)|(crt)|(cer)|(crl)$/} @flist) {\n-\t\t# Check to see if certificates and/or CRLs present.\n-\t\tmy ($cert, $crl) = check_file($fname);\n-\t\tif (!$cert && !$crl) {\n-\t\t\tprint STDERR \"WARNING: $fname does not contain a certificate or CRL: skipping\\n\";\n-\t\t\tnext;\n-\t\t}\n-\t\tlink_hash_cert($fname) if ($cert);\n-\t\tlink_hash_crl($fname) if ($crl);\n-\t}\n+    my $dir = shift;\n+    my %hashlist;\n+\n+    print \"Doing $dir\\n\";\n+\n+    if (!chdir $dir) {\n+        print STDERR \"WARNING: Cannot chdir to '$dir', $!\\n\";\n+        return;\n+    }\n+\n+    opendir(DIR, \".\") || print STDERR \"WARNING: Cannot opendir '.', $!\\n\";\n+    my @flist = sort readdir(DIR);\n+    closedir DIR;\n+    if ( $removelinks ) {\n+        # Delete any existing symbolic links\n+        foreach (grep {/^[\\da-f]+\\.r{0,1}\\d+$/} @flist) {\n+            if (-l $_) {\n+      
          print \"unlink $_\\n\" if $verbose;\n+                unlink $_ || warn \"Can't unlink $_, $!\\n\";\n+            }\n+        }\n+    }\n+    FILE: foreach $fname (grep {/\\.(pem)|(crt)|(cer)|(crl)$/} @flist) {\n+        # Check to see if certificates and/or CRLs present.\n+        my ($cert, $crl) = check_file($fname);\n+        if (!$cert && !$crl) {\n+            print STDERR \"WARNING: $fname does not contain a certificate or CRL: skipping\\n\";\n+            next;\n+        }\n+        link_hash_cert($fname) if ($cert);\n+        link_hash_crl($fname) if ($crl);\n+    }\n+\n+    chdir $pwd;\n }\n\n sub check_file {\n-\tmy ($is_cert, $is_crl) = (0,0);\n-\tmy $fname = $_[0];\n-\topen IN, $fname;\n-\twhile(<IN>) {\n-\t\tif (/^-----BEGIN (.*)-----/) {\n-\t\t\tmy $hdr = $1;\n-\t\t\tif ($hdr =~ /^(X509 |TRUSTED |)CERTIFICATE$/) {\n-\t\t\t\t$is_cert = 1;\n-\t\t\t\tlast if ($is_crl);\n-\t\t\t} elsif ($hdr eq \"X509 CRL\") {\n-\t\t\t\t$is_crl = 1;\n-\t\t\t\tlast if ($is_cert);\n-\t\t\t}\n-\t\t}\n-\t}\n-\tclose IN;\n-\treturn ($is_cert, $is_crl);\n+    my ($is_cert, $is_crl) = (0,0);\n+    my $fname = $_[0];\n+\n+    open(my $in, \"<\", $fname);\n+    while(<$in>) {\n+        if (/^-----BEGIN (.*)-----/) {\n+            my $hdr = $1;\n+            if ($hdr =~ /^(X509 |TRUSTED |)CERTIFICATE$/) {\n+                $is_cert = 1;\n+                last if ($is_crl);\n+            } elsif ($hdr eq \"X509 CRL\") {\n+                $is_crl = 1;\n+                last if ($is_cert);\n+            }\n+        }\n+    }\n+    close $in;\n+    return ($is_cert, $is_crl);\n }\n\n sub compute_hash {\n@@ -177,76 +203,48 @@ sub compute_hash {\n # certificate fingerprints\n\n sub link_hash_cert {\n-\t\tmy $fname = $_[0];\n-\t\tmy ($hash, $fprint) = compute_hash($openssl, \"x509\", $x509hash,\n-\t\t\t\t\t\t   \"-fingerprint\", \"-noout\",\n-\t\t\t\t\t\t   \"-in\", $fname);\n-\t\tchomp $hash;\n-\t\tchomp $fprint;\n-\t\treturn if !$hash;\n-\t\t$fprint =~ s/^.*=//;\n-\t\t$fprint 
=~ tr/://d;\n-\t\tmy $suffix = 0;\n-\t\t# Search for an unused hash filename\n-\t\twhile(exists $hashlist{\"$hash.$suffix\"}) {\n-\t\t\t# Hash matches: if fingerprint matches its a duplicate cert\n-\t\t\tif ($hashlist{\"$hash.$suffix\"} eq $fprint) {\n-\t\t\t\tprint STDERR \"WARNING: Skipping duplicate certificate $fname\\n\";\n-\t\t\t\treturn;\n-\t\t\t}\n-\t\t\t$suffix++;\n-\t\t}\n-\t\t$hash .= \".$suffix\";\n-\t\tif ($symlink_exists) {\n-\t\t\tprint \"link $fname -> $hash\\n\" if $verbose;\n-\t\t\tsymlink $fname, $hash || warn \"Can't symlink, $!\";\n-\t\t} else {\n-\t\t\tprint \"copy $fname -> $hash\\n\" if $verbose;\n-                        if (open($in, \"<\", $fname)) {\n-                            if (open($out,\">\", $hash)) {\n-                                print $out $_ while (<$in>);\n-                                close $out;\n-                            } else {\n-                                warn \"can't open $hash for write, $!\";\n-                            }\n-                            close $in;\n-                        } else {\n-                            warn \"can't open $fname for read, $!\";\n-                        }\n-\t\t}\n-\t\t$hashlist{$hash} = $fprint;\n+    link_hash($_[0], 'cert');\n }\n\n # Same as above except for a CRL. 
CRL links are of the form <hash>.r<n>\n\n sub link_hash_crl {\n-\t\tmy $fname = $_[0];\n-\t\tmy ($hash, $fprint) = compute_hash($openssl, \"crl\", $crlhash,\n-\t\t\t\t\t\t   \"-fingerprint\", \"-noout\",\n-\t\t\t\t\t\t   \"-in\", $fname);\n-\t\tchomp $hash;\n-\t\tchomp $fprint;\n-\t\treturn if !$hash;\n-\t\t$fprint =~ s/^.*=//;\n-\t\t$fprint =~ tr/://d;\n-\t\tmy $suffix = 0;\n-\t\t# Search for an unused hash filename\n-\t\twhile(exists $hashlist{\"$hash.r$suffix\"}) {\n-\t\t\t# Hash matches: if fingerprint matches its a duplicate cert\n-\t\t\tif ($hashlist{\"$hash.r$suffix\"} eq $fprint) {\n-\t\t\t\tprint STDERR \"WARNING: Skipping duplicate CRL $fname\\n\";\n-\t\t\t\treturn;\n-\t\t\t}\n-\t\t\t$suffix++;\n-\t\t}\n-\t\t$hash .= \".r$suffix\";\n-\t\tif ($symlink_exists) {\n-\t\t\tprint \"link $fname -> $hash\\n\" if $verbose;\n-\t\t\tsymlink $fname, $hash || warn \"Can't symlink, $!\";\n-\t\t} else {\n-\t\t\tprint \"cp $fname -> $hash\\n\" if $verbose;\n-\t\t\tsystem (\"cp\", $fname, $hash);\n-                        warn \"Can't copy, $!\" if ($? >> 8) != 0;\n-\t\t}\n-\t\t$hashlist{$hash} = $fprint;\n+    link_hash($_[0], 'crl');\n+}\n+\n+sub link_hash {\n+    my ($fname, $type) = @_;\n+    my $is_cert = $type eq 'cert';\n+\n+    my ($hash, $fprint) = compute_hash($openssl,\n+                                       $is_cert ? \"x509\" : \"crl\",\n+                                       $is_cert ? $x509hash : $crlhash,\n+                                       \"-fingerprint\", \"-noout\",\n+                                       \"-in\", $fname);\n+    chomp $hash;\n+    chomp $fprint;\n+    return if !$hash;\n+    $fprint =~ s/^.*=//;\n+    $fprint =~ tr/://d;\n+    my $suffix = 0;\n+    # Search for an unused hash filename\n+    my $crlmark = $is_cert ? 
\"\" : \"r\";\n+    while(exists $hashlist{\"$hash.$crlmark$suffix\"}) {\n+        # Hash matches: if fingerprint matches its a duplicate cert\n+        if ($hashlist{\"$hash.$crlmark$suffix\"} eq $fprint) {\n+            my $what = $is_cert ? 'certificate' : 'CRL';\n+            print STDERR \"WARNING: Skipping duplicate $what $fname\\n\";\n+            return;\n+        }\n+        $suffix++;\n+    }\n+    $hash .= \".$crlmark$suffix\";\n+    if ($symlink_exists) {\n+        print \"link $fname -> $hash\\n\" if $verbose;\n+        symlink $fname, $hash || warn \"Can't symlink, $!\";\n+    } else {\n+        print \"copy $fname -> $hash\\n\" if $verbose;\n+        copy_file($fname, $hash);\n+    }\n+    $hashlist{$hash} = $fprint;\n }"
  },
  {
    "path": "third_party/patch/openssl/CVE-2022-2097.patch",
    "content": "diff --git a/crypto/aes/asm/aesni-x86.pl b/crypto/aes/asm/aesni-x86.pl\nindex fe2b26542a..812758e02e 100644\n--- a/crypto/aes/asm/aesni-x86.pl\n+++ b/crypto/aes/asm/aesni-x86.pl\n@@ -2027,7 +2027,7 @@ my ($l_,$block,$i1,$i3,$i5) = ($rounds_,$key_,$rounds,$len,$out);\n \t&movdqu\t\t(&QWP(-16*2,$out,$inp),$inout4);\n \t&movdqu\t\t(&QWP(-16*1,$out,$inp),$inout5);\n \t&cmp\t\t($inp,$len);\t\t\t# done yet?\n-\t&jb\t\t(&label(\"grandloop\"));\n+\t&jbe\t\t(&label(\"grandloop\"));\n \n &set_label(\"short\");\n \t&add\t\t($len,16*6);\n@@ -2453,7 +2453,7 @@ my ($l_,$block,$i1,$i3,$i5) = ($rounds_,$key_,$rounds,$len,$out);\n \t&pxor\t\t($rndkey1,$inout5);\n \t&movdqu\t\t(&QWP(-16*1,$out,$inp),$inout5);\n \t&cmp\t\t($inp,$len);\t\t\t# done yet?\n-\t&jb\t\t(&label(\"grandloop\"));\n+\t&jbe\t\t(&label(\"grandloop\"));\n \n &set_label(\"short\");\n \t&add\t\t($len,16*6);"
  },
  {
    "path": "third_party/patch/openssl/CVE-2022-4304.patch",
    "content": "diff --git a/crypto/bn/bn_blind.c b/crypto/bn/bn_blind.c\nindex 76fc7ebcff..6e9d239321 100644\n--- a/crypto/bn/bn_blind.c\n+++ b/crypto/bn/bn_blind.c\n@@ -13,20 +13,6 @@\n\n #define BN_BLINDING_COUNTER     32\n\n-struct bn_blinding_st {\n-    BIGNUM *A;\n-    BIGNUM *Ai;\n-    BIGNUM *e;\n-    BIGNUM *mod;                /* just a reference */\n-    CRYPTO_THREAD_ID tid;\n-    int counter;\n-    unsigned long flags;\n-    BN_MONT_CTX *m_ctx;\n-    int (*bn_mod_exp) (BIGNUM *r, const BIGNUM *a, const BIGNUM *p,\n-                       const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx);\n-    CRYPTO_RWLOCK *lock;\n-};\n-\n BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, BIGNUM *mod)\n {\n     BN_BLINDING *ret = NULL;\ndiff --git a/crypto/bn/bn_err.c b/crypto/bn/bn_err.c\nindex dd87c152cf..3dd8d9a568 100644\n--- a/crypto/bn/bn_err.c\n+++ b/crypto/bn/bn_err.c\n@@ -73,6 +73,8 @@ static const ERR_STRING_DATA BN_str_functs[] = {\n     {ERR_PACK(ERR_LIB_BN, BN_F_BN_SET_WORDS, 0), \"bn_set_words\"},\n     {ERR_PACK(ERR_LIB_BN, BN_F_BN_STACK_PUSH, 0), \"BN_STACK_push\"},\n     {ERR_PACK(ERR_LIB_BN, BN_F_BN_USUB, 0), \"BN_usub\"},\n+    {ERR_PACK(ERR_LIB_BN, BN_F_OSSL_BN_RSA_DO_UNBLIND, 0),\n+    \"ossl_bn_rsa_do_unblind\"},\n     {0, NULL}\n };\n\ndiff --git a/crypto/bn/bn_local.h b/crypto/bn/bn_local.h\nindex 62a969b134..4d8cb64675 100644\n--- a/crypto/bn/bn_local.h\n+++ b/crypto/bn/bn_local.h\n@@ -283,6 +283,20 @@ struct bn_gencb_st {\n     } cb;\n };\n\n+struct bn_blinding_st {\n+    BIGNUM *A;\n+    BIGNUM *Ai;\n+    BIGNUM *e;\n+    BIGNUM *mod;                /* just a reference */\n+    CRYPTO_THREAD_ID tid;\n+    int counter;\n+    unsigned long flags;\n+    BN_MONT_CTX *m_ctx;\n+    int (*bn_mod_exp) (BIGNUM *r, const BIGNUM *a, const BIGNUM *p,\n+                       const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx);\n+    CRYPTO_RWLOCK *lock;\n+};\n+\n /*-\n  * BN_window_bits_for_exponent_size -- macro for sliding window mod_exp 
functions\n  *\ndiff --git a/crypto/bn/build.info b/crypto/bn/build.info\nindex b9ed5322fa..c9fe2fdada 100644\n--- a/crypto/bn/build.info\n+++ b/crypto/bn/build.info\n@@ -5,7 +5,8 @@ SOURCE[../../libcrypto]=\\\n         bn_kron.c bn_sqrt.c bn_gcd.c bn_prime.c bn_err.c bn_sqr.c \\\n         {- $target{bn_asm_src} -} \\\n         bn_recp.c bn_mont.c bn_mpi.c bn_exp2.c bn_gf2m.c bn_nist.c \\\n-        bn_depr.c bn_const.c bn_x931p.c bn_intern.c bn_dh.c bn_srp.c\n+        bn_depr.c bn_const.c bn_x931p.c bn_intern.c bn_dh.c bn_srp.c \\\n+        rsa_sup_mul.c\n\n INCLUDE[bn_exp.o]=..\n\ndiff --git a/crypto/bn/rsa_sup_mul.c b/crypto/bn/rsa_sup_mul.c\nnew file mode 100644\nindex 0000000000..acafefd5fe\n--- /dev/null\n+++ b/crypto/bn/rsa_sup_mul.c\n@@ -0,0 +1,614 @@\n+#include <openssl/e_os2.h>\n+#include <stddef.h>\n+#include <sys/types.h>\n+#include <string.h>\n+#include <openssl/bn.h>\n+#include <openssl/err.h>\n+#include <openssl/rsaerr.h>\n+#include \"internal/numbers.h\"\n+#include \"internal/constant_time.h\"\n+#include \"bn_local.h\"\n+\n+# if BN_BYTES == 8\n+typedef uint64_t limb_t;\n+#  if defined(__SIZEOF_INT128__) && __SIZEOF_INT128__ == 16\n+/* nonstandard; implemented by gcc on 64-bit platforms */\n+typedef __uint128_t limb2_t;\n+#   define HAVE_LIMB2_T\n+#  endif\n+#  define LIMB_BIT_SIZE 64\n+#  define LIMB_BYTE_SIZE 8\n+# elif BN_BYTES == 4\n+typedef uint32_t limb_t;\n+typedef uint64_t limb2_t;\n+#  define LIMB_BIT_SIZE 32\n+#  define LIMB_BYTE_SIZE 4\n+#  define HAVE_LIMB2_T\n+# else\n+#  error \"Not supported\"\n+# endif\n+\n+/*\n+ * For multiplication we're using schoolbook multiplication,\n+ * so if we have two numbers, each with 6 \"digits\" (words)\n+ * the multiplication is calculated as follows:\n+ *                        A B C D E F\n+ *                     x  I J K L M N\n+ *                     --------------\n+ *                                N*F\n+ *                              N*E\n+ *                            N*D\n+ *                    
      N*C\n+ *                        N*B\n+ *                      N*A\n+ *                              M*F\n+ *                            M*E\n+ *                          M*D\n+ *                        M*C\n+ *                      M*B\n+ *                    M*A\n+ *                            L*F\n+ *                          L*E\n+ *                        L*D\n+ *                      L*C\n+ *                    L*B\n+ *                  L*A\n+ *                          K*F\n+ *                        K*E\n+ *                      K*D\n+ *                    K*C\n+ *                  K*B\n+ *                K*A\n+ *                        J*F\n+ *                      J*E\n+ *                    J*D\n+ *                  J*C\n+ *                J*B\n+ *              J*A\n+ *                      I*F\n+ *                    I*E\n+ *                  I*D\n+ *                I*C\n+ *              I*B\n+ *         +  I*A\n+ *         ==========================\n+ *                        N*B N*D N*F\n+ *                    + N*A N*C N*E\n+ *                    + M*B M*D M*F\n+ *                  + M*A M*C M*E\n+ *                  + L*B L*D L*F\n+ *                + L*A L*C L*E\n+ *                + K*B K*D K*F\n+ *              + K*A K*C K*E\n+ *              + J*B J*D J*F\n+ *            + J*A J*C J*E\n+ *            + I*B I*D I*F\n+ *          + I*A I*C I*E\n+ *\n+ *                1+1 1+3 1+5\n+ *              1+0 1+2 1+4\n+ *              0+1 0+3 0+5\n+ *            0+0 0+2 0+4\n+ *\n+ *            0 1 2 3 4 5 6\n+ * which requires n^2 multiplications and 2n full length additions\n+ * as we can keep every other result of limb multiplication in two separate\n+ * limbs\n+ */\n+\n+#if defined HAVE_LIMB2_T\n+static ossl_inline void _mul_limb(limb_t *hi, limb_t *lo, limb_t a, limb_t b)\n+{\n+    limb2_t t;\n+    /*\n+     * this is idiomatic code to tell compiler to use the native mul\n+     * those three lines will actually compile to single instruction\n+  
   */\n+\n+    t = (limb2_t)a * b;\n+    *hi = t >> LIMB_BIT_SIZE;\n+    *lo = (limb_t)t;\n+}\n+#elif (BN_BYTES == 8) && (defined _MSC_VER)\n+/* https://learn.microsoft.com/en-us/cpp/intrinsics/umul128?view=msvc-170 */\n+#pragma intrinsic(_umul128)\n+static ossl_inline void _mul_limb(limb_t *hi, limb_t *lo, limb_t a, limb_t b)\n+{\n+    *lo = _umul128(a, b, hi);\n+}\n+#else\n+/*\n+ * if the compiler doesn't have either a 128bit data type nor a \"return\n+ * high 64 bits of multiplication\"\n+ */\n+static ossl_inline void _mul_limb(limb_t *hi, limb_t *lo, limb_t a, limb_t b)\n+{\n+    limb_t a_low = (limb_t)(uint32_t)a;\n+    limb_t a_hi = a >> 32;\n+    limb_t b_low = (limb_t)(uint32_t)b;\n+    limb_t b_hi = b >> 32;\n+\n+    limb_t p0 = a_low * b_low;\n+    limb_t p1 = a_low * b_hi;\n+    limb_t p2 = a_hi * b_low;\n+    limb_t p3 = a_hi * b_hi;\n+\n+    uint32_t cy = (uint32_t)(((p0 >> 32) + (uint32_t)p1 + (uint32_t)p2) >> 32);\n+\n+    *lo = p0 + (p1 << 32) + (p2 << 32);\n+    *hi = p3 + (p1 >> 32) + (p2 >> 32) + cy;\n+}\n+#endif\n+\n+/* add two limbs with carry in, return carry out */\n+static ossl_inline limb_t _add_limb(limb_t *ret, limb_t a, limb_t b, limb_t carry)\n+{\n+    limb_t carry1, carry2, t;\n+    /*\n+     * `c = a + b; if (c < a)` is idiomatic code that makes compilers\n+     * use add with carry on assembly level\n+     */\n+\n+    *ret = a + carry;\n+    if (*ret < a)\n+        carry1 = 1;\n+    else\n+        carry1 = 0;\n+\n+    t = *ret;\n+    *ret = t + b;\n+    if (*ret < t)\n+        carry2 = 1;\n+    else\n+        carry2 = 0;\n+\n+    return carry1 + carry2;\n+}\n+\n+/*\n+ * add two numbers of the same size, return overflow\n+ *\n+ * add a to b, place result in ret; all arrays need to be n limbs long\n+ * return overflow from addition (0 or 1)\n+ */\n+static ossl_inline limb_t add(limb_t *ret, limb_t *a, limb_t *b, size_t n)\n+{\n+    limb_t c = 0;\n+    ossl_ssize_t i;\n+\n+    for(i = n - 1; i > -1; i--)\n+        c = _add_limb(&ret[i], 
a[i], b[i], c);\n+\n+    return c;\n+}\n+\n+/*\n+ * return number of limbs necessary for temporary values\n+ * when multiplying numbers n limbs large\n+ */\n+static ossl_inline size_t mul_limb_numb(size_t n)\n+{\n+    return  2 * n * 2;\n+}\n+\n+/*\n+ * multiply two numbers of the same size\n+ *\n+ * multiply a by b, place result in ret; a and b need to be n limbs long\n+ * ret needs to be 2*n limbs long, tmp needs to be mul_limb_numb(n) limbs\n+ * long\n+ */\n+static void limb_mul(limb_t *ret, limb_t *a, limb_t *b, size_t n, limb_t *tmp)\n+{\n+    limb_t *r_odd, *r_even;\n+    size_t i, j, k;\n+\n+    r_odd = tmp;\n+    r_even = &tmp[2 * n];\n+\n+    memset(ret, 0, 2 * n * sizeof(limb_t));\n+\n+    for (i = 0; i < n; i++) {\n+        for (k = 0; k < i + n + 1; k++) {\n+            r_even[k] = 0;\n+            r_odd[k] = 0;\n+        }\n+        for (j = 0; j < n; j++) {\n+            /*\n+             * place results from even and odd limbs in separate arrays so that\n+             * we don't have to calculate overflow every time we get individual\n+             * limb multiplication result\n+             */\n+            if (j % 2 == 0)\n+                _mul_limb(&r_even[i + j], &r_even[i + j + 1], a[i], b[j]);\n+            else\n+                _mul_limb(&r_odd[i + j], &r_odd[i + j + 1], a[i], b[j]);\n+        }\n+        /*\n+         * skip the least significant limbs when adding multiples of\n+         * more significant limbs (they're zero anyway)\n+         */\n+        add(ret, ret, r_even, n + i + 1);\n+        add(ret, ret, r_odd, n + i + 1);\n+    }\n+}\n+\n+/* modifies the value in place by performing a right shift by one bit */\n+static ossl_inline void rshift1(limb_t *val, size_t n)\n+{\n+    limb_t shift_in = 0, shift_out = 0;\n+    size_t i;\n+\n+    for (i = 0; i < n; i++) {\n+        shift_out = val[i] & 1;\n+        val[i] = shift_in << (LIMB_BIT_SIZE - 1) | (val[i] >> 1);\n+        shift_in = shift_out;\n+    }\n+}\n+\n+/* extend the LSB of 
flag to all bits of limb */\n+static ossl_inline limb_t mk_mask(limb_t flag)\n+{\n+    flag |= flag << 1;\n+    flag |= flag << 2;\n+    flag |= flag << 4;\n+    flag |= flag << 8;\n+    flag |= flag << 16;\n+#if (LIMB_BYTE_SIZE == 8)\n+    flag |= flag << 32;\n+#endif\n+    return flag;\n+}\n+\n+/*\n+ * copy from either a or b to ret based on flag\n+ * when flag == 0, then copies from b\n+ * when flag == 1, then copies from a\n+ */\n+static ossl_inline void cselect(limb_t flag, limb_t *ret, limb_t *a, limb_t *b, size_t n)\n+{\n+    /*\n+     * would be more efficient with non volatile mask, but then gcc\n+     * generates code with jumps\n+     */\n+    volatile limb_t mask;\n+    size_t i;\n+\n+    mask = mk_mask(flag);\n+    for (i = 0; i < n; i++) {\n+#if (LIMB_BYTE_SIZE == 8)\n+        ret[i] = constant_time_select_64(mask, a[i], b[i]);\n+#else\n+        ret[i] = constant_time_select_32(mask, a[i], b[i]);\n+#endif\n+    }\n+}\n+\n+static limb_t _sub_limb(limb_t *ret, limb_t a, limb_t b, limb_t borrow)\n+{\n+    limb_t borrow1, borrow2, t;\n+    /*\n+     * while it doesn't look constant-time, this is idiomatic code\n+     * to tell compilers to use the carry bit from subtraction\n+     */\n+\n+    *ret = a - borrow;\n+    if (*ret > a)\n+        borrow1 = 1;\n+    else\n+        borrow1 = 0;\n+\n+    t = *ret;\n+    *ret = t - b;\n+    if (*ret > t)\n+        borrow2 = 1;\n+    else\n+        borrow2 = 0;\n+\n+    return borrow1 + borrow2;\n+}\n+\n+/*\n+ * place the result of a - b into ret, return the borrow bit.\n+ * All arrays need to be n limbs long\n+ */\n+static limb_t sub(limb_t *ret, limb_t *a, limb_t *b, size_t n)\n+{\n+    limb_t borrow = 0;\n+    ossl_ssize_t i;\n+\n+    for (i = n - 1; i > -1; i--)\n+        borrow = _sub_limb(&ret[i], a[i], b[i], borrow);\n+\n+    return borrow;\n+}\n+\n+/* return the number of limbs necessary to allocate for the mod() tmp operand */\n+static ossl_inline size_t mod_limb_numb(size_t anum, size_t modnum)\n+{\n+    
return (anum + modnum) * 3;\n+}\n+\n+/*\n+ * calculate a % mod, place the result in ret\n+ * size of a is defined by anum, size of ret and mod is modnum,\n+ * size of tmp is returned by mod_limb_numb()\n+ */\n+static void mod(limb_t *ret, limb_t *a, size_t anum, limb_t *mod,\n+               size_t modnum, limb_t *tmp)\n+{\n+    limb_t *atmp, *modtmp, *rettmp;\n+    limb_t res;\n+    size_t i;\n+\n+    memset(tmp, 0, mod_limb_numb(anum, modnum) * LIMB_BYTE_SIZE);\n+\n+    atmp = tmp;\n+    modtmp = &tmp[anum + modnum];\n+    rettmp = &tmp[(anum + modnum) * 2];\n+\n+    for (i = modnum; i <modnum + anum; i++)\n+        atmp[i] = a[i-modnum];\n+\n+    for (i = 0; i < modnum; i++)\n+        modtmp[i] = mod[i];\n+\n+    for (i = 0; i < anum * LIMB_BIT_SIZE; i++) {\n+        rshift1(modtmp, anum + modnum);\n+        res = sub(rettmp, atmp, modtmp, anum+modnum);\n+        cselect(res, atmp, atmp, rettmp, anum+modnum);\n+    }\n+\n+    memcpy(ret, &atmp[anum], sizeof(limb_t) * modnum);\n+}\n+\n+/* necessary size of tmp for a _mul_add_limb() call with provided anum */\n+static ossl_inline size_t _mul_add_limb_numb(size_t anum)\n+{\n+    return 2 * (anum + 1);\n+}\n+\n+/* multiply a by m, add to ret, return carry */\n+static limb_t _mul_add_limb(limb_t *ret, limb_t *a, size_t anum,\n+                           limb_t m, limb_t *tmp)\n+{\n+    limb_t carry = 0;\n+    limb_t *r_odd, *r_even;\n+    size_t i;\n+\n+    memset(tmp, 0, sizeof(limb_t) * (anum + 1) * 2);\n+\n+    r_odd = tmp;\n+    r_even = &tmp[anum + 1];\n+\n+    for (i = 0; i < anum; i++) {\n+        /*\n+         * place the results from even and odd limbs in separate arrays\n+         * so that we have to worry about carry just once\n+         */\n+        if (i % 2 == 0)\n+            _mul_limb(&r_even[i], &r_even[i + 1], a[i], m);\n+        else\n+            _mul_limb(&r_odd[i], &r_odd[i + 1], a[i], m);\n+    }\n+    /* assert: add() carry here will be equal zero */\n+    add(r_even, r_even, r_odd, anum + 
1);\n+    /*\n+     * while here it will not overflow as the max value from multiplication\n+     * is -2 while max overflow from addition is 1, so the max value of\n+     * carry is -1 (i.e. max int)\n+     */\n+    carry = add(ret, ret, &r_even[1], anum) + r_even[0];\n+\n+    return carry;\n+}\n+\n+static ossl_inline size_t mod_montgomery_limb_numb(size_t modnum)\n+{\n+    return modnum * 2 + _mul_add_limb_numb(modnum);\n+}\n+\n+/*\n+ * calculate a % mod, place result in ret\n+ * assumes that a is in Montgomery form with the R (Montgomery modulus) being\n+ * smallest power of two big enough to fit mod and that's also a power\n+ * of the count of number of bits in limb_t (B).\n+ * For calculation, we also need n', such that mod * n' == -1 mod B.\n+ * anum must be <= 2 * modnum\n+ * ret needs to be modnum words long\n+ * tmp needs to be mod_montgomery_limb_numb(modnum) limbs long\n+ */\n+static void mod_montgomery(limb_t *ret, limb_t *a, size_t anum, limb_t *mod,\n+                          size_t modnum, limb_t ni0, limb_t *tmp)\n+{\n+    limb_t carry, v;\n+    limb_t *res, *rp, *tmp2;\n+    ossl_ssize_t i;\n+\n+    res = tmp;\n+    /*\n+     * for intermediate result we need an integer twice as long as modulus\n+     * but keep the input in the least significant limbs\n+     */\n+    memset(res, 0, sizeof(limb_t) * (modnum * 2));\n+    memcpy(&res[modnum * 2 - anum], a, sizeof(limb_t) * anum);\n+    rp = &res[modnum];\n+    tmp2 = &res[modnum * 2];\n+\n+    carry = 0;\n+\n+    /* add multiples of the modulus to the value until R divides it cleanly */\n+    for (i = modnum; i > 0; i--, rp--) {\n+        v = _mul_add_limb(rp, mod, modnum, rp[modnum - 1] * ni0, tmp2);\n+        v = v + carry + rp[-1];\n+        carry |= (v != rp[-1]);\n+        carry &= (v <= rp[-1]);\n+        rp[-1] = v;\n+    }\n+\n+    /* perform the final reduction by mod... 
*/\n+    carry -= sub(ret, rp, mod, modnum);\n+\n+    /* ...conditionally */\n+    cselect(carry, ret, rp, ret, modnum);\n+}\n+\n+/* allocated buffer should be freed afterwards */\n+static void BN_to_limb(const BIGNUM *bn, limb_t *buf, size_t limbs)\n+{\n+    int i;\n+    int real_limbs = (BN_num_bytes(bn) + LIMB_BYTE_SIZE - 1) / LIMB_BYTE_SIZE;\n+    limb_t *ptr = buf + (limbs - real_limbs);\n+\n+    for (i = 0; i < real_limbs; i++)\n+         ptr[i] = bn->d[real_limbs - i - 1];\n+}\n+\n+#if LIMB_BYTE_SIZE == 8\n+static ossl_inline uint64_t be64(uint64_t host)\n+{\n+    const union {\n+        long one;\n+        char little;\n+    } is_endian = { 1 };\n+\n+    if (is_endian.little) {\n+        uint64_t big = 0;\n+\n+        big |= (host & 0xff00000000000000) >> 56;\n+        big |= (host & 0x00ff000000000000) >> 40;\n+        big |= (host & 0x0000ff0000000000) >> 24;\n+        big |= (host & 0x000000ff00000000) >>  8;\n+        big |= (host & 0x00000000ff000000) <<  8;\n+        big |= (host & 0x0000000000ff0000) << 24;\n+        big |= (host & 0x000000000000ff00) << 40;\n+        big |= (host & 0x00000000000000ff) << 56;\n+        return big;\n+    } else {\n+        return host;\n+    }\n+}\n+\n+#else\n+/* Not all platforms have htobe32(). 
*/\n+static ossl_inline uint32_t be32(uint32_t host)\n+{\n+    const union {\n+        long one;\n+        char little;\n+    } is_endian = { 1 };\n+\n+    if (is_endian.little) {\n+        uint32_t big = 0;\n+\n+        big |= (host & 0xff000000) >> 24;\n+        big |= (host & 0x00ff0000) >> 8;\n+        big |= (host & 0x0000ff00) << 8;\n+        big |= (host & 0x000000ff) << 24;\n+        return big;\n+    } else {\n+        return host;\n+    }\n+}\n+#endif\n+\n+/*\n+ * We assume that intermediate, possible_arg2, blinding, and ctx are used\n+ * similar to BN_BLINDING_invert_ex() arguments.\n+ * to_mod is RSA modulus.\n+ * buf and num is the serialization buffer and its length.\n+ *\n+ * Here we use classic/Montgomery multiplication and modulo. After the calculation finished\n+ * we serialize the new structure instead of BIGNUMs taking endianness into account.\n+ */\n+int ossl_bn_rsa_do_unblind(const BIGNUM *intermediate,\n+                           const BN_BLINDING *blinding,\n+                           const BIGNUM *possible_arg2,\n+                           const BIGNUM *to_mod, BN_CTX *ctx,\n+                           unsigned char *buf, int num)\n+{\n+    limb_t *l_im = NULL, *l_mul = NULL, *l_mod = NULL;\n+    limb_t *l_ret = NULL, *l_tmp = NULL, l_buf;\n+    size_t l_im_count = 0, l_mul_count = 0, l_size = 0, l_mod_count = 0;\n+    size_t l_tmp_count = 0;\n+    int ret = 0;\n+    size_t i;\n+    unsigned char *tmp;\n+    const BIGNUM *arg1 = intermediate;\n+    const BIGNUM *arg2 = (possible_arg2 == NULL) ? blinding->Ai : possible_arg2;\n+\n+    l_im_count  = (BN_num_bytes(arg1)   + LIMB_BYTE_SIZE - 1) / LIMB_BYTE_SIZE;\n+    l_mul_count = (BN_num_bytes(arg2)   + LIMB_BYTE_SIZE - 1) / LIMB_BYTE_SIZE;\n+    l_mod_count = (BN_num_bytes(to_mod) + LIMB_BYTE_SIZE - 1) / LIMB_BYTE_SIZE;\n+\n+    l_size = l_im_count > l_mul_count ? 
l_im_count : l_mul_count;\n+    l_im  = OPENSSL_zalloc(l_size * LIMB_BYTE_SIZE);\n+    l_mul = OPENSSL_zalloc(l_size * LIMB_BYTE_SIZE);\n+    l_mod = OPENSSL_zalloc(l_mod_count * LIMB_BYTE_SIZE);\n+\n+    if ((l_im == NULL) || (l_mul == NULL) || (l_mod == NULL))\n+        goto err;\n+\n+    BN_to_limb(arg1,   l_im,  l_size);\n+    BN_to_limb(arg2,   l_mul, l_size);\n+    BN_to_limb(to_mod, l_mod, l_mod_count);\n+\n+    l_ret = OPENSSL_malloc(2 * l_size * LIMB_BYTE_SIZE);\n+\n+    if (blinding->m_ctx != NULL) {\n+        l_tmp_count = mul_limb_numb(l_size) > mod_montgomery_limb_numb(l_mod_count) ?\n+                      mul_limb_numb(l_size) : mod_montgomery_limb_numb(l_mod_count);\n+        l_tmp = OPENSSL_malloc(l_tmp_count * LIMB_BYTE_SIZE);\n+    } else {\n+        l_tmp_count = mul_limb_numb(l_size) > mod_limb_numb(2 * l_size, l_mod_count) ?\n+                      mul_limb_numb(l_size) : mod_limb_numb(2 * l_size, l_mod_count);\n+        l_tmp = OPENSSL_malloc(l_tmp_count * LIMB_BYTE_SIZE);\n+    }\n+\n+    if ((l_ret == NULL) || (l_tmp == NULL))\n+        goto err;\n+\n+    if (blinding->m_ctx != NULL) {\n+        limb_mul(l_ret, l_im, l_mul, l_size, l_tmp);\n+        mod_montgomery(l_ret, l_ret, 2 * l_size, l_mod, l_mod_count,\n+                       blinding->m_ctx->n0[0], l_tmp);\n+    } else {\n+        limb_mul(l_ret, l_im, l_mul, l_size, l_tmp);\n+        mod(l_ret, l_ret, 2 * l_size, l_mod, l_mod_count, l_tmp);\n+    }\n+\n+    /* modulus size in bytes can be equal to num but after limbs conversion it becomes bigger */\n+    if (num < BN_num_bytes(to_mod)) {\n+        BNerr(BN_F_OSSL_BN_RSA_DO_UNBLIND, ERR_R_PASSED_INVALID_ARGUMENT);\n+        goto err;\n+    }\n+\n+    memset(buf, 0, num);\n+    tmp = buf + num - BN_num_bytes(to_mod);\n+    for (i = 0; i < l_mod_count; i++) {\n+#if LIMB_BYTE_SIZE == 8\n+        l_buf = be64(l_ret[i]);\n+#else\n+        l_buf = be32(l_ret[i]);\n+#endif\n+        if (i == 0) {\n+            int delta = LIMB_BYTE_SIZE - 
((l_mod_count * LIMB_BYTE_SIZE) - num);\n+\n+            memcpy(tmp, ((char *)&l_buf) + LIMB_BYTE_SIZE - delta, delta);\n+            tmp += delta;\n+        } else {\n+            memcpy(tmp, &l_buf, LIMB_BYTE_SIZE);\n+            tmp += LIMB_BYTE_SIZE;\n+        }\n+    }\n+    ret = num;\n+\n+ err:\n+    OPENSSL_free(l_im);\n+    OPENSSL_free(l_mul);\n+    OPENSSL_free(l_mod);\n+    OPENSSL_free(l_tmp);\n+    OPENSSL_free(l_ret);\n+\n+    return ret;\n+}\ndiff --git a/crypto/err/openssl.txt b/crypto/err/openssl.txt\nindex 9f91a4a811..ba3a46d5b9 100644\n--- a/crypto/err/openssl.txt\n+++ b/crypto/err/openssl.txt\n@@ -1,4 +1,4 @@\n-# Copyright 1999-2021 The OpenSSL Project Authors. All Rights Reserved.\n+# Copyright 1999-2023 The OpenSSL Project Authors. All Rights Reserved.\n #\n # Licensed under the OpenSSL license (the \"License\").  You may not use\n # this file except in compliance with the License.  You can obtain a copy\n@@ -232,6 +232,7 @@ BN_F_BN_RSHIFT:146:BN_rshift\n BN_F_BN_SET_WORDS:144:bn_set_words\n BN_F_BN_STACK_PUSH:148:BN_STACK_push\n BN_F_BN_USUB:115:BN_usub\n+BN_F_OSSL_BN_RSA_DO_UNBLIND:151:ossl_bn_rsa_do_unblind\n BUF_F_BUF_MEM_GROW:100:BUF_MEM_grow\n BUF_F_BUF_MEM_GROW_CLEAN:105:BUF_MEM_grow_clean\n BUF_F_BUF_MEM_NEW:101:BUF_MEM_new\ndiff --git a/crypto/rsa/rsa_ossl.c b/crypto/rsa/rsa_ossl.c\nindex b52a66f6a6..6c3c0cf78d 100644\n--- a/crypto/rsa/rsa_ossl.c\n+++ b/crypto/rsa/rsa_ossl.c\n@@ -465,11 +465,20 @@ static int rsa_ossl_private_decrypt(int flen, const unsigned char *from,\n         BN_free(d);\n     }\n\n-    if (blinding)\n-        if (!rsa_blinding_invert(blinding, ret, unblind, ctx))\n+    if (blinding) {\n+        /*\n+         * ossl_bn_rsa_do_unblind() combines blinding inversion and\n+         * 0-padded BN BE serialization\n+         */\n+        j = ossl_bn_rsa_do_unblind(ret, blinding, unblind, rsa->n, ctx,\n+                                   buf, num);\n+        if (j == 0)\n             goto err;\n-\n-    j = 
BN_bn2binpad(ret, buf, num);\n+    } else {\n+        j = BN_bn2binpad(ret, buf, num);\n+        if (j < 0)\n+            goto err;\n+    }\n\n     switch (padding) {\n     case RSA_PKCS1_PADDING:\ndiff --git a/include/crypto/bn.h b/include/crypto/bn.h\nindex 60afda1dad..b5f36fb25a 100644\n--- a/include/crypto/bn.h\n+++ b/include/crypto/bn.h\n@@ -86,5 +86,10 @@ int bn_lshift_fixed_top(BIGNUM *r, const BIGNUM *a, int n);\n int bn_rshift_fixed_top(BIGNUM *r, const BIGNUM *a, int n);\n int bn_div_fixed_top(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m,\n                      const BIGNUM *d, BN_CTX *ctx);\n+int ossl_bn_rsa_do_unblind(const BIGNUM *intermediate,\n+                           const BN_BLINDING *blinding,\n+                           const BIGNUM *possible_arg2,\n+                           const BIGNUM *to_mod, BN_CTX *ctx,\n+                           unsigned char *buf, int num);\n\n #endif\ndiff --git a/include/openssl/bnerr.h b/include/openssl/bnerr.h\nindex 9f3c7cfaab..a0752cea52 100644\n--- a/include/openssl/bnerr.h\n+++ b/include/openssl/bnerr.h\n@@ -72,6 +72,7 @@ int ERR_load_BN_strings(void);\n # define BN_F_BN_SET_WORDS                                144\n # define BN_F_BN_STACK_PUSH                               148\n # define BN_F_BN_USUB                                     115\n+# define BN_F_OSSL_BN_RSA_DO_UNBLIND                      151\n\n /*\n  * BN reason codes."
  },
  {
    "path": "third_party/patch/openssl/CVE-2022-4450.patch",
    "content": "diff --git a/crypto/pem/pem_lib.c b/crypto/pem/pem_lib.c\nindex d416d939ea..328c30cdbb 100644\n--- a/crypto/pem/pem_lib.c\n+++ b/crypto/pem/pem_lib.c\n@@ -957,7 +957,9 @@ int PEM_read_bio_ex(BIO *bp, char **name_out, char **header,\n     *data = pem_malloc(len, flags);\n     if (*header == NULL || *data == NULL) {\n         pem_free(*header, flags, 0);\n+        *header = NULL;\n         pem_free(*data, flags, 0);\n+        *data = NULL;\n         goto end;\n     }\n     BIO_read(headerB, *header, headerlen);"
  },
  {
    "path": "third_party/patch/openssl/CVE-2023-0215.patch",
    "content": "diff --git a/crypto/asn1/bio_ndef.c b/crypto/asn1/bio_ndef.c\nindex 760e4846a4..f8d4b1b9aa 100644\n--- a/crypto/asn1/bio_ndef.c\n+++ b/crypto/asn1/bio_ndef.c\n@@ -49,12 +49,19 @@ static int ndef_suffix(BIO *b, unsigned char **pbuf, int *plen, void *parg);\n static int ndef_suffix_free(BIO *b, unsigned char **pbuf, int *plen,\n                             void *parg);\n\n+/*\n+ * On success, the returned BIO owns the input BIO as part of its BIO chain.\n+ * On failure, NULL is returned and the input BIO is owned by the caller.\n+ *\n+ * Unfortunately cannot constify this due to CMS_stream() and PKCS7_stream()\n+ */\n BIO *BIO_new_NDEF(BIO *out, ASN1_VALUE *val, const ASN1_ITEM *it)\n {\n     NDEF_SUPPORT *ndef_aux = NULL;\n     BIO *asn_bio = NULL;\n     const ASN1_AUX *aux = it->funcs;\n     ASN1_STREAM_ARG sarg;\n+    BIO *pop_bio = NULL;\n\n     if (!aux || !aux->asn1_cb) {\n         ASN1err(ASN1_F_BIO_NEW_NDEF, ASN1_R_STREAMING_NOT_SUPPORTED);\n@@ -69,21 +76,39 @@ BIO *BIO_new_NDEF(BIO *out, ASN1_VALUE *val, const ASN1_ITEM *it)\n     out = BIO_push(asn_bio, out);\n     if (out == NULL)\n         goto err;\n+    pop_bio = asn_bio;\n\n-    BIO_asn1_set_prefix(asn_bio, ndef_prefix, ndef_prefix_free);\n-    BIO_asn1_set_suffix(asn_bio, ndef_suffix, ndef_suffix_free);\n+    if (BIO_asn1_set_prefix(asn_bio, ndef_prefix, ndef_prefix_free) <= 0\n+            || BIO_asn1_set_suffix(asn_bio, ndef_suffix, ndef_suffix_free) <= 0\n+            || BIO_ctrl(asn_bio, BIO_C_SET_EX_ARG, 0, ndef_aux) <= 0)\n+        goto err;\n\n     /*\n-     * Now let callback prepends any digest, cipher etc BIOs ASN1 structure\n-     * needs.\n+     * Now let the callback prepend any digest, cipher, etc., that the BIO's\n+     * ASN1 structure needs.\n      */\n\n     sarg.out = out;\n     sarg.ndef_bio = NULL;\n     sarg.boundary = NULL;\n\n-    if (aux->asn1_cb(ASN1_OP_STREAM_PRE, &val, it, &sarg) <= 0)\n+    /*\n+     * The asn1_cb(), must not have mutated asn_bio on error, 
leaving it in the\n+     * middle of some partially built, but not returned BIO chain.\n+     */\n+    if (aux->asn1_cb(ASN1_OP_STREAM_PRE, &val, it, &sarg) <= 0) {\n+        /*\n+         * ndef_aux is now owned by asn_bio so we must not free it in the err\n+         * clean up block\n+         */\n+        ndef_aux = NULL;\n         goto err;\n+    }\n+\n+    /*\n+     * We must not fail now because the callback has prepended additional\n+     * BIOs to the chain\n+     */\n\n     ndef_aux->val = val;\n     ndef_aux->it = it;\n@@ -91,11 +116,11 @@ BIO *BIO_new_NDEF(BIO *out, ASN1_VALUE *val, const ASN1_ITEM *it)\n     ndef_aux->boundary = sarg.boundary;\n     ndef_aux->out = out;\n\n-    BIO_ctrl(asn_bio, BIO_C_SET_EX_ARG, 0, ndef_aux);\n-\n     return sarg.ndef_bio;\n\n  err:\n+    /* BIO_pop() is NULL safe */\n+    (void)BIO_pop(pop_bio);\n     BIO_free(asn_bio);\n     OPENSSL_free(ndef_aux);\n     return NULL;\ndiff --git a/test/recipes/80-test_cms.t b/test/recipes/80-test_cms.t\nindex 5dc6a3aebe..ec11bfc253 100644\n--- a/test/recipes/80-test_cms.t\n+++ b/test/recipes/80-test_cms.t\n@@ -13,7 +13,7 @@ use warnings;\n use POSIX;\n use File::Spec::Functions qw/catfile/;\n use File::Compare qw/compare_text/;\n-use OpenSSL::Test qw/:DEFAULT srctop_dir srctop_file/;\n+use OpenSSL::Test qw/:DEFAULT srctop_dir srctop_file with/;\n use OpenSSL::Test::Utils;\n\n setup(\"test_cms\");\n@@ -27,7 +27,7 @@ my $smcont   = srctop_file(\"test\", \"smcont.txt\");\n my ($no_des, $no_dh, $no_dsa, $no_ec, $no_ec2m, $no_rc2, $no_zlib)\n     = disabled qw/des dh dsa ec ec2m rc2 zlib/;\n\n-plan tests => 6;\n+plan tests => 7;\n\n my @smime_pkcs7_tests = (\n\n@@ -584,3 +584,14 @@ sub check_availability {\n\n     return \"\";\n }\n+\n+# Check that we get the expected failure return code\n+with({ exit_checker => sub { return shift == 6; } },\n+    sub {\n+        ok(run(app(['openssl', 'cms', '-encrypt',\n+                    '-in', srctop_file(\"test\", \"smcont.txt\"),\n+                
    '-stream', '-recip',\n+                    srctop_file(\"test/smime-certs\", \"badrsa.pem\"),\n+                   ])),\n+            \"Check failure during BIO setup with -stream is handled correctly\");\n+    });\ndiff --git a/test/smime-certs/badrsa.pem b/test/smime-certs/badrsa.pem\nnew file mode 100644\nindex 0000000000..f824fc2267\n--- /dev/null\n+++ b/test/smime-certs/badrsa.pem\n@@ -0,0 +1,18 @@\n+-----BEGIN CERTIFICATE-----\n+MIIDbTCCAlWgAwIBAgIToTV4Z0iuK08vZP20oTh//hC8BDANBgkqhkiG9w0BAQ0FADAtMSswKQYD\n+VfcDEyJTYW1wbGUgTEFNUFMgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MCAXDTE5MTEyMDA2NTQxOFoY\n+DzIwNTIwOTI3MDY1NDE4WjAZMRcwFQYDVQQDEw5BbGljZSBMb3ZlbGFjZTCCASIwDQYJKoZIhvcN\n+AQEBBQADggEPADCCAQoCggEBALT0iehYOBY+TZp/T5K2KNI05Hwr+E3wP6XTvyi6WWyTgBK9LCOw\n+I2juwdRrjFBmXkk7pWpjXwsA3A5GOtz0FpfgyC7OxsVcF7q4WHWZWleYXFKlQHJD73nQwXP968+A\n+/3rBX7PhO0DBbZnfitOLPgPEwjTtdg0VQQ6Wz+CRQ/YbHPKaw7aRphZO63dKvIKp4cQVtkWQHi6s\n+yTjGsgkLcLNau5LZDQUdsGV+SAo3nBdWCRYV+I65x8Kf4hCxqqmjV3d/2NKRu0BXnDe/N+iDz3X0\n+zEoj0fqXgq4SWcC0nsG1lyyXt1TL270I6ATKRGJWiQVCCpDtc0NT6vdJ45bCSxgCAwEAAaOBlzCB\n+lDAMBgNVHRMBAf8EAjAAMB4GA1UdEQQXMBWBE2FsaWNlQHNtaW1lLmV4YW1wbGUwEwYDVR0lBAww\n+CgYIKwYBBQUHAwQwDwYDVR0PAQH/BAUDAwfAADAdBgNVHQ4EFgQUu/bMsi0dBhIcl64papAQ0yBm\n+ZnMwHwYDVR0jBBgwFoAUeF8OWnjYa+RUcD2z3ez38fL6wEcwDQYJKoZIhvcNAQENBQADggEBABbW\n+eonR6TMTckehDKNOabwaCIcekahAIL6l9tTzUX5ew6ufiAPlC6I/zQlmUaU0iSyFDG1NW14kNbFt\n+5CAokyLhMtE4ASHBIHbiOp/ZSbUBTVYJZB61ot7w1/ol5QECSs08b8zrxIncf+t2DHGuVEy/Qq1d\n+rBz8d4ay8zpqAE1tUyL5Da6ZiKUfWwZQXSI/JlbjQFzYQqTRDnzHWrg1xPeMTO1P2/cplFaseTiv\n+yk4cYwOp/W9UAWymOZXF8WcJYCIUXkdcG/nEZxr057KlScrJmFXOoh7Y+8ON4iWYYcAfiNgpUFo/\n+j8BAwrKKaFvdlZS9k1Ypb2+UQY75mKJE9Bg=\n+-----END CERTIFICATE-----"
  },
  {
    "path": "third_party/patch/openssl/CVE-2023-0286.patch",
    "content": "diff --git a/crypto/x509v3/v3_genn.c b/crypto/x509v3/v3_genn.c\nindex 87a5eff47c..e54ddc55c9 100644\n--- a/crypto/x509v3/v3_genn.c\n+++ b/crypto/x509v3/v3_genn.c\n@@ -98,7 +98,7 @@ int GENERAL_NAME_cmp(GENERAL_NAME *a, GENERAL_NAME *b)\n         return -1;\n     switch (a->type) {\n     case GEN_X400:\n-        result = ASN1_TYPE_cmp(a->d.x400Address, b->d.x400Address);\n+        result = ASN1_STRING_cmp(a->d.x400Address, b->d.x400Address);\n         break;\n\n     case GEN_EDIPARTY:\ndiff --git a/include/openssl/x509v3.h b/include/openssl/x509v3.h\nindex 90fa3592ce..e61c0f29d4 100644\n--- a/include/openssl/x509v3.h\n+++ b/include/openssl/x509v3.h\n@@ -136,7 +136,7 @@ typedef struct GENERAL_NAME_st {\n         OTHERNAME *otherName;   /* otherName */\n         ASN1_IA5STRING *rfc822Name;\n         ASN1_IA5STRING *dNSName;\n-        ASN1_TYPE *x400Address;\n+        ASN1_STRING *x400Address;\n         X509_NAME *directoryName;\n         EDIPARTYNAME *ediPartyName;\n         ASN1_IA5STRING *uniformResourceIdentifier;\ndiff --git a/test/v3nametest.c b/test/v3nametest.c\nindex d1852190b8..37819da8fd 100644\n--- a/test/v3nametest.c\n+++ b/test/v3nametest.c\n@@ -646,6 +646,14 @@ static struct gennamedata {\n             0xb7, 0x09, 0x02, 0x02\n         },\n         15\n+    }, {\n+        /*\n+         * Regression test for CVE-2023-0286.\n+         */\n+        {\n+            0xa3, 0x00\n+        },\n+        2\n     }\n };\n"
  },
  {
    "path": "third_party/patch/openssl/CVE-2023-0464.patch",
    "content": "From 879f7080d7e141f415c79eaa3a8ac4a3dad0348b Mon Sep 17 00:00:00 2001\nFrom: Pauli <pauli@openssl.org>\nDate: Wed, 8 Mar 2023 15:28:20 +1100\nSubject: [PATCH] x509: excessive resource use verifying policy constraints\n\nA security vulnerability has been identified in all supported versions\nof OpenSSL related to the verification of X.509 certificate chains\nthat include policy constraints.  Attackers may be able to exploit this\nvulnerability by creating a malicious certificate chain that triggers\nexponential use of computational resources, leading to a denial-of-service\n(DoS) attack on affected systems.\n\nFixes CVE-2023-0464\n\nReviewed-by: Tomas Mraz <tomas@openssl.org>\nReviewed-by: Shane Lontis <shane.lontis@oracle.com>\n(Merged from https://github.com/openssl/openssl/pull/20569)\n---\n crypto/x509v3/pcy_local.h |  8 +++++++-\n crypto/x509v3/pcy_node.c  | 12 +++++++++---\n crypto/x509v3/pcy_tree.c  | 37 +++++++++++++++++++++++++++----------\n 3 files changed, 43 insertions(+), 14 deletions(-)\n\ndiff --git a/crypto/x509v3/pcy_local.h b/crypto/x509v3/pcy_local.h\nindex 5daf78de45..344aa06765 100644\n--- a/crypto/x509v3/pcy_local.h\n+++ b/crypto/x509v3/pcy_local.h\n@@ -111,6 +111,11 @@ struct X509_POLICY_LEVEL_st {\n };\n \n struct X509_POLICY_TREE_st {\n+    /* The number of nodes in the tree */\n+    size_t node_count;\n+    /* The maximum number of nodes in the tree */\n+    size_t node_maximum;\n+\n     /* This is the tree 'level' data */\n     X509_POLICY_LEVEL *levels;\n     int nlevel;\n@@ -159,7 +164,8 @@ X509_POLICY_NODE *tree_find_sk(STACK_OF(X509_POLICY_NODE) *sk,\n X509_POLICY_NODE *level_add_node(X509_POLICY_LEVEL *level,\n                                  X509_POLICY_DATA *data,\n                                  X509_POLICY_NODE *parent,\n-                                 X509_POLICY_TREE *tree);\n+                                 X509_POLICY_TREE *tree,\n+                                 int extra_data);\n void 
policy_node_free(X509_POLICY_NODE *node);\n int policy_node_match(const X509_POLICY_LEVEL *lvl,\n                       const X509_POLICY_NODE *node, const ASN1_OBJECT *oid);\ndiff --git a/crypto/x509v3/pcy_node.c b/crypto/x509v3/pcy_node.c\nindex e2d7b15322..d574fb9d66 100644\n--- a/crypto/x509v3/pcy_node.c\n+++ b/crypto/x509v3/pcy_node.c\n@@ -59,10 +59,15 @@ X509_POLICY_NODE *level_find_node(const X509_POLICY_LEVEL *level,\n X509_POLICY_NODE *level_add_node(X509_POLICY_LEVEL *level,\n                                  X509_POLICY_DATA *data,\n                                  X509_POLICY_NODE *parent,\n-                                 X509_POLICY_TREE *tree)\n+                                 X509_POLICY_TREE *tree,\n+                                 int extra_data)\n {\n     X509_POLICY_NODE *node;\n \n+    /* Verify that the tree isn't too large.  This mitigates CVE-2023-0464 */\n+    if (tree->node_maximum > 0 && tree->node_count >= tree->node_maximum)\n+        return NULL;\n+\n     node = OPENSSL_zalloc(sizeof(*node));\n     if (node == NULL) {\n         X509V3err(X509V3_F_LEVEL_ADD_NODE, ERR_R_MALLOC_FAILURE);\n@@ -70,7 +75,7 @@ X509_POLICY_NODE *level_add_node(X509_POLICY_LEVEL *level,\n     }\n     node->data = data;\n     node->parent = parent;\n-    if (level) {\n+    if (level != NULL) {\n         if (OBJ_obj2nid(data->valid_policy) == NID_any_policy) {\n             if (level->anyPolicy)\n                 goto node_error;\n@@ -90,7 +95,7 @@ X509_POLICY_NODE *level_add_node(X509_POLICY_LEVEL *level,\n         }\n     }\n \n-    if (tree) {\n+    if (extra_data) {\n         if (tree->extra_data == NULL)\n             tree->extra_data = sk_X509_POLICY_DATA_new_null();\n         if (tree->extra_data == NULL){\n@@ -103,6 +108,7 @@ X509_POLICY_NODE *level_add_node(X509_POLICY_LEVEL *level,\n         }\n     }\n \n+    tree->node_count++;\n     if (parent)\n         parent->nchild++;\n \ndiff --git a/crypto/x509v3/pcy_tree.c b/crypto/x509v3/pcy_tree.c\nindex 
6e8322cbc5..6c7fd35405 100644\n--- a/crypto/x509v3/pcy_tree.c\n+++ b/crypto/x509v3/pcy_tree.c\n@@ -13,6 +13,18 @@\n \n #include \"pcy_local.h\"\n \n+/*\n+ * If the maximum number of nodes in the policy tree isn't defined, set it to\n+ * a generous default of 1000 nodes.\n+ *\n+ * Defining this to be zero means unlimited policy tree growth which opens the\n+ * door on CVE-2023-0464.\n+ */\n+\n+#ifndef OPENSSL_POLICY_TREE_NODES_MAX\n+# define OPENSSL_POLICY_TREE_NODES_MAX 1000\n+#endif\n+\n /*\n  * Enable this to print out the complete policy tree at various point during\n  * evaluation.\n@@ -168,6 +180,9 @@ static int tree_init(X509_POLICY_TREE **ptree, STACK_OF(X509) *certs,\n         return X509_PCY_TREE_INTERNAL;\n     }\n \n+    /* Limit the growth of the tree to mitigate CVE-2023-0464 */\n+    tree->node_maximum = OPENSSL_POLICY_TREE_NODES_MAX;\n+\n     /*\n      * http://tools.ietf.org/html/rfc5280#section-6.1.2, figure 3.\n      *\n@@ -184,7 +199,7 @@ static int tree_init(X509_POLICY_TREE **ptree, STACK_OF(X509) *certs,\n     level = tree->levels;\n     if ((data = policy_data_new(NULL, OBJ_nid2obj(NID_any_policy), 0)) == NULL)\n         goto bad_tree;\n-    if (level_add_node(level, data, NULL, tree) == NULL) {\n+    if (level_add_node(level, data, NULL, tree, 1) == NULL) {\n         policy_data_free(data);\n         goto bad_tree;\n     }\n@@ -243,7 +258,8 @@ static int tree_init(X509_POLICY_TREE **ptree, STACK_OF(X509) *certs,\n  * Return value: 1 on success, 0 otherwise\n  */\n static int tree_link_matching_nodes(X509_POLICY_LEVEL *curr,\n-                                    X509_POLICY_DATA *data)\n+                                    X509_POLICY_DATA *data,\n+                                    X509_POLICY_TREE *tree)\n {\n     X509_POLICY_LEVEL *last = curr - 1;\n     int i, matched = 0;\n@@ -253,13 +269,13 @@ static int tree_link_matching_nodes(X509_POLICY_LEVEL *curr,\n         X509_POLICY_NODE *node = sk_X509_POLICY_NODE_value(last->nodes, i);\n \n  
       if (policy_node_match(last, node, data->valid_policy)) {\n-            if (level_add_node(curr, data, node, NULL) == NULL)\n+            if (level_add_node(curr, data, node, tree, 0) == NULL)\n                 return 0;\n             matched = 1;\n         }\n     }\n     if (!matched && last->anyPolicy) {\n-        if (level_add_node(curr, data, last->anyPolicy, NULL) == NULL)\n+        if (level_add_node(curr, data, last->anyPolicy, tree, 0) == NULL)\n             return 0;\n     }\n     return 1;\n@@ -272,7 +288,8 @@ static int tree_link_matching_nodes(X509_POLICY_LEVEL *curr,\n  * Return value: 1 on success, 0 otherwise.\n  */\n static int tree_link_nodes(X509_POLICY_LEVEL *curr,\n-                           const X509_POLICY_CACHE *cache)\n+                           const X509_POLICY_CACHE *cache,\n+                           X509_POLICY_TREE *tree)\n {\n     int i;\n \n@@ -280,7 +297,7 @@ static int tree_link_nodes(X509_POLICY_LEVEL *curr,\n         X509_POLICY_DATA *data = sk_X509_POLICY_DATA_value(cache->data, i);\n \n         /* Look for matching nodes in previous level */\n-        if (!tree_link_matching_nodes(curr, data))\n+        if (!tree_link_matching_nodes(curr, data, tree))\n             return 0;\n     }\n     return 1;\n@@ -311,7 +328,7 @@ static int tree_add_unmatched(X509_POLICY_LEVEL *curr,\n     /* Curr may not have anyPolicy */\n     data->qualifier_set = cache->anyPolicy->qualifier_set;\n     data->flags |= POLICY_DATA_FLAG_SHARED_QUALIFIERS;\n-    if (level_add_node(curr, data, node, tree) == NULL) {\n+    if (level_add_node(curr, data, node, tree, 1) == NULL) {\n         policy_data_free(data);\n         return 0;\n     }\n@@ -373,7 +390,7 @@ static int tree_link_any(X509_POLICY_LEVEL *curr,\n     }\n     /* Finally add link to anyPolicy */\n     if (last->anyPolicy &&\n-        level_add_node(curr, cache->anyPolicy, last->anyPolicy, NULL) == NULL)\n+        level_add_node(curr, cache->anyPolicy, last->anyPolicy, tree, 0) == 
NULL)\n         return 0;\n     return 1;\n }\n@@ -555,7 +572,7 @@ static int tree_calculate_user_set(X509_POLICY_TREE *tree,\n             extra->qualifier_set = anyPolicy->data->qualifier_set;\n             extra->flags = POLICY_DATA_FLAG_SHARED_QUALIFIERS\n                 | POLICY_DATA_FLAG_EXTRA_NODE;\n-            node = level_add_node(NULL, extra, anyPolicy->parent, tree);\n+            node = level_add_node(NULL, extra, anyPolicy->parent, tree, 1);\n         }\n         if (!tree->user_policies) {\n             tree->user_policies = sk_X509_POLICY_NODE_new_null();\n@@ -582,7 +599,7 @@ static int tree_evaluate(X509_POLICY_TREE *tree)\n \n     for (i = 1; i < tree->nlevel; i++, curr++) {\n         cache = policy_cache_set(curr->cert);\n-        if (!tree_link_nodes(curr, cache))\n+        if (!tree_link_nodes(curr, cache, tree))\n             return X509_PCY_TREE_INTERNAL;\n \n         if (!(curr->flags & X509_V_FLAG_INHIBIT_ANY)\n-- \n2.34.1\n\n"
  },
  {
    "path": "third_party/patch/openssl/CVE-2023-0465.patch",
    "content": "From b013765abfa80036dc779dd0e50602c57bb3bf95 Mon Sep 17 00:00:00 2001\nFrom: Matt Caswell <matt@openssl.org>\nDate: Tue, 7 Mar 2023 16:52:55 +0000\nSubject: [PATCH] Ensure that EXFLAG_INVALID_POLICY is checked even in leaf\n certs\n\nEven though we check the leaf cert to confirm it is valid, we\nlater ignored the invalid flag and did not notice that the leaf\ncert was bad.\n\nFixes: CVE-2023-0465\n\nReviewed-by: Hugo Landau <hlandau@openssl.org>\nReviewed-by: Tomas Mraz <tomas@openssl.org>\n(Merged from https://github.com/openssl/openssl/pull/20588)\n---\n crypto/x509/x509_vfy.c | 11 +++++++++--\n 1 file changed, 9 insertions(+), 2 deletions(-)\n\ndiff --git a/crypto/x509/x509_vfy.c b/crypto/x509/x509_vfy.c\nindex 925fbb5412..1dfe4f9f31 100644\n--- a/crypto/x509/x509_vfy.c\n+++ b/crypto/x509/x509_vfy.c\n@@ -1649,18 +1649,25 @@ static int check_policy(X509_STORE_CTX *ctx)\n     }\n     /* Invalid or inconsistent extensions */\n     if (ret == X509_PCY_TREE_INVALID) {\n-        int i;\n+        int i, cbcalled = 0;\n \n         /* Locate certificates with bad extensions and notify callback. */\n-        for (i = 1; i < sk_X509_num(ctx->chain); i++) {\n+        for (i = 0; i < sk_X509_num(ctx->chain); i++) {\n             X509 *x = sk_X509_value(ctx->chain, i);\n \n             if (!(x->ex_flags & EXFLAG_INVALID_POLICY))\n                 continue;\n+            cbcalled = 1;\n             if (!verify_cb_cert(ctx, x, i,\n                                 X509_V_ERR_INVALID_POLICY_EXTENSION))\n                 return 0;\n         }\n+        if (!cbcalled) {\n+            /* Should not be able to get here */\n+            X509err(X509_F_CHECK_POLICY, ERR_R_INTERNAL_ERROR);\n+            return 0;\n+        }\n+        /* The callback ignored the error so we return success */\n         return 1;\n     }\n     if (ret == X509_PCY_TREE_FAILURE) {\n-- \n2.34.1\n\n"
  },
  {
    "path": "third_party/patch/openssl/CVE-2023-0466.patch",
    "content": "diff --git a/doc/man3/X509_VERIFY_PARAM_set_flags.pod b/doc/man3/X509_VERIFY_PARAM_set_flags.pod\nindex f6f304bf7b..aa292f9336 100644\n--- a/doc/man3/X509_VERIFY_PARAM_set_flags.pod\n+++ b/doc/man3/X509_VERIFY_PARAM_set_flags.pod\n@@ -92,8 +92,9 @@ B<trust>.\n X509_VERIFY_PARAM_set_time() sets the verification time in B<param> to\n B<t>. Normally the current time is used.\n \n-X509_VERIFY_PARAM_add0_policy() enables policy checking (it is disabled\n-by default) and adds B<policy> to the acceptable policy set.\n+X509_VERIFY_PARAM_add0_policy() adds B<policy> to the acceptable policy set.\n+Contrary to preexisting documentation of this function it does not enable\n+policy checking.\n \n X509_VERIFY_PARAM_set1_policies() enables policy checking (it is disabled\n by default) and sets the acceptable policy set to B<policies>. Any existing\n@@ -377,6 +378,10 @@ and has no effect.\n \n The X509_VERIFY_PARAM_get_hostflags() function was added in OpenSSL 1.1.0i.\n \n+The function X509_VERIFY_PARAM_add0_policy() was historically documented as\n+enabling policy checking however the implementation has never done this.\n+The documentation was changed to align with the implementation.\n+\n =head1 COPYRIGHT\n \n Copyright 2009-2020 The OpenSSL Project Authors. All Rights Reserved.\n"
  },
  {
    "path": "third_party/patch/openssl/CVE-2023-2650.patch",
    "content": "From 9e209944b35cf82368071f160a744b6178f9b098 Mon Sep 17 00:00:00 2001\nFrom: Richard Levitte <levitte@openssl.org>\nDate: Fri, 12 May 2023 10:00:13 +0200\nSubject: [PATCH] Restrict the size of OBJECT IDENTIFIERs that OBJ_obj2txt will\n translate\n\nOBJ_obj2txt() would translate any size OBJECT IDENTIFIER to canonical\nnumeric text form.  For gigantic sub-identifiers, this would take a very\nlong time, the time complexity being O(n^2) where n is the size of that\nsub-identifier.\n\nTo mitigate this, a restriction on the size that OBJ_obj2txt() will\ntranslate to canonical numeric text form is added, based on RFC 2578\n(STD 58), which says this:\n\n> 3.5. OBJECT IDENTIFIER values\n>\n> An OBJECT IDENTIFIER value is an ordered list of non-negative numbers.\n> For the SMIv2, each number in the list is referred to as a sub-identifier,\n> there are at most 128 sub-identifiers in a value, and each sub-identifier\n> has a maximum value of 2^32-1 (4294967295 decimal).\n\nFixes otc/security#96\nFixes CVE-2023-2650\n\nReviewed-by: Matt Caswell <matt@openssl.org>\nReviewed-by: Tomas Mraz <tomas@openssl.org>\n---\n crypto/objects/obj_dat.c | 19 +++++++++++++++++++\n\ndiff --git a/crypto/objects/obj_dat.c b/crypto/objects/obj_dat.c\nindex 7e8de727f3..d699915b20 100644\n--- a/crypto/objects/obj_dat.c\n+++ b/crypto/objects/obj_dat.c\n@@ -428,6 +428,25 @@ int OBJ_obj2txt(char *buf, int buf_len, const ASN1_OBJECT *a, int no_name)\n     first = 1;\n     bl = NULL;\n \n+    /*\n+     * RFC 2578 (STD 58) says this about OBJECT IDENTIFIERs:\n+     *\n+     * > 3.5. OBJECT IDENTIFIER values\n+     * >\n+     * > An OBJECT IDENTIFIER value is an ordered list of non-negative\n+     * > numbers. 
For the SMIv2, each number in the list is referred to as a\n+     * > sub-identifier, there are at most 128 sub-identifiers in a value,\n+     * > and each sub-identifier has a maximum value of 2^32-1 (4294967295\n+     * > decimal).\n+     *\n+     * So a legitimate OID according to this RFC is at most (32 * 128 / 7),\n+     * i.e. 586 bytes long.\n+     *\n+     * Ref: https://datatracker.ietf.org/doc/html/rfc2578#section-3.5\n+     */\n+    if (len > 586)\n+        goto err;\n+\n     while (len > 0) {\n         l = 0;\n         use_bn = 0;\n-- \n2.34.1\n\n"
  },
  {
    "path": "third_party/patch/openssl/CVE-2023-3446.patch",
    "content": "From 8780a896543a654e757db1b9396383f9d8095528 Mon Sep 17 00:00:00 2001\nFrom: Matt Caswell <matt@openssl.org>\nDate: Thu, 6 Jul 2023 16:36:35 +0100\nSubject: [PATCH] Fix DH_check() excessive time with over sized modulus\n\nThe DH_check() function checks numerous aspects of the key or parameters\nthat have been supplied. Some of those checks use the supplied modulus\nvalue even if it is excessively large.\n\nThere is already a maximum DH modulus size (10,000 bits) over which\nOpenSSL will not generate or derive keys. DH_check() will however still\nperform various tests for validity on such a large modulus. We introduce a\nnew maximum (32,768) over which DH_check() will just fail.\n\nAn application that calls DH_check() and supplies a key or parameters\nobtained from an untrusted source could be vulnerable to a Denial of\nService attack.\n\nThe function DH_check() is itself called by a number of other OpenSSL\nfunctions. An application calling any of those other functions may\nsimilarly be affected. 
The other functions affected by this are\nDH_check_ex() and EVP_PKEY_param_check().\n\nCVE-2023-3446\n\nReviewed-by: Paul Dale <pauli@openssl.org>\nReviewed-by: Tom Cosgrove <tom.cosgrove@arm.com>\nReviewed-by: Bernd Edlinger <bernd.edlinger@hotmail.de>\nReviewed-by: Tomas Mraz <tomas@openssl.org>\n(Merged from https://github.com/openssl/openssl/pull/21452)\n---\n crypto/dh/dh_check.c    | 6 ++++++\n crypto/dh/dh_err.c      | 3 ++-\n crypto/err/openssl.txt  | 1 +\n include/openssl/dh.h    | 3 +++\n include/openssl/dherr.h | 3 ++-\n 5 files changed, 15 insertions(+), 3 deletions(-)\n\ndiff --git a/crypto/dh/dh_check.c b/crypto/dh/dh_check.c\nindex 4ac169e75c..e5f9dd5030 100644\n--- a/crypto/dh/dh_check.c\n+++ b/crypto/dh/dh_check.c\n@@ -101,6 +101,12 @@ int DH_check(const DH *dh, int *ret)\n     BN_CTX *ctx = NULL;\n     BIGNUM *t1 = NULL, *t2 = NULL;\n \n+    /* Don't do any checks at all with an excessively large modulus */\n+    if (BN_num_bits(dh->p) > OPENSSL_DH_CHECK_MAX_MODULUS_BITS) {\n+        DHerr(DH_F_DH_CHECK, DH_R_MODULUS_TOO_LARGE);\n+        return 0;\n+    }\n+\n     if (!DH_check_params(dh, ret))\n         return 0;\n \ndiff --git a/crypto/dh/dh_err.c b/crypto/dh/dh_err.c\nindex 7285587b4a..92800d3fcc 100644\n--- a/crypto/dh/dh_err.c\n+++ b/crypto/dh/dh_err.c\n@@ -1,6 +1,6 @@\n /*\n  * Generated by util/mkerr.pl DO NOT EDIT\n- * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved.\n+ * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved.\n  *\n  * Licensed under the OpenSSL license (the \"License\").  You may not use\n  * this file except in compliance with the License.  
You can obtain a copy\n@@ -18,6 +18,7 @@ static const ERR_STRING_DATA DH_str_functs[] = {\n     {ERR_PACK(ERR_LIB_DH, DH_F_DHPARAMS_PRINT_FP, 0), \"DHparams_print_fp\"},\n     {ERR_PACK(ERR_LIB_DH, DH_F_DH_BUILTIN_GENPARAMS, 0),\n      \"dh_builtin_genparams\"},\n+    {ERR_PACK(ERR_LIB_DH, DH_F_DH_CHECK, 0), \"DH_check\"},\n     {ERR_PACK(ERR_LIB_DH, DH_F_DH_CHECK_EX, 0), \"DH_check_ex\"},\n     {ERR_PACK(ERR_LIB_DH, DH_F_DH_CHECK_PARAMS_EX, 0), \"DH_check_params_ex\"},\n     {ERR_PACK(ERR_LIB_DH, DH_F_DH_CHECK_PUB_KEY_EX, 0), \"DH_check_pub_key_ex\"},\ndiff --git a/crypto/err/openssl.txt b/crypto/err/openssl.txt\nindex 9f91a4a811..c0a3cd720b 100644\n--- a/crypto/err/openssl.txt\n+++ b/crypto/err/openssl.txt\n@@ -401,6 +401,7 @@ CT_F_SCT_SET_VERSION:104:SCT_set_version\n DH_F_COMPUTE_KEY:102:compute_key\n DH_F_DHPARAMS_PRINT_FP:101:DHparams_print_fp\n DH_F_DH_BUILTIN_GENPARAMS:106:dh_builtin_genparams\n+DH_F_DH_CHECK:126:DH_check\n DH_F_DH_CHECK_EX:121:DH_check_ex\n DH_F_DH_CHECK_PARAMS_EX:122:DH_check_params_ex\n DH_F_DH_CHECK_PUB_KEY_EX:123:DH_check_pub_key_ex\ndiff --git a/include/openssl/dh.h b/include/openssl/dh.h\nindex 3527540cdd..892e31559d 100644\n--- a/include/openssl/dh.h\n+++ b/include/openssl/dh.h\n@@ -29,6 +29,9 @@ extern \"C\" {\n # ifndef OPENSSL_DH_MAX_MODULUS_BITS\n #  define OPENSSL_DH_MAX_MODULUS_BITS    10000\n # endif\n+# ifndef OPENSSL_DH_CHECK_MAX_MODULUS_BITS\n+#  define OPENSSL_DH_CHECK_MAX_MODULUS_BITS  32768\n+# endif\n \n # define OPENSSL_DH_FIPS_MIN_MODULUS_BITS 1024\n \ndiff --git a/include/openssl/dherr.h b/include/openssl/dherr.h\nindex 916b3bed0b..528c819856 100644\n--- a/include/openssl/dherr.h\n+++ b/include/openssl/dherr.h\n@@ -1,6 +1,6 @@\n /*\n  * Generated by util/mkerr.pl DO NOT EDIT\n- * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved.\n+ * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved.\n  *\n  * Licensed under the OpenSSL license (the \"License\").  
You may not use\n  * this file except in compliance with the License.  You can obtain a copy\n@@ -30,6 +30,7 @@ int ERR_load_DH_strings(void);\n #  define DH_F_COMPUTE_KEY                                 102\n #  define DH_F_DHPARAMS_PRINT_FP                           101\n #  define DH_F_DH_BUILTIN_GENPARAMS                        106\n+#  define DH_F_DH_CHECK                                    126\n #  define DH_F_DH_CHECK_EX                                 121\n #  define DH_F_DH_CHECK_PARAMS_EX                          122\n #  define DH_F_DH_CHECK_PUB_KEY_EX                         123\n-- \n2.34.1\n\n"
  },
  {
    "path": "third_party/patch/openssl/CVE-2023-4807.patch",
    "content": "From a632d534c73eeb3e3db8c7540d811194ef7c79ff Mon Sep 17 00:00:00 2001\nFrom: Bernd Edlinger <bernd.edlinger@hotmail.de>\nDate: Tue, 22 Aug 2023 16:07:30 +0200\nSubject: [PATCH] Avoid clobbering non-volatile XMM registers\n\nThis affects some Poly1305 assembler functions\nwhich are only used for certain CPU types.\n\nRemove those functions for Windows targets,\nas a simple interim solution.\n\nFixes #21522\n\nReviewed-by: Tomas Mraz <tomas@openssl.org>\nReviewed-by: Paul Dale <pauli@openssl.org>\n(Merged from https://github.com/openssl/openssl/pull/21808)\n\n(cherry picked from commit 7b8e27bc2e02238986d89ef0ece067ec1b48e165)\n---\n crypto/poly1305/asm/poly1305-x86_64.pl | 4 ++--\n 1 file changed, 2 insertions(+), 2 deletions(-)\n\ndiff --git a/crypto/poly1305/asm/poly1305-x86_64.pl b/crypto/poly1305/asm/poly1305-x86_64.pl\nindex 5f834d8faf..801455c639 100755\n--- a/crypto/poly1305/asm/poly1305-x86_64.pl\n+++ b/crypto/poly1305/asm/poly1305-x86_64.pl\n@@ -193,7 +193,7 @@ $code.=<<___\tif ($avx>1);\n \tbt\t\\$`5+32`,%r9\t\t# AVX2?\n \tcmovc\t%rax,%r10\n ___\n-$code.=<<___\tif ($avx>3);\n+$code.=<<___\tif ($avx>3 && !$win64);\n \tmov\t\\$`(1<<31|1<<21|1<<16)`,%rax\n \tshr\t\\$32,%r9\n \tand\t%rax,%r9\n@@ -2722,7 +2722,7 @@ $code.=<<___;\n .cfi_endproc\n .size\tpoly1305_blocks_avx512,.-poly1305_blocks_avx512\n ___\n-if ($avx>3) {\n+if ($avx>3 && !$win64) {\n ########################################################################\n # VPMADD52 version using 2^44 radix.\n #\n-- \n2.34.1\n\n"
  },
  {
    "path": "third_party/patch/protobuf/CVE-2021-22570.patch",
    "content": "diff --git a/src/google/protobuf/descriptor.cc b/src/google/protobuf/descriptor.cc\nindex 9a448ffc8..40510b46c 100644\n--- a/src/google/protobuf/descriptor.cc\n+++ b/src/google/protobuf/descriptor.cc\n@@ -1090,7 +1090,7 @@ inline void DescriptorPool::Tables::FindAllExtensions(\n \n bool DescriptorPool::Tables::AddSymbol(const std::string& full_name,\n                                        Symbol symbol) {\n-  if (InsertIfNotPresent(&symbols_by_name_, full_name.c_str(), symbol)) {\n+  if (InsertIfNotPresent(&symbols_by_name_, full_name, symbol)) {\n     symbols_after_checkpoint_.push_back(full_name.c_str());\n     return true;\n   } else {\n@@ -1106,7 +1106,7 @@ bool FileDescriptorTables::AddAliasUnderParent(const void* parent,\n }\n \n bool DescriptorPool::Tables::AddFile(const FileDescriptor* file) {\n-  if (InsertIfNotPresent(&files_by_name_, file->name().c_str(), file)) {\n+  if (InsertIfNotPresent(&files_by_name_, file->name(), file)) {\n     files_after_checkpoint_.push_back(file->name().c_str());\n     return true;\n   } else {\n@@ -2628,6 +2628,8 @@ void Descriptor::DebugString(int depth, std::string* contents,\n       const Descriptor::ReservedRange* range = reserved_range(i);\n       if (range->end == range->start + 1) {\n         strings::SubstituteAndAppend(contents, \"$0, \", range->start);\n+      } else if (range->end > FieldDescriptor::kMaxNumber) {\n+        strings::SubstituteAndAppend(contents, \"$0 to max, \", range->start);\n       } else {\n         strings::SubstituteAndAppend(contents, \"$0 to $1, \", range->start,\n                                   range->end - 1);\n@@ -2831,6 +2833,8 @@ void EnumDescriptor::DebugString(\n       const EnumDescriptor::ReservedRange* range = reserved_range(i);\n       if (range->end == range->start) {\n         strings::SubstituteAndAppend(contents, \"$0, \", range->start);\n+      } else if (range->end == INT_MAX) {\n+        strings::SubstituteAndAppend(contents, \"$0 to max, \", 
range->start);\n       } else {\n         strings::SubstituteAndAppend(contents, \"$0 to $1, \", range->start,\n                                   range->end);\n@@ -4022,6 +4026,12 @@ bool DescriptorBuilder::AddSymbol(const std::string& full_name,\n   // Use its file as the parent instead.\n   if (parent == nullptr) parent = file_;\n \n+  if (full_name.find('\\0') != std::string::npos) {\n+    AddError(full_name, proto, DescriptorPool::ErrorCollector::NAME,\n+             \"\\\"\" + full_name + \"\\\" contains null character.\");\n+    return false;\n+  }\n+\n   if (tables_->AddSymbol(full_name, symbol)) {\n     if (!file_tables_->AddAliasUnderParent(parent, name, symbol)) {\n       // This is only possible if there was already an error adding something of\n@@ -4061,6 +4071,11 @@ bool DescriptorBuilder::AddSymbol(const std::string& full_name,\n void DescriptorBuilder::AddPackage(const std::string& name,\n                                    const Message& proto,\n                                    const FileDescriptor* file) {\n+  if (name.find('\\0') != std::string::npos) {\n+    AddError(name, proto, DescriptorPool::ErrorCollector::NAME,\n+             \"\\\"\" + name + \"\\\" contains null character.\");\n+    return;\n+  }\n   if (tables_->AddSymbol(name, Symbol(file))) {\n     // Success.  
Also add parent package, if any.\n     std::string::size_type dot_pos = name.find_last_of('.');\n@@ -4374,6 +4389,12 @@ FileDescriptor* DescriptorBuilder::BuildFileImpl(\n   }\n   result->pool_ = pool_;\n \n+  if (result->name().find('\\0') != std::string::npos) {\n+    AddError(result->name(), proto, DescriptorPool::ErrorCollector::NAME,\n+             \"\\\"\" + result->name() + \"\\\" contains null character.\");\n+    return nullptr;\n+  }\n+\n   // Add to tables.\n   if (!tables_->AddFile(result)) {\n     AddError(proto.name(), proto, DescriptorPool::ErrorCollector::OTHER,\ndiff --git a/src/google/protobuf/descriptor_unittest.cc b/src/google/protobuf/descriptor_unittest.cc\nindex 6085a122a..56c180aa4 100644\n--- a/src/google/protobuf/descriptor_unittest.cc\n+++ b/src/google/protobuf/descriptor_unittest.cc\n@@ -3786,6 +3786,45 @@ TEST_F(ValidationErrorTest, InvalidPackageName) {\n       \"foo.proto: foo.$: NAME: \\\"$\\\" is not a valid identifier.\\n\");\n }\n \n+// 'str' is a static C-style string that may contain '\\0'\n+#define STATIC_STR(str) std::string((str), sizeof(str) - 1)\n+\n+TEST_F(ValidationErrorTest, NullCharSymbolName) {\n+  BuildFileWithErrors(\n+      \"name: \\\"bar.proto\\\" \"\n+      \"package: \\\"foo\\\"\"\n+      \"message_type { \"\n+      \"  name: '\\\\000\\\\001\\\\013.Bar' \"\n+      \"  field { name: \\\"foo\\\" number:  9 label:LABEL_OPTIONAL type:TYPE_INT32 \"\n+      \"} \"\n+      \"}\",\n+      STATIC_STR(\"bar.proto: foo.\\0\\x1\\v.Bar: NAME: \\\"\\0\\x1\\v.Bar\\\" is not a \"\n+                 \"valid identifier.\\nbar.proto: foo.\\0\\x1\\v.Bar: NAME: \"\n+                 \"\\\"\\0\\x1\\v.Bar\\\" is not a valid identifier.\\nbar.proto: \"\n+                 \"foo.\\0\\x1\\v.Bar: NAME: \\\"\\0\\x1\\v.Bar\\\" is not a valid \"\n+                 \"identifier.\\nbar.proto: foo.\\0\\x1\\v.Bar: NAME: \"\n+                 \"\\\"\\0\\x1\\v.Bar\\\" is not a valid identifier.\\nbar.proto: \"\n+                 
\"foo.\\0\\x1\\v.Bar.foo: NAME: \\\"foo.\\0\\x1\\v.Bar.foo\\\" contains \"\n+                 \"null character.\\nbar.proto: foo.\\0\\x1\\v.Bar: NAME: \"\n+                 \"\\\"foo.\\0\\x1\\v.Bar\\\" contains null character.\\n\"));\n+}\n+\n+TEST_F(ValidationErrorTest, NullCharFileName) {\n+  BuildFileWithErrors(\n+      \"name: \\\"bar\\\\000\\\\001\\\\013.proto\\\" \"\n+      \"package: \\\"outer.foo\\\"\",\n+      STATIC_STR(\"bar\\0\\x1\\v.proto: bar\\0\\x1\\v.proto: NAME: \"\n+                 \"\\\"bar\\0\\x1\\v.proto\\\" contains null character.\\n\"));\n+}\n+\n+TEST_F(ValidationErrorTest, NullCharPackageName) {\n+  BuildFileWithErrors(\n+      \"name: \\\"bar.proto\\\" \"\n+      \"package: \\\"\\\\000\\\\001\\\\013.\\\"\",\n+      STATIC_STR(\"bar.proto: \\0\\x1\\v.: NAME: \\\"\\0\\x1\\v.\\\" contains null \"\n+                 \"character.\\n\"));\n+}\n+\n TEST_F(ValidationErrorTest, MissingFileName) {\n   BuildFileWithErrors(\"\",\n \n@@ -4001,6 +4040,32 @@ TEST_F(ValidationErrorTest, ReservedFieldsDebugString) {\n       file->DebugString());\n }\n \n+TEST_F(ValidationErrorTest, DebugStringReservedRangeMax) {\n+  const FileDescriptor* file = BuildFile(strings::Substitute(\n+      \"name: \\\"foo.proto\\\" \"\n+      \"enum_type { \"\n+      \"  name: \\\"Bar\\\"\"\n+      \"  value { name:\\\"BAR\\\" number:1 }\"\n+      \"  reserved_range { start: 5 end: $0 }\"\n+      \"}\"\n+      \"message_type {\"\n+      \"  name: \\\"Foo\\\"\"\n+      \"  reserved_range { start: 5 end: $1 }\"\n+      \"}\",\n+      std::numeric_limits<int>::max(), FieldDescriptor::kMaxNumber + 1));\n+\n+  ASSERT_EQ(\n+      \"syntax = \\\"proto2\\\";\\n\\n\"\n+      \"enum Bar {\\n\"\n+      \"  BAR = 1;\\n\"\n+      \"  reserved 5 to max;\\n\"\n+      \"}\\n\\n\"\n+      \"message Foo {\\n\"\n+      \"  reserved 5 to max;\\n\"\n+      \"}\\n\\n\",\n+      file->DebugString());\n+}\n+\n TEST_F(ValidationErrorTest, EnumReservedFieldError) {\n   BuildFileWithErrors(\n       
\"name: \\\"foo.proto\\\" \"\n\n\n"
  },
  {
    "path": "third_party/patch/protobuf/CVE-2022-1941.patch",
    "content": "diff --git a/src/google/protobuf/extension_set_inl.h b/src/google/protobuf/extension_set_inl.h\nindex 074784b96..aff050a81 100644\n--- a/src/google/protobuf/extension_set_inl.h\n+++ b/src/google/protobuf/extension_set_inl.h\n@@ -206,16 +206,22 @@ const char* ExtensionSet::ParseMessageSetItemTmpl(\n     const char* ptr, const Msg* containing_type,\n     internal::InternalMetadata* metadata, internal::ParseContext* ctx) {\n   std::string payload;\n-  uint32 type_id = 0;\n-  bool payload_read = false;\n+\n+  uint32_t type_id;\n+  enum class State { kNoTag, kHasType, kHasPayload, kDone };\n+  State state = State::kNoTag;\n+\n   while (!ctx->Done(&ptr)) {\n     uint32 tag = static_cast<uint8>(*ptr++);\n     if (tag == WireFormatLite::kMessageSetTypeIdTag) {\n       uint64 tmp;\n       ptr = ParseBigVarint(ptr, &tmp);\n       GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);\n-      type_id = tmp;\n-      if (payload_read) {\n+      if (state == State::kNoTag) {\n+        type_id = tmp;\n+        state = State::kHasType;\n+      } else if (state == State::kHasPayload) {\n+        type_id = tmp;\n         ExtensionInfo extension;\n         bool was_packed_on_wire;\n         if (!FindExtension(2, type_id, containing_type, ctx, &extension,\n@@ -241,20 +247,26 @@ const char* ExtensionSet::ParseMessageSetItemTmpl(\n           GOOGLE_PROTOBUF_PARSER_ASSERT(value->_InternalParse(p, &tmp_ctx) &&\n                                          tmp_ctx.EndedAtLimit());\n         }\n-        type_id = 0;\n+        state = State::kDone;\n       }\n     } else if (tag == WireFormatLite::kMessageSetMessageTag) {\n-      if (type_id != 0) {\n-        ptr = ParseFieldMaybeLazily(static_cast<uint64>(type_id) * 8 + 2, ptr,\n-                                    containing_type, metadata, ctx);\n+\n+      if (state == State::kHasType) {\n+        ptr = ParseFieldMaybeLazily(static_cast<uint64_t>(type_id) * 8 + 2, ptr,\n+                                    containing_type, metadata, ctx);\n     
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);\n-        type_id = 0;\n+        state = State::kDone;\n       } else {\n-        int32 size = ReadSize(&ptr);\n+\n+        std::string tmp;\n+        int32_t size = ReadSize(&ptr);\n         GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);\n-        ptr = ctx->ReadString(ptr, size, &payload);\n+        ptr = ctx->ReadString(ptr, size, &tmp);\n         GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);\n-        payload_read = true;\n+        if (state == State::kNoTag) {\n+          payload = std::move(tmp);\n+          state = State::kHasPayload;\n+        }\n       }\n     } else {\n       ptr = ReadTag(ptr - 1, &tag);\ndiff --git a/src/google/protobuf/wire_format.cc b/src/google/protobuf/wire_format.cc\nindex 16edf2ce3..88fb09169 100644\n--- a/src/google/protobuf/wire_format.cc\n+++ b/src/google/protobuf/wire_format.cc\n@@ -659,9 +659,11 @@ struct WireFormat::MessageSetParser {\n   const char* _InternalParse(const char* ptr, internal::ParseContext* ctx) {\n     // Parse a MessageSetItem\n     auto metadata = reflection->MutableInternalMetadata(msg);\n+    enum class State { kNoTag, kHasType, kHasPayload, kDone };\n+    State state = State::kNoTag;\n+\n     std::string payload;\n-    uint32 type_id = 0;\n-    bool payload_read = false;\n+    uint32_t type_id = 0;\n     while (!ctx->Done(&ptr)) {\n       // We use 64 bit tags in order to allow typeid's that span the whole\n       // range of 32 bit numbers.\n@@ -670,8 +672,11 @@ struct WireFormat::MessageSetParser {\n         uint64 tmp;\n         ptr = ParseBigVarint(ptr, &tmp);\n         GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);\n-        type_id = tmp;\n-        if (payload_read) {\n+        if (state == State::kNoTag) {\n+          type_id = tmp;\n+          state = State::kHasType;\n+        } else if (state == State::kHasPayload) {\n+          type_id = tmp;\n           const FieldDescriptor* field;\n           if (ctx->data().pool == nullptr) {\n             field = 
reflection->FindKnownExtensionByNumber(type_id);\n@@ -698,17 +703,18 @@ struct WireFormat::MessageSetParser {\n             GOOGLE_PROTOBUF_PARSER_ASSERT(value->_InternalParse(p, &tmp_ctx) &&\n                                            tmp_ctx.EndedAtLimit());\n           }\n-          type_id = 0;\n+          state = State::kDone;\n         }\n         continue;\n       } else if (tag == WireFormatLite::kMessageSetMessageTag) {\n-        if (type_id == 0) {\n-          int32 size = ReadSize(&ptr);\n+\n+        if (state == State::kNoTag) {\n+          int32_t size = ReadSize(&ptr);\n           GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);\n           ptr = ctx->ReadString(ptr, size, &payload);\n           GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);\n-          payload_read = true;\n-        } else {\n+          state = State::kHasPayload;\n+        } else if (state == State::kHasType) {\n           // We're now parsing the payload\n           const FieldDescriptor* field = nullptr;\n           if (descriptor->IsExtensionNumber(type_id)) {\n@@ -722,7 +728,12 @@ struct WireFormat::MessageSetParser {\n           ptr = WireFormat::_InternalParseAndMergeField(\n               msg, ptr, ctx, static_cast<uint64>(type_id) * 8 + 2, reflection,\n               field);\n-          type_id = 0;\n+          state = State::kDone;\n+        } else {\n+          int32_t size = ReadSize(&ptr);\n+          GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);\n+          ptr = ctx->Skip(ptr, size);\n+          GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);\n         }\n       } else {\n         // An unknown field in MessageSetItem.\ndiff --git a/src/google/protobuf/wire_format_lite.h b/src/google/protobuf/wire_format_lite.h\nindex c742fe869..4130bc531 100644\n--- a/src/google/protobuf/wire_format_lite.h\n+++ b/src/google/protobuf/wire_format_lite.h\n@@ -1798,6 +1798,9 @@ bool ParseMessageSetItemImpl(io::CodedInputStream* input, MS ms) {\n   // we can parse it later.\n   std::string message_data;\n \n+  enum class State { 
kNoTag, kHasType, kHasPayload, kDone };\n+  State state = State::kNoTag;\n+\n   while (true) {\n     const uint32 tag = input->ReadTagNoLastTag();\n     if (tag == 0) return false;\n@@ -1806,26 +1809,34 @@ bool ParseMessageSetItemImpl(io::CodedInputStream* input, MS ms) {\n       case WireFormatLite::kMessageSetTypeIdTag: {\n         uint32 type_id;\n         if (!input->ReadVarint32(&type_id)) return false;\n-        last_type_id = type_id;\n-\n-        if (!message_data.empty()) {\n+        if (state == State::kNoTag) {\n+          last_type_id = type_id;\n+          state = State::kHasType;\n+        } else if (state == State::kHasPayload) {\n           // We saw some message data before the type_id.  Have to parse it\n           // now.\n           io::CodedInputStream sub_input(\n               reinterpret_cast<const uint8*>(message_data.data()),\n               static_cast<int>(message_data.size()));\n           sub_input.SetRecursionLimit(input->RecursionBudget());\n-          if (!ms.ParseField(last_type_id, &sub_input)) {\n+          if (!ms.ParseField(type_id, &sub_input)) {\n             return false;\n           }\n           message_data.clear();\n+          state = State::kDone;\n         }\n \n         break;\n       }\n \n       case WireFormatLite::kMessageSetMessageTag: {\n-        if (last_type_id == 0) {\n+        if (state == State::kHasType) {\n+          // Already saw type_id, so we can parse this directly.\n+          if (!ms.ParseField(last_type_id, input)) {\n+            return false;\n+          }\n+          state = State::kDone;\n+        } else if (state == State::kNoTag) {\n           // We haven't seen a type_id yet.  
Append this data to message_data.\n           uint32 length;\n           if (!input->ReadVarint32(&length)) return false;\n@@ -1836,11 +1847,9 @@ bool ParseMessageSetItemImpl(io::CodedInputStream* input, MS ms) {\n           auto ptr = reinterpret_cast<uint8*>(&message_data[0]);\n           ptr = io::CodedOutputStream::WriteVarint32ToArray(length, ptr);\n           if (!input->ReadRaw(ptr, length)) return false;\n+          state = State::kHasPayload;\n         } else {\n-          // Already saw type_id, so we can parse this directly.\n-          if (!ms.ParseField(last_type_id, input)) {\n-            return false;\n-          }\n+          if (!ms.SkipField(tag, input)) return false;\n         }\n \n         break;\n"
  },
  {
    "path": "third_party/patch/pybind11/pybind11.patch001",
    "content": "diff --git a/include/pybind11/pybind11.h b/include/pybind11/pybind11.h\nindex 3bffbb28..4a6a9809 100644\n--- a/include/pybind11/pybind11.h\n+++ b/include/pybind11/pybind11.h\n@@ -21,6 +21,7 @@\n #  pragma warning disable 1875  // offsetof applied to non-POD (Plain Old Data) types is nonstandard\n #  pragma warning disable 2196  // warning #2196: routine is both \"inline\" and \"noinline\"\n #elif defined(_MSC_VER)\n+#include <corecrt.h>\n #  pragma warning(push)\n #  pragma warning(disable: 4100) // warning C4100: Unreferenced formal parameter\n #  pragma warning(disable: 4127) // warning C4127: Conditional expression is constant\n\n"
  },
  {
    "path": "third_party/patch/zlib/CVE-2018-25032.patch",
    "content": "diff -Npur zlib-1.2.11/deflate.c zlib-1.2.11-change/deflate.c\n--- zlib-1.2.11/deflate.c\t2017-01-16 01:29:40.000000000 +0800\n+++ zlib-1.2.11-change/deflate.c\t2022-07-28 04:48:30.310281281 +0800\n@@ -252,10 +252,6 @@ int ZEXPORT deflateInit2_(strm, level, m\n     int wrap = 1;\n     static const char my_version[] = ZLIB_VERSION;\n \n-    ushf *overlay;\n-    /* We overlay pending_buf and d_buf+l_buf. This works since the average\n-     * output size for (length,distance) codes is <= 24 bits.\n-     */\n \n     if (version == Z_NULL || version[0] != my_version[0] ||\n         stream_size != sizeof(z_stream)) {\n@@ -326,9 +322,47 @@ int ZEXPORT deflateInit2_(strm, level, m\n \n     s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */\n \n-    overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);\n-    s->pending_buf = (uchf *) overlay;\n-    s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);\n+    /* We overlay pending_buf and sym_buf. This works since the average size\n+     * for length/distance pairs over any compressed block is assured to be 31\n+     * bits or less.\n+     *\n+     * Analysis: The longest fixed codes are a length code of 8 bits plus 5\n+     * extra bits, for lengths 131 to 257. The longest fixed distance codes are\n+     * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest\n+     * possible fixed-codes length/distance pair is then 31 bits total.\n+     *\n+     * sym_buf starts one-fourth of the way into pending_buf. So there are\n+     * three bytes in sym_buf for every four bytes in pending_buf. Each symbol\n+     * in sym_buf is three bytes -- two for the distance and one for the\n+     * literal/length. As each symbol is consumed, the pointer to the next\n+     * sym_buf value to read moves forward three bytes. From that symbol, up to\n+     * 31 bits are written to pending_buf. 
The closest the written pending_buf\n+     * bits gets to the next sym_buf symbol to read is just before the last\n+     * code is written. At that time, 31*(n-2) bits have been written, just\n+     * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at\n+     * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1\n+     * symbols are written.) The closest the writing gets to what is unread is\n+     * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and\n+     * can range from 128 to 32768.\n+     *\n+     * Therefore, at a minimum, there are 142 bits of space between what is\n+     * written and what is read in the overlain buffers, so the symbols cannot\n+     * be overwritten by the compressed data. That space is actually 139 bits,\n+     * due to the three-bit fixed-code block header.\n+     *\n+     * That covers the case where either Z_FIXED is specified, forcing fixed\n+     * codes, or when the use of fixed codes is chosen, because that choice\n+     * results in a smaller compressed block than dynamic codes. That latter\n+     * condition then assures that the above analysis also covers all dynamic\n+     * blocks. A dynamic-code block will only be chosen to be emitted if it has\n+     * fewer bits than a fixed-code block would for the same set of symbols.\n+     * Therefore its average symbol length is assured to be less than 31. 
So\n+     * the compressed data for a dynamic block also cannot overwrite the\n+     * symbols from which it is being constructed.\n+     */\n+\n+    s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4);\n+    s->pending_buf_size = (ulg)s->lit_bufsize * 4;\n \n     if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||\n         s->pending_buf == Z_NULL) {\n@@ -337,8 +371,12 @@ int ZEXPORT deflateInit2_(strm, level, m\n         deflateEnd (strm);\n         return Z_MEM_ERROR;\n     }\n-    s->d_buf = overlay + s->lit_bufsize/sizeof(ush);\n-    s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;\n+    s->sym_buf = s->pending_buf + s->lit_bufsize;\n+    s->sym_end = (s->lit_bufsize - 1) * 3;\n+    /* We avoid equality with lit_bufsize*3 because of wraparound at 64K\n+     * on 16 bit machines and because stored blocks are restricted to\n+     * 64K-1 bytes.\n+     */\n \n     s->level = level;\n     s->strategy = strategy;\n@@ -549,7 +587,7 @@ int ZEXPORT deflatePrime (strm, bits, va\n \n     if (deflateStateCheck(strm)) return Z_STREAM_ERROR;\n     s = strm->state;\n-    if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3))\n+    if (s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))\n         return Z_BUF_ERROR;\n     do {\n         put = Buf_size - s->bi_valid;\n@@ -1108,7 +1146,6 @@ int ZEXPORT deflateCopy (dest, source)\n #else\n     deflate_state *ds;\n     deflate_state *ss;\n-    ushf *overlay;\n \n \n     if (deflateStateCheck(source) || dest == Z_NULL) {\n@@ -1128,8 +1165,7 @@ int ZEXPORT deflateCopy (dest, source)\n     ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));\n     ds->prev   = (Posf *)  ZALLOC(dest, ds->w_size, sizeof(Pos));\n     ds->head   = (Posf *)  ZALLOC(dest, ds->hash_size, sizeof(Pos));\n-    overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);\n-    ds->pending_buf = (uchf *) overlay;\n+    ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4);\n \n     if 
(ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||\n         ds->pending_buf == Z_NULL) {\n@@ -1143,8 +1179,7 @@ int ZEXPORT deflateCopy (dest, source)\n     zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);\n \n     ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);\n-    ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);\n-    ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;\n+    ds->sym_buf = ds->pending_buf + ds->lit_bufsize;\n \n     ds->l_desc.dyn_tree = ds->dyn_ltree;\n     ds->d_desc.dyn_tree = ds->dyn_dtree;\n@@ -1912,7 +1947,7 @@ local block_state deflate_fast(s, flush)\n         FLUSH_BLOCK(s, 1);\n         return finish_done;\n     }\n-    if (s->last_lit)\n+    if (s->sym_next)\n         FLUSH_BLOCK(s, 0);\n     return block_done;\n }\n@@ -2043,7 +2078,7 @@ local block_state deflate_slow(s, flush)\n         FLUSH_BLOCK(s, 1);\n         return finish_done;\n     }\n-    if (s->last_lit)\n+    if (s->sym_next)\n         FLUSH_BLOCK(s, 0);\n     return block_done;\n }\n@@ -2118,7 +2153,7 @@ local block_state deflate_rle(s, flush)\n         FLUSH_BLOCK(s, 1);\n         return finish_done;\n     }\n-    if (s->last_lit)\n+    if (s->sym_next)\n         FLUSH_BLOCK(s, 0);\n     return block_done;\n }\n@@ -2157,7 +2192,7 @@ local block_state deflate_huff(s, flush)\n         FLUSH_BLOCK(s, 1);\n         return finish_done;\n     }\n-    if (s->last_lit)\n+    if (s->sym_next)\n         FLUSH_BLOCK(s, 0);\n     return block_done;\n }\ndiff -Npur zlib-1.2.11/deflate.h zlib-1.2.11-change/deflate.h\n--- zlib-1.2.11/deflate.h\t2017-01-01 15:37:10.000000000 +0800\n+++ zlib-1.2.11-change/deflate.h\t2022-07-28 04:42:55.134287681 +0800\n@@ -217,7 +217,7 @@ typedef struct internal_state {\n     /* Depth of each subtree used as tie breaker for trees of equal frequency\n      */\n \n-    uchf *l_buf;          /* buffer for literals or lengths */\n+    uchf *sym_buf;        /* buffer for distances 
and literals/lengths */\n \n     uInt  lit_bufsize;\n     /* Size of match buffer for literals/lengths.  There are 4 reasons for\n@@ -239,13 +239,8 @@ typedef struct internal_state {\n      *   - I can't count above 4\n      */\n \n-    uInt last_lit;      /* running index in l_buf */\n-\n-    ushf *d_buf;\n-    /* Buffer for distances. To simplify the code, d_buf and l_buf have\n-     * the same number of elements. To use different lengths, an extra flag\n-     * array would be necessary.\n-     */\n+    uInt sym_next;      /* running index in sym_buf */\n+    uInt sym_end;       /* symbol table full when sym_next reaches this */\n \n     ulg opt_len;        /* bit length of current block with optimal trees */\n     ulg static_len;     /* bit length of current block with static trees */\n@@ -325,20 +320,22 @@ void ZLIB_INTERNAL _tr_stored_block OF((\n \n # define _tr_tally_lit(s, c, flush) \\\n   { uch cc = (c); \\\n-    s->d_buf[s->last_lit] = 0; \\\n-    s->l_buf[s->last_lit++] = cc; \\\n+    s->sym_buf[s->sym_next++] = 0; \\\n+    s->sym_buf[s->sym_next++] = 0; \\\n+    s->sym_buf[s->sym_next++] = cc; \\\n     s->dyn_ltree[cc].Freq++; \\\n-    flush = (s->last_lit == s->lit_bufsize-1); \\\n+    flush = (s->sym_next == s->sym_end); \\\n    }\n # define _tr_tally_dist(s, distance, length, flush) \\\n   { uch len = (uch)(length); \\\n     ush dist = (ush)(distance); \\\n-    s->d_buf[s->last_lit] = dist; \\\n-    s->l_buf[s->last_lit++] = len; \\\n+    s->sym_buf[s->sym_next++] = dist; \\\n+    s->sym_buf[s->sym_next++] = dist >> 8; \\\n+    s->sym_buf[s->sym_next++] = len; \\\n     dist--; \\\n     s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \\\n     s->dyn_dtree[d_code(dist)].Freq++; \\\n-    flush = (s->last_lit == s->lit_bufsize-1); \\\n+    flush = (s->sym_next == s->sym_end); \\\n   }\n #else\n # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c)\ndiff -Npur zlib-1.2.11/trees.c zlib-1.2.11-change/trees.c\n--- zlib-1.2.11/trees.c\t2017-01-16 
01:07:14.000000000 +0800\n+++ zlib-1.2.11-change/trees.c\t2022-07-28 05:00:04.094268034 +0800\n@@ -416,7 +416,7 @@ local void init_block(s)\n \n     s->dyn_ltree[END_BLOCK].Freq = 1;\n     s->opt_len = s->static_len = 0L;\n-    s->last_lit = s->matches = 0;\n+    s->sym_next = s->matches = 0;\n }\n \n #define SMALLEST 1\n@@ -947,7 +947,7 @@ void ZLIB_INTERNAL _tr_flush_block(s, bu\n \n         Tracev((stderr, \"\\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u \",\n                 opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,\n-                s->last_lit));\n+                s->sym_next / 3));\n \n         if (static_lenb <= opt_lenb) opt_lenb = static_lenb;\n \n@@ -1016,8 +1016,9 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc\n     unsigned dist;  /* distance of matched string */\n     unsigned lc;    /* match length-MIN_MATCH or unmatched char (if dist==0) */\n {\n-    s->d_buf[s->last_lit] = (ush)dist;\n-    s->l_buf[s->last_lit++] = (uch)lc;\n+    s->sym_buf[s->sym_next++] = dist;\n+    s->sym_buf[s->sym_next++] = dist >> 8;\n+    s->sym_buf[s->sym_next++] = lc;\n     if (dist == 0) {\n         /* lc is the unmatched char */\n         s->dyn_ltree[lc].Freq++;\n@@ -1032,30 +1033,7 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc\n         s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++;\n         s->dyn_dtree[d_code(dist)].Freq++;\n     }\n-\n-#ifdef TRUNCATE_BLOCK\n-    /* Try to guess if it is profitable to stop the current block here */\n-    if ((s->last_lit & 0x1fff) == 0 && s->level > 2) {\n-        /* Compute an upper bound for the compressed length */\n-        ulg out_length = (ulg)s->last_lit*8L;\n-        ulg in_length = (ulg)((long)s->strstart - s->block_start);\n-        int dcode;\n-        for (dcode = 0; dcode < D_CODES; dcode++) {\n-            out_length += (ulg)s->dyn_dtree[dcode].Freq *\n-                (5L+extra_dbits[dcode]);\n-        }\n-        out_length >>= 3;\n-        Tracev((stderr,\"\\nlast_lit %u, in %ld, out ~%ld(%ld%%) 
\",\n-               s->last_lit, in_length, out_length,\n-               100L - out_length*100L/in_length));\n-        if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;\n-    }\n-#endif\n-    return (s->last_lit == s->lit_bufsize-1);\n-    /* We avoid equality with lit_bufsize because of wraparound at 64K\n-     * on 16 bit machines and because stored blocks are restricted to\n-     * 64K-1 bytes.\n-     */\n+    return (s->sym_next == s->sym_end);\n }\n \n /* ===========================================================================\n@@ -1068,13 +1046,14 @@ local void compress_block(s, ltree, dtre\n {\n     unsigned dist;      /* distance of matched string */\n     int lc;             /* match length or unmatched char (if dist == 0) */\n-    unsigned lx = 0;    /* running index in l_buf */\n+    unsigned sx = 0;    /* running index in sym_buf */\n     unsigned code;      /* the code to send */\n     int extra;          /* number of extra bits to send */\n \n-    if (s->last_lit != 0) do {\n-        dist = s->d_buf[lx];\n-        lc = s->l_buf[lx++];\n+    if (s->sym_next != 0) do {\n+        dist = s->sym_buf[sx++] & 0xff;\n+        dist += (unsigned)(s->sym_buf[sx++] & 0xff) << 8;\n+        lc = s->sym_buf[sx++];\n         if (dist == 0) {\n             send_code(s, lc, ltree); /* send a literal byte */\n             Tracecv(isgraph(lc), (stderr,\" '%c' \", lc));\n@@ -1099,11 +1078,10 @@ local void compress_block(s, ltree, dtre\n             }\n         } /* literal or match pair ? */\n \n-        /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */\n-        Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,\n-               \"pendingBuf overflow\");\n+        /* Check that the overlay between pending_buf and sym_buf is ok: */\n+        Assert(s->pending < s->lit_bufsize + sx, \"pendingBuf overflow\");\n \n-    } while (lx < s->last_lit);\n+    } while (sx < s->sym_next);\n \n     send_code(s, END_BLOCK, ltree);\n }\n"
  },
  {
    "path": "third_party/patch/zlib/CVE-2022-37434.patch",
    "content": "diff -Npur zlib-1.2.11/inflate.c zlib-1.2.11-change/inflate.c\n--- zlib-1.2.11/inflate.c\t2017-01-01 15:37:10.000000000 +0800\n+++ zlib-1.2.11-change/inflate.c\t2022-08-17 06:25:06.033176873 +0800\n@@ -759,8 +759,9 @@ int flush;\n                 if (copy > have) copy = have;\n                 if (copy) {\n                     if (state->head != Z_NULL &&\n-                        state->head->extra != Z_NULL) {\n-                        len = state->head->extra_len - state->length;\n+                        state->head->extra != Z_NULL &&\n+                        (len = state->head->extra_len - state->length) <\n+                        state->head->extra_max) {\n                         zmemcpy(state->head->extra + len, next,\n                                 len + copy > state->head->extra_max ?\n                                 state->head->extra_max - len : copy);\n"
  },
  {
    "path": "third_party/securec/CMakeLists.txt",
    "content": "SET(CMAKE_BUILD_TYPE \"Debug\")\nif (CMAKE_SYSTEM_NAME MATCHES \"Windows\")\n    SET(CMAKE_C_FLAGS_DEBUG \"$ENV{CFLAGS} -fPIC -O0 -Wall -Wno-deprecated-declarations -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer -fstack-protector-all\")\nelse()\n    SET(CMAKE_C_FLAGS_DEBUG \"$ENV{CFLAGS} -fPIC -O0 -Wall -Wno-deprecated-declarations -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer -fstack-protector-all -D_LIBCPP_INLINE_VISIBILITY='' -D'_LIBCPP_EXTERN_TEMPLATE(...)='\")\nendif()\nSET(CMAKE_C_FLAGS_RELEASE \"$ENV{CFLAGS} -fPIC -O3 -Wall -Wno-deprecated-declarations -fstack-protector-all\")\nset(CMAKE_EXPORT_COMPILE_COMMANDS ON)\n\n#add flags\nset(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -I/usr/local/include -Werror\")\n\ninclude_directories(./include)\nadd_subdirectory(src)\n"
  },
  {
    "path": "third_party/securec/include/securec.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef __SECUREC_H__5D13A042_DC3F_4ED9_A8D1_882811274C27\n#define __SECUREC_H__5D13A042_DC3F_4ED9_A8D1_882811274C27\n\n#include \"securectype.h\"\n#include <stdarg.h>\n\n#ifndef SECUREC_HAVE_ERRNO_H\n#if SECUREC_IN_KERNEL\n#define SECUREC_HAVE_ERRNO_H 0\n#else\n#define SECUREC_HAVE_ERRNO_H 1\n#endif\n#endif\n\n/* EINVAL ERANGE may defined in errno.h */\n#if SECUREC_HAVE_ERRNO_H\n#include <errno.h>\n#endif\n\n/* define error code */\n#if defined(SECUREC_NEED_ERRNO_TYPE) || !defined(__STDC_WANT_LIB_EXT1__) || \\\n    (defined(__STDC_WANT_LIB_EXT1__) && (__STDC_WANT_LIB_EXT1__ == 0))\n#ifndef SECUREC_DEFINED_ERRNO_TYPE\n#define SECUREC_DEFINED_ERRNO_TYPE\n/* just check whether macrodefinition exists. */\n#ifndef errno_t\ntypedef int errno_t;\n#endif\n#endif\n#endif\n\n/* success */\n#ifndef EOK\n#define EOK 0\n#endif\n\n#ifndef EINVAL\n/* The src buffer is not correct and destination buffer cant not be reset */\n#define EINVAL 22\n#endif\n\n#ifndef EINVAL_AND_RESET\n/* Once the error is detected, the dest buffer must be reseted! */\n#define EINVAL_AND_RESET (22 | 128)\n#endif\n\n#ifndef ERANGE\n/* The destination buffer is not long enough and destination buffer can not be reset */\n#define ERANGE 34\n#endif\n\n#ifndef ERANGE_AND_RESET\n/* Once the error is detected, the dest buffer must be reseted! 
*/\n#define ERANGE_AND_RESET  (34 | 128)\n#endif\n\n#ifndef EOVERLAP_AND_RESET\n/* Once the buffer overlap is detected, the dest buffer must be reseted! */\n#define EOVERLAP_AND_RESET (54 | 128)\n#endif\n\n/* if you need export the function of this library in Win32 dll, use __declspec(dllexport) */\n#ifndef SECUREC_API\n#if defined(SECUREC_DLL_EXPORT)\n#define SECUREC_API __declspec(dllexport)\n#elif defined(SECUREC_DLL_IMPORT)\n#define SECUREC_API __declspec(dllimport)\n#else\n/* Standardized function declaration . If a security function is declared in the your code,\n * it may cause a compilation alarm,Please delete the security function you declared\n * Adding extern under windows will cause the system to have inline functions to expand,\n * so do not add the extern in default\n */\n#if defined(_MSC_VER)\n#define SECUREC_API\n#else\n#define SECUREC_API extern\n#endif\n#endif\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n    /*\n     * Description: The GetHwSecureCVersion function get SecureC Version string and version number.\n     * Parameter: verNumber - to store version number\n     * Return:   version string\n     */\n    SECUREC_API const char *GetHwSecureCVersion(unsigned short *verNumber);\n\n#if SECUREC_ENABLE_MEMSET\n    /*\n     * Description: The memset_s function copies the value of c (converted to an unsigned char) into each of\n     * the first count characters of the object pointed to by dest.\n     * Parameter: dest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer\n     * Parameter: c - the value to be copied\n     * Parameter: count -copies fisrt count characters of  dest\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t memset_s(void *dest, size_t destMax, int c, size_t count);\n#endif\n\n#ifndef SECUREC_ONLY_DECLARE_MEMSET\n#define SECUREC_ONLY_DECLARE_MEMSET     0\n#endif\n\n#if SECUREC_ONLY_DECLARE_MEMSET == 0\n\n#if 
SECUREC_ENABLE_MEMMOVE\n    /*\n     * Description: The memmove_s function copies n characters from the object pointed to by src\n     * into the object pointed to by dest.\n     * Parameter: dest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer\n     * Parameter: src -source  address\n     * Parameter: count -copies count wide characters from the  src\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t memmove_s(void *dest, size_t destMax, const void *src, size_t count);\n#endif\n\n#if SECUREC_ENABLE_MEMCPY\n    /*\n     * Description: The memcpy_s function copies n characters from the object pointed to\n     * by src into the object pointed to by dest.\n     * Parameter: dest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer\n     * Parameter: src -source  address\n     * Parameter: count -copies count  characters from the  src\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t memcpy_s(void *dest, size_t destMax, const void *src, size_t count);\n#endif\n\n#if SECUREC_ENABLE_STRCPY\n    /*\n     * Description: The strcpy_s function copies the string pointed to by strSrc (including\n     * the terminating null character) into the array pointed to by strDest\n     * Parameter: strDest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer(including the terminating null character)\n     * Parameter: strSrc -source  address\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t strcpy_s(char *strDest, size_t destMax, const char *strSrc);\n#endif\n\n#if SECUREC_ENABLE_STRNCPY\n    /*\n     * Description: The strncpy_s function copies not more than n successive characters (not including\n     * the terminating null character)\n     *                     from the array pointed to by strSrc to the array 
pointed to by strDest\n     * Parameter: strDest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer(including the terminating null character)\n     * Parameter: strSrc -source  address\n     * Parameter: count -copies count  characters from the  src\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t strncpy_s(char *strDest, size_t destMax, const char *strSrc, size_t count);\n#endif\n\n#if SECUREC_ENABLE_STRCAT\n    /*\n     * Description: The strcat_s function appends a copy of the  string pointed to by strSrc (including\n     * the terminating null  character)\n     *                     to the end of the  string pointed to by strDest\n     * Parameter: strDest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer(including the terminating null wide character)\n     * Parameter: strSrc -source  address\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t strcat_s(char *strDest, size_t destMax, const char *strSrc);\n#endif\n\n#if SECUREC_ENABLE_STRNCAT\n    /*\n     * Description: The strncat_s function appends not more than n successive  characters (not including\n     * the terminating null  character)\n     *                       from the array pointed to by strSrc to the end of the  string pointed to by strDest.\n     * Parameter: strDest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer(including the terminating null character)\n     * Parameter: strSrc -source  address\n     * Parameter: count -copies count  characters from the  src\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t strncat_s(char *strDest, size_t destMax, const char *strSrc, size_t count);\n#endif\n\n#if SECUREC_ENABLE_VSPRINTF\n    /*\n     * Description:  The vsprintf_s function is equivalent to the vsprintf function 
except for the Parameter: destMax\n     * and the explicit runtime-constraints violation\n     * Parameter: strDest -  produce output according to a format ,write to the character string strDest\n     * Parameter: destMax - The maximum length of destination buffer(including the terminating null wide characte)\n     * Parameter: format - fromat string\n     * Parameter: argList - instead of  a variable  number of arguments\n     * Return:    the number of characters printed(not including the terminating null byte ('\\0')),\n     * If an error occurred Return: -1.\n     */\n    SECUREC_API int vsprintf_s(char *strDest, size_t destMax, const char *format,\n                               va_list argList) SECUREC_ATTRIBUTE(3, 0);\n#endif\n\n#if SECUREC_ENABLE_SPRINTF\n    /*\n     * Description:  The sprintf_s function is equivalent to the sprintf function except for the Parameter: destMax\n     * and the explicit runtime-constraints violation\n     * Parameter: strDest -  produce output according to a format ,write to the character string strDest\n     * Parameter: destMax - The maximum length of destination buffer(including the terminating null byte ('\\0'))\n     * Parameter: format - fromat string\n     * Return:    the number of characters printed(not including the terminating null byte ('\\0')),\n     * If an error occurred Return: -1.\n     */\n    SECUREC_API int sprintf_s(char *strDest, size_t destMax, const char *format, ...) 
SECUREC_ATTRIBUTE(3, 4);\n#endif\n\n#if SECUREC_ENABLE_VSNPRINTF\n    /*\n     * Description:  The vsnprintf_s function is equivalent to the vsnprintf function except for the Parameter:\n     * destMax/count and the explicit runtime-constraints violation\n     * Parameter: strDest -  produce output according to a format ,write to the character string strDest\n     * Parameter: destMax - The maximum length of destination buffer(including the terminating null  byte ('\\0'))\n     * Parameter: count - do not write more than count bytes to strDest(not including the terminating null  byte ('\\0'))\n     * Parameter: format - fromat string\n     * Parameter: argList - instead of  a variable  number of arguments\n     * Return:    the number of characters printed(not including the terminating null byte ('\\0')),\n     * If an error occurred Return: -1.Pay special attention to returning -1 when truncation occurs\n     */\n    SECUREC_API int vsnprintf_s(char *strDest, size_t destMax, size_t count, const char *format,\n                                va_list argList) SECUREC_ATTRIBUTE(4, 0);\n#endif\n\n#if SECUREC_ENABLE_SNPRINTF\n    /*\n     * Description:  The snprintf_s function is equivalent to the snprintf function except for the Parameter:\n     * destMax/count and the explicit runtime-constraints violation\n     * Parameter: strDest -  produce output according to a format ,write to the character string strDest\n     * Parameter: destMax - The maximum length of destination buffer(including the terminating null  byte ('\\0'))\n     * Parameter: count - do not write more than count bytes to strDest(not including the terminating null  byte ('\\0'))\n     * Parameter: format - fromat string\n     * Return:    the number of characters printed(not including the terminating null byte ('\\0')),\n     * If an error occurred Return: -1.Pay special attention to returning -1 when truncation occurs\n     */\n    SECUREC_API int snprintf_s(char *strDest, size_t destMax, size_t 
count, const char *format,\n                               ...) SECUREC_ATTRIBUTE(4, 5);\n#endif\n\n#if SECUREC_SNPRINTF_TRUNCATED\n    /*\n     * Description:  The vsnprintf_truncated_s function is equivalent to the vsnprintf_s function except\n     * no count Parameter:  and Return: value\n     * Parameter: strDest -  produce output according to a format ,write to the character string strDest\n     * Parameter: destMax - The maximum length of destination buffer(including the terminating null  byte ('\\0'))\n     * Parameter: format - fromat string\n     * Parameter: argList - instead of  a variable  number of arguments\n     * Return:    the number of characters printed(not including the terminating null byte ('\\0')),\n     * If an error occurred Return: -1.Pay special attention to returning destMax - 1 when truncation occurs\n     */\n    SECUREC_API int vsnprintf_truncated_s(char *strDest, size_t destMax, const char *format,\n                                          va_list argList) SECUREC_ATTRIBUTE(3, 0);\n\n    /*\n     * Description:  The snprintf_truncated_s function is equivalent to the snprintf_2 function except\n     * no count Parameter:  and Return: value\n     * Parameter: strDest -  produce output according to a format ,write to the character string strDest\n     * Parameter: destMax - The maximum length of destination buffer(including the terminating null  byte ('\\0'))\n     * Parameter: format - fromat string\n     * Return:    the number of characters printed(not including the terminating null byte ('\\0')),\n     * If an error occurred Return: -1.Pay special attention to returning destMax - 1 when truncation occurs\n     */\n    SECUREC_API int snprintf_truncated_s(char *strDest, size_t destMax,\n                                         const char *format, ...) 
SECUREC_ATTRIBUTE(3, 4);\n#endif\n\n#if SECUREC_ENABLE_SCANF\n    /*\n     * Description:  The scanf_s function is equivalent to fscanf_s with the argument stdin\n     * interposed before the arguments to scanf_s\n     * Parameter: format - fromat string\n     * Return:   the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int scanf_s(const char *format, ...);\n#endif\n\n#if SECUREC_ENABLE_VSCANF\n    /*\n     * Description:  The vscanf_s function is equivalent to scanf_s, with the variable argument list replaced by argList\n     * Parameter: format - fromat string\n     * Parameter: argList - instead of  a variable  number of arguments\n     * Return:    the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int vscanf_s(const char *format, va_list argList);\n#endif\n\n#if SECUREC_ENABLE_SSCANF\n    /*\n     * Description:  The sscanf_s function is equivalent to fscanf_s, except that input is obtained from a\n     * string (specified by the argument buffer) rather than from a stream\n     * Parameter: buffer -  read character from  buffer\n     * Parameter: format - fromat string\n     * Return:    the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int sscanf_s(const char *buffer, const char *format, ...);\n#endif\n\n#if SECUREC_ENABLE_VSSCANF\n    /*\n     * Description:  The vsscanf_s function is equivalent to sscanf_s, with the variable argument list\n     * replaced by argList\n     * Parameter: buffer -  read character from  buffer\n     * Parameter: format - fromat string\n     * Parameter: argList - instead of  a variable  number of arguments\n     * Return:    the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int vsscanf_s(const char *buffer, const char *format, va_list argList);\n#endif\n\n#if SECUREC_ENABLE_FSCANF\n    /*\n     * Description:  The fscanf_s function is equivalent to 
fscanf except that the c, s, and [ conversion specifiers\n     * apply to a pair of arguments (unless assignment suppression is indicated by a*)\n     * Parameter: stream - stdio file stream\n     * Parameter: format - fromat string\n     * Return:    the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int fscanf_s(FILE *stream, const char *format, ...);\n#endif\n\n#if SECUREC_ENABLE_VFSCANF\n    /*\n     * Description:  The vfscanf_s function is equivalent to fscanf_s, with the variable argument list\n     * replaced by argList\n     * Parameter: stream - stdio file stream\n     * Parameter: format - fromat string\n     * Parameter: argList - instead of  a variable  number of arguments\n     * Return:    the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int vfscanf_s(FILE *stream, const char *format, va_list argList);\n#endif\n\n#if SECUREC_ENABLE_STRTOK\n    /*\n     * Description:  The strtok_s function parses a string into a sequence of strToken,\n     * replace all characters in strToken string that match to strDelimit set with 0.\n     * On the first call to strtok_s the string to be parsed should be specified in strToken.\n     * In each subsequent call that should parse the same string, strToken should be NULL\n     * Parameter: strToken - the string to be delimited\n     * Parameter: strDelimit -specifies a set of characters that delimit the tokens in the parsed string\n     * Parameter: context -is a pointer to a char * variable that is used internally by strtok_s function\n     * Return:  On the first call returns the address of the first non \\0 character, otherwise NULL is returned.\n     * In subsequent calls, the strtoken is set to NULL, and the context set is the same as the previous call,\n     * return NULL if the *context string length is equal 0, otherwise return *context.\n     */\n    SECUREC_API char *strtok_s(char *strToken, const char *strDelimit, char 
**context);\n#endif\n\n#if SECUREC_ENABLE_GETS && SECUREC_IN_KERNEL == 0\n    /*\n     * Description: The gets_s function reads at most one less than the number of characters specified\n     * by destMax from the stream pointed to by stdin, into the array pointed to by buffer\n     * Parameter: buffer - destination  address\n     * Parameter: destMax -The maximum length of destination buffer(including the terminating null character)\n     * Return:    buffer if there was no runtime-constraint violation,If an error occurred Return: NULL.\n     */\n    SECUREC_API char *gets_s(char *buffer, size_t destMax);\n#endif\n\n\n#if SECUREC_ENABLE_WCHAR_FUNC\n#if SECUREC_ENABLE_MEMCPY\n    /*\n     * Description: The wmemcpy_s function copies n successive wide characters from the object pointed to\n     * by src into the object pointed to by dest.\n     * Parameter: dest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer\n     * Parameter: src -source  address\n     * Parameter: count -copies count wide characters from the  src\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t wmemcpy_s(wchar_t *dest, size_t destMax, const wchar_t *src, size_t count);\n#endif\n\n#if SECUREC_ENABLE_MEMMOVE\n    /*\n     * Description: The wmemmove_s function copies n successive wide characters from the object\n     * pointed to by src into the object pointed to by dest.\n     * Parameter: dest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer\n     * Parameter: src -source  address\n     * Parameter: count -copies count wide characters from the  src\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t wmemmove_s(wchar_t *dest, size_t destMax, const wchar_t *src, size_t count);\n#endif\n\n#if SECUREC_ENABLE_STRCPY\n    /*\n     * Description: The wcscpy_s function copies the wide string pointed to by strSrc 
(including theterminating\n     * null wide character) into the array pointed to by strDest\n     * Parameter: strDest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer\n     * Parameter: strSrc -source  address\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t wcscpy_s(wchar_t *strDest, size_t destMax, const wchar_t *strSrc);\n#endif\n\n#if SECUREC_ENABLE_STRNCPY\n    /*\n     * Description: The wcsncpy_s function copies not more than n successive wide characters (not including the\n     * terminating null wide character) from the array pointed to by strSrc to the array pointed to by strDest\n     * Parameter: strDest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer(including the terminating wide character)\n     * Parameter: strSrc -source  address\n     * Parameter: count -copies count wide characters from the  src\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t wcsncpy_s(wchar_t *strDest, size_t destMax, const wchar_t *strSrc, size_t count);\n#endif\n\n#if SECUREC_ENABLE_STRCAT\n    /*\n     * Description: The wcscat_s function appends a copy of the wide string pointed to by strSrc (including the\n     * terminating null wide character) to the end of the wide string pointed to by strDest\n     * Parameter: strDest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer(including the terminating wide character)\n     * Parameter: strSrc -source  address\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t wcscat_s(wchar_t *strDest, size_t destMax, const wchar_t *strSrc);\n#endif\n\n#if SECUREC_ENABLE_STRNCAT\n    /*\n     * Description: The wcsncat_s function appends not more than n successive wide characters (not including the\n     * terminating null wide character) from the array pointed 
to by strSrc to the end of the wide string pointed to\n     * by strDest.\n     * Parameter: strDest - destination  address\n     * Parameter: destMax -The maximum length of destination buffer(including the terminating wide character)\n     * Parameter: strSrc -source  address\n     * Parameter: count -copies count wide characters from the  src\n     * Return:    EOK if there was no runtime-constraint violation\n     */\n    SECUREC_API errno_t wcsncat_s(wchar_t *strDest, size_t destMax, const wchar_t *strSrc, size_t count);\n#endif\n\n#if SECUREC_ENABLE_STRTOK\n    /*\n     * Description:  The  wcstok_s  function  is  the  wide-character  equivalent  of the strtok_s function\n     * Parameter: strToken - the string to be delimited\n     * Parameter: strDelimit -specifies a set of characters that delimit the tokens in the parsed string\n     * Parameter: context -is a pointer to a char * variable that is used internally by strtok_s function\n     * Return:    a pointer to the first character of a token, or a null pointer if there is no token\n     * or there is a runtime-constraint violation.\n     */\n    SECUREC_API wchar_t *wcstok_s(wchar_t *strToken, const wchar_t *strDelimit, wchar_t **context);\n#endif\n\n#if SECUREC_ENABLE_VSPRINTF\n    /*\n     * Description:  The  vswprintf_s  function  is  the  wide-character  equivalent  of the vsprintf_s function\n     * Parameter: strDest -  produce output according to a format ,write to the character string strDest\n     * Parameter: destMax - The maximum length of destination buffer(including the terminating null )\n     * Parameter: format - fromat string\n     * Parameter: argList - instead of  a variable  number of arguments\n     * Return:    the number of characters printed(not including the terminating null wide characte),\n     * If an error occurred Return: -1.\n     */\n    SECUREC_API int vswprintf_s(wchar_t *strDest, size_t destMax, const wchar_t *format, va_list argList);\n#endif\n\n#if 
SECUREC_ENABLE_SPRINTF\n\n    /*\n     * Description:  The  swprintf_s  function  is  the  wide-character  equivalent  of the sprintf_s function\n     * Parameter: strDest -  produce output according to a format ,write to the character string strDest\n     * Parameter: destMax - The maximum length of destination buffer(including the terminating null )\n     * Parameter: format - fromat string\n     * Return:    the number of characters printed(not including the terminating null wide characte),\n     * If an error occurred Return: -1.\n     */\n    SECUREC_API int swprintf_s(wchar_t *strDest, size_t destMax, const wchar_t *format, ...);\n#endif\n\n#if SECUREC_ENABLE_FSCANF\n    /*\n     * Description:  The  fwscanf_s  function  is  the  wide-character  equivalent  of the fscanf_s function\n     * Parameter: stream - stdio file stream\n     * Parameter: format - fromat string\n     * Return:    the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int fwscanf_s(FILE *stream, const wchar_t *format, ...);\n#endif\n\n#if SECUREC_ENABLE_VFSCANF\n    /*\n     * Description:  The  vfwscanf_s  function  is  the  wide-character  equivalent  of the vfscanf_s function\n     * Parameter: stream - stdio file stream\n     * Parameter: format - fromat string\n     * Parameter: argList - instead of  a variable  number of arguments\n     * Return:    the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int vfwscanf_s(FILE *stream, const wchar_t *format, va_list argList);\n#endif\n\n#if SECUREC_ENABLE_SCANF\n    /*\n     * Description:  The  wscanf_s  function  is  the  wide-character  equivalent  of the scanf_s function\n     * Parameter: format - fromat string\n     * Return:    the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int wscanf_s(const wchar_t *format, ...);\n#endif\n\n#if SECUREC_ENABLE_VSCANF\n    /*\n     * Description:  The  vwscanf_s  
function  is  the  wide-character  equivalent  of the vscanf_s function\n     * Parameter: format - fromat string\n     * Parameter: argList - instead of  a variable  number of arguments\n     * Return:    the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int vwscanf_s(const wchar_t *format, va_list argList);\n#endif\n\n#if SECUREC_ENABLE_SSCANF\n    /*\n     * Description:  The  swscanf_s  function  is  the  wide-character  equivalent  of the sscanf_s function\n     * Parameter: buffer -  read character from  buffer\n     * Parameter: format - fromat string\n     * Return:    the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int swscanf_s(const wchar_t *buffer, const wchar_t *format, ...);\n#endif\n\n#if SECUREC_ENABLE_VSSCANF\n    /*\n     * Description:  The  vswscanf_s  function  is  the  wide-character  equivalent  of the vsscanf_s function\n     * Parameter: buffer -  read character from  buffer\n     * Parameter: format - fromat string\n     * Parameter: argList - instead of  a variable  number of arguments\n     * Return:    the number of input items assigned, If an error occurred Return: -1.\n     */\n    SECUREC_API int vswscanf_s(const wchar_t *buffer, const wchar_t *format, va_list argList);\n#endif\n#endif /* SECUREC_ENABLE_WCHAR_FUNC */\n#endif\n\n    /* those functions are used by macro ,must declare hare , also for  without function declaration warning */\n    extern errno_t strncpy_error(char *strDest, size_t destMax, const char *strSrc, size_t count);\n    extern errno_t strcpy_error(char *strDest, size_t destMax, const char *strSrc);\n\n#if SECUREC_WITH_PERFORMANCE_ADDONS\n    /* those functions are used by macro */\n    extern errno_t memset_sOptAsm(void *dest, size_t destMax, int c, size_t count);\n    extern errno_t memset_sOptTc(void *dest, size_t destMax, int c, size_t count);\n    extern errno_t memcpy_sOptAsm(void *dest, size_t destMax, const void 
*src, size_t count);\n    extern errno_t memcpy_sOptTc(void *dest, size_t destMax, const void *src, size_t count);\n\n/* strcpy_sp is a macro, NOT a function in performance optimization mode. */\n#define strcpy_sp(dest, destMax, src)  ((__builtin_constant_p((destMax)) && \\\n    __builtin_constant_p((src))) ?  \\\n    SECUREC_STRCPY_SM((dest), (destMax), (src)) : \\\n    strcpy_s((dest), (destMax), (src)))\n\n/* strncpy_sp is a macro, NOT a function in performance optimization mode. */\n#define strncpy_sp(dest, destMax, src, count)  ((__builtin_constant_p((count)) && \\\n    __builtin_constant_p((destMax)) && \\\n    __builtin_constant_p((src))) ?  \\\n    SECUREC_STRNCPY_SM((dest), (destMax), (src), (count)) : \\\n    strncpy_s((dest), (destMax), (src), (count)))\n\n/* strcat_sp is a macro, NOT a function in performance optimization mode. */\n#define strcat_sp(dest, destMax, src) ((__builtin_constant_p((destMax)) && \\\n    __builtin_constant_p((src))) ?  \\\n    SECUREC_STRCAT_SM((dest), (destMax), (src)) : \\\n    strcat_s((dest), (destMax), (src)))\n\n/* strncat_sp is a macro, NOT a function in performance optimization mode. */\n#define strncat_sp(dest, destMax, src, count) ((__builtin_constant_p((count)) &&  \\\n    __builtin_constant_p((destMax)) && \\\n    __builtin_constant_p((src))) ?  \\\n    SECUREC_STRNCAT_SM((dest), (destMax), (src), (count)) : \\\n    strncat_s((dest), (destMax), (src), (count)))\n\n/* memcpy_sp is a macro, NOT a function in performance optimization mode. */\n#define memcpy_sp(dest, destMax, src, count)  (__builtin_constant_p((count)) ? \\\n    (SECUREC_MEMCPY_SM((dest), (destMax), (src), (count))) : \\\n    (__builtin_constant_p((destMax)) ? \\\n    (((size_t)(destMax) > 0 && \\\n    (((unsigned long long)(destMax) & \\\n    (unsigned long long)(-2)) < SECUREC_MEM_MAX_LEN)) ? 
\\\n    memcpy_sOptTc((dest), (destMax), (src), (count)) : ERANGE) : \\\n    memcpy_sOptAsm((dest), (destMax), (src), (count))))\n\n/* memset_sp is a macro, NOT a function in performance optimization mode. */\n#define memset_sp(dest, destMax, c, count)  (__builtin_constant_p((count)) ? \\\n    (SECUREC_MEMSET_SM((dest), (destMax), (c), (count))) : \\\n    (__builtin_constant_p((destMax)) ? \\\n    (((size_t)(destMax) > 0 && \\\n    (((unsigned long long)(destMax) & \\\n    (unsigned long long)(-2)) < SECUREC_MEM_MAX_LEN)) ? \\\n    memset_sOptTc((dest), (destMax), (c), (count)) : ERANGE) : \\\n    memset_sOptAsm((dest), (destMax), (c), (count))))\n#else\n#define strcpy_sp   strcpy_s\n#define strncpy_sp  strncpy_s\n#define strcat_sp   strcat_s\n#define strncat_sp  strncat_s\n#define memcpy_sp   memcpy_s\n#define memset_sp   memset_s\n#endif\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n#endif /* __SECUREC_H__5D13A042_DC3F_4ED9_A8D1_882811274C27 */\n\n"
  },
  {
    "path": "third_party/securec/include/securectype.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef __SECURECTYPE_H__A7BBB686_AADA_451B_B9F9_44DACDAE18A7\n#define __SECURECTYPE_H__A7BBB686_AADA_451B_B9F9_44DACDAE18A7\n\n#ifndef SECUREC_USING_STD_SECURE_LIB\n#if defined(_MSC_VER) && _MSC_VER >= 1400\n#if defined(__STDC_WANT_SECURE_LIB__) && __STDC_WANT_SECURE_LIB__ == 0\n/* Security functions have been provided since vs2005, default use of system library functions */\n#define SECUREC_USING_STD_SECURE_LIB    0\n#else\n#define SECUREC_USING_STD_SECURE_LIB    1\n#endif\n#else\n#define SECUREC_USING_STD_SECURE_LIB    0\n#endif\n#endif\n\n\n/* Compatibility with older Secure C versions, shielding VC symbol redefinition warning */\n#if defined(_MSC_VER) && _MSC_VER >= 1400 && SECUREC_USING_STD_SECURE_LIB == 0\n#ifndef SECUREC_DISABLE_CRT_FUNC\n#define SECUREC_DISABLE_CRT_FUNC        1\n#endif\n#ifndef SECUREC_DISABLE_CRT_IMP\n#define SECUREC_DISABLE_CRT_IMP         1\n#endif\n#else /*  MSC VER */\n#ifndef SECUREC_DISABLE_CRT_FUNC\n#define SECUREC_DISABLE_CRT_FUNC        0\n#endif\n#ifndef SECUREC_DISABLE_CRT_IMP\n#define SECUREC_DISABLE_CRT_IMP         0\n#endif\n#endif\n\n#if SECUREC_DISABLE_CRT_FUNC\n#ifdef __STDC_WANT_SECURE_LIB__\n#undef __STDC_WANT_SECURE_LIB__\n#endif\n#define __STDC_WANT_SECURE_LIB__        0\n#endif\n\n#if SECUREC_DISABLE_CRT_IMP\n#ifdef _CRTIMP_ALTERNATIVE\n#undef _CRTIMP_ALTERNATIVE\n#endif\n#define 
_CRTIMP_ALTERNATIVE     /* comment microsoft *_s function */\n#endif\n\n/* Compile in kernel under macro control */\n#ifndef SECUREC_IN_KERNEL\n#ifdef __KERNEL__\n#define SECUREC_IN_KERNEL               1\n#else\n#define SECUREC_IN_KERNEL               0\n#endif\n#endif\n\n#if SECUREC_IN_KERNEL\n#ifndef SECUREC_ENABLE_SCANF_FILE\n#define SECUREC_ENABLE_SCANF_FILE       0\n#endif\n#ifndef SECUREC_ENABLE_WCHAR_FUNC\n#define SECUREC_ENABLE_WCHAR_FUNC       0\n#endif\n#else /* SECUREC_IN_KERNEL */\n#ifndef SECUREC_ENABLE_SCANF_FILE\n#define SECUREC_ENABLE_SCANF_FILE       1\n#endif\n#ifndef SECUREC_ENABLE_WCHAR_FUNC\n#define SECUREC_ENABLE_WCHAR_FUNC       1\n#endif\n#endif\n\n\n/* Default secure function declaration, default declarations for non-standard functions */\n#ifndef SECUREC_SNPRINTF_TRUNCATED\n#define SECUREC_SNPRINTF_TRUNCATED      1\n#endif\n\n#if SECUREC_USING_STD_SECURE_LIB\n#if defined(_MSC_VER) && _MSC_VER >= 1400\n/* Declare secure functions that are not available in the vs compiler */\n#ifndef SECUREC_ENABLE_MEMSET\n#define SECUREC_ENABLE_MEMSET           1\n#endif\n/* vs 2005 have vsnprintf_s function */\n#ifndef SECUREC_ENABLE_VSNPRINTF\n#define SECUREC_ENABLE_VSNPRINTF        0\n#endif\n#ifndef SECUREC_ENABLE_SNPRINTF\n/* vs 2005 have vsnprintf_s function Adapt the snprintf_s of the security function */\n#define snprintf_s _snprintf_s\n#define SECUREC_ENABLE_SNPRINTF         0\n#endif\n/* befor vs 2010 do not have v functions */\n#if _MSC_VER <= 1600 || defined(SECUREC_FOR_V_SCANFS)\n#ifndef SECUREC_ENABLE_VFSCANF\n#define SECUREC_ENABLE_VFSCANF          1\n#endif\n#ifndef SECUREC_ENABLE_VSCANF\n#define SECUREC_ENABLE_VSCANF           1\n#endif\n#ifndef SECUREC_ENABLE_VSSCANF\n#define SECUREC_ENABLE_VSSCANF          1\n#endif\n#endif\n\n#else /* _MSC_VER */\n#ifndef SECUREC_ENABLE_MEMSET\n#define SECUREC_ENABLE_MEMSET           0\n#endif\n#ifndef SECUREC_ENABLE_SNPRINTF\n#define SECUREC_ENABLE_SNPRINTF         0\n#endif\n#ifndef 
SECUREC_ENABLE_VSNPRINTF\n#define SECUREC_ENABLE_VSNPRINTF        0\n#endif\n#endif\n\n#ifndef SECUREC_ENABLE_MEMMOVE\n#define SECUREC_ENABLE_MEMMOVE          0\n#endif\n#ifndef SECUREC_ENABLE_MEMCPY\n#define SECUREC_ENABLE_MEMCPY           0\n#endif\n#ifndef SECUREC_ENABLE_STRCPY\n#define SECUREC_ENABLE_STRCPY           0\n#endif\n#ifndef SECUREC_ENABLE_STRNCPY\n#define SECUREC_ENABLE_STRNCPY          0\n#endif\n#ifndef SECUREC_ENABLE_STRCAT\n#define SECUREC_ENABLE_STRCAT           0\n#endif\n#ifndef SECUREC_ENABLE_STRNCAT\n#define SECUREC_ENABLE_STRNCAT          0\n#endif\n#ifndef SECUREC_ENABLE_SPRINTF\n#define SECUREC_ENABLE_SPRINTF          0\n#endif\n#ifndef SECUREC_ENABLE_VSPRINTF\n#define SECUREC_ENABLE_VSPRINTF          0\n#endif\n#ifndef SECUREC_ENABLE_SSCANF\n#define SECUREC_ENABLE_SSCANF           0\n#endif\n#ifndef SECUREC_ENABLE_VSSCANF\n#define SECUREC_ENABLE_VSSCANF          0\n#endif\n#ifndef SECUREC_ENABLE_SCANF\n#define SECUREC_ENABLE_SCANF            0\n#endif\n#ifndef SECUREC_ENABLE_VSCANF\n#define SECUREC_ENABLE_VSCANF           0\n#endif\n\n#ifndef SECUREC_ENABLE_FSCANF\n#define SECUREC_ENABLE_FSCANF           0\n#endif\n#ifndef SECUREC_ENABLE_VFSCANF\n#define SECUREC_ENABLE_VFSCANF          0\n#endif\n#ifndef SECUREC_ENABLE_STRTOK\n#define SECUREC_ENABLE_STRTOK           0\n#endif\n#ifndef SECUREC_ENABLE_GETS\n#define SECUREC_ENABLE_GETS             0\n#endif\n\n#else /* SECUREC_USE_STD_SECURE_LIB */\n\n#ifndef SECUREC_ENABLE_MEMSET\n#define SECUREC_ENABLE_MEMSET           1\n#endif\n#ifndef SECUREC_ENABLE_MEMMOVE\n#define SECUREC_ENABLE_MEMMOVE          1\n#endif\n#ifndef SECUREC_ENABLE_MEMCPY\n#define SECUREC_ENABLE_MEMCPY           1\n#endif\n#ifndef SECUREC_ENABLE_STRCPY\n#define SECUREC_ENABLE_STRCPY           1\n#endif\n#ifndef SECUREC_ENABLE_STRNCPY\n#define SECUREC_ENABLE_STRNCPY          1\n#endif\n#ifndef SECUREC_ENABLE_STRCAT\n#define SECUREC_ENABLE_STRCAT           1\n#endif\n#ifndef SECUREC_ENABLE_STRNCAT\n#define 
SECUREC_ENABLE_STRNCAT          1\n#endif\n#ifndef SECUREC_ENABLE_SPRINTF\n#define SECUREC_ENABLE_SPRINTF          1\n#endif\n#ifndef SECUREC_ENABLE_VSPRINTF\n#define SECUREC_ENABLE_VSPRINTF          1\n#endif\n#ifndef SECUREC_ENABLE_SNPRINTF\n#define SECUREC_ENABLE_SNPRINTF         1\n#endif\n#ifndef SECUREC_ENABLE_VSNPRINTF\n#define SECUREC_ENABLE_VSNPRINTF        1\n#endif\n#ifndef SECUREC_ENABLE_SSCANF\n#define SECUREC_ENABLE_SSCANF           1\n#endif\n#ifndef SECUREC_ENABLE_VSSCANF\n#define SECUREC_ENABLE_VSSCANF          1\n#endif\n#ifndef SECUREC_ENABLE_SCANF\n#if SECUREC_ENABLE_SCANF_FILE\n#define SECUREC_ENABLE_SCANF            1\n#else\n#define SECUREC_ENABLE_SCANF            0\n#endif\n#endif\n#ifndef SECUREC_ENABLE_VSCANF\n#if SECUREC_ENABLE_SCANF_FILE\n#define SECUREC_ENABLE_VSCANF           1\n#else\n#define SECUREC_ENABLE_VSCANF           0\n#endif\n#endif\n\n#ifndef SECUREC_ENABLE_FSCANF\n#if SECUREC_ENABLE_SCANF_FILE\n#define SECUREC_ENABLE_FSCANF           1\n#else\n#define SECUREC_ENABLE_FSCANF           0\n#endif\n#endif\n#ifndef SECUREC_ENABLE_VFSCANF\n#if SECUREC_ENABLE_SCANF_FILE\n#define SECUREC_ENABLE_VFSCANF          1\n#else\n#define SECUREC_ENABLE_VFSCANF          0\n#endif\n#endif\n\n#ifndef SECUREC_ENABLE_STRTOK\n#define SECUREC_ENABLE_STRTOK           1\n#endif\n#ifndef SECUREC_ENABLE_GETS\n#define SECUREC_ENABLE_GETS             1\n#endif\n#endif /* SECUREC_USE_STD_SECURE_LIB */\n\n#if SECUREC_ENABLE_SCANF_FILE == 0\n#if SECUREC_ENABLE_FSCANF\n#undef SECUREC_ENABLE_FSCANF\n#define SECUREC_ENABLE_FSCANF           0\n#endif\n#if SECUREC_ENABLE_VFSCANF\n#undef SECUREC_ENABLE_VFSCANF\n#define SECUREC_ENABLE_VFSCANF          0\n#endif\n#if SECUREC_ENABLE_SCANF\n#undef SECUREC_ENABLE_SCANF\n#define SECUREC_ENABLE_SCANF            0\n#endif\n#if SECUREC_ENABLE_FSCANF\n#undef SECUREC_ENABLE_FSCANF\n#define SECUREC_ENABLE_FSCANF           0\n#endif\n\n#endif\n\n#if SECUREC_IN_KERNEL\n#include <linux/kernel.h>\n#include 
<linux/module.h>\n#else\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n#endif\n\n/* If you need high performance, enable the SECUREC_WITH_PERFORMANCE_ADDONS macro, default is enable .\n * The macro is automatically closed on the windows platform and linux kernel\n */\n#ifndef SECUREC_WITH_PERFORMANCE_ADDONS\n#if SECUREC_IN_KERNEL\n#define SECUREC_WITH_PERFORMANCE_ADDONS 0\n#else\n#define SECUREC_WITH_PERFORMANCE_ADDONS 1\n#endif\n#endif\n\n/* if enable SECUREC_COMPATIBLE_WIN_FORMAT, the output format will be compatible to Windows. */\n#if (defined(_WIN32) || defined(_WIN64) || defined(_MSC_VER)) && !defined(SECUREC_COMPATIBLE_LINUX_FORMAT)\n#if !defined(SECUREC_COMPATIBLE_WIN_FORMAT)\n#define SECUREC_COMPATIBLE_WIN_FORMAT\n#endif\n#endif\n\n#if defined(SECUREC_COMPATIBLE_WIN_FORMAT)\n/* in windows platform, can't use optimized function for there is no __builtin_constant_p like function */\n/* If need optimized macro, can define this: define __builtin_constant_p(x) 0 */\n#ifdef SECUREC_WITH_PERFORMANCE_ADDONS\n#undef SECUREC_WITH_PERFORMANCE_ADDONS\n#define SECUREC_WITH_PERFORMANCE_ADDONS 0\n#endif\n#endif\n\n#if defined(__VXWORKS__) || defined(__vxworks) || defined(__VXWORKS) || defined(_VXWORKS_PLATFORM_)  || \\\n    defined(SECUREC_VXWORKS_VERSION_5_4)\n#if !defined(SECUREC_VXWORKS_PLATFORM)\n#define SECUREC_VXWORKS_PLATFORM\n#endif\n#endif\n\n/* if enable SECUREC_COMPATIBLE_LINUX_FORMAT, the output format will be compatible to Linux. 
*/\n#if !(defined(SECUREC_COMPATIBLE_WIN_FORMAT) || defined(SECUREC_VXWORKS_PLATFORM))\n#if !defined(SECUREC_COMPATIBLE_LINUX_FORMAT)\n#define SECUREC_COMPATIBLE_LINUX_FORMAT\n#endif\n#endif\n\n#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT\n#include <stddef.h>\n#endif\n\n/* add  the -DSECUREC_SUPPORT_FORMAT_WARNING  compiler option to supoort  -Wformat.\n * default does not check the format is that the same data type in the actual code\n * in the product is different in the original data type definition of VxWorks and Linux.\n */\n#ifndef SECUREC_SUPPORT_FORMAT_WARNING\n#define SECUREC_SUPPORT_FORMAT_WARNING 0\n#endif\n\n/* SECUREC_PCLINT for tool do not recognize __attribute__  just for pclint */\n#if SECUREC_SUPPORT_FORMAT_WARNING && !defined(SECUREC_PCLINT)\n#define SECUREC_ATTRIBUTE(x, y)  __attribute__((format(printf, (x), (y))))\n#else\n#define SECUREC_ATTRIBUTE(x, y)\n#endif\n\n/* SECUREC_PCLINT for tool do not recognize __builtin_expect, just for pclint */\n#if defined(__GNUC__) && \\\n    ((__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 3))) && \\\n    !defined(SECUREC_PCLINT)\n/* This is a built-in function that can be used without a declaration, if you encounter an undeclared compilation alarm,\n * you can add -DSECUREC_NEED_BUILTIN_EXPECT_DECLARE to complier options\n */\n#if defined(SECUREC_NEED_BUILTIN_EXPECT_DECLARE)\nlong __builtin_expect(long exp, long c);\n#endif\n#define SECUREC_LIKELY(x) __builtin_expect(!!(x), 1)\n#define SECUREC_UNLIKELY(x) __builtin_expect(!!(x), 0)\n#else\n#define SECUREC_LIKELY(x) (x)\n#define SECUREC_UNLIKELY(x) (x)\n#endif\n\n/* define the max length of the string */\n#ifndef SECUREC_STRING_MAX_LEN\n#define SECUREC_STRING_MAX_LEN (0x7fffffffUL)\n#endif\n#define SECUREC_WCHAR_STRING_MAX_LEN (SECUREC_STRING_MAX_LEN / sizeof(wchar_t))\n\n/* add SECUREC_MEM_MAX_LEN for memcpy and memmove */\n#ifndef SECUREC_MEM_MAX_LEN\n#define SECUREC_MEM_MAX_LEN (0x7fffffffUL)\n#endif\n#define SECUREC_WCHAR_MEM_MAX_LEN (SECUREC_MEM_MAX_LEN / 
sizeof(wchar_t))\n\n#if SECUREC_STRING_MAX_LEN > 0x7fffffff\n#error \"max string is 2G\"\n#endif\n\n#if (defined(__GNUC__) && defined(__SIZEOF_POINTER__))\n#if (__SIZEOF_POINTER__ != 4) && (__SIZEOF_POINTER__ != 8)\n#error \"unsupported system\"\n#endif\n#endif\n\n#if defined(_WIN64) || defined(WIN64) || defined(__LP64__) || defined(_LP64)\n#define SECUREC_ON_64BITS\n#endif\n\n#if (!defined(SECUREC_ON_64BITS) && defined(__GNUC__) && defined(__SIZEOF_POINTER__))\n#if __SIZEOF_POINTER__ == 8\n#define SECUREC_ON_64BITS\n#endif\n#endif\n\n#if defined(__SVR4) || defined(__svr4__)\n#define SECUREC_ON_SOLARIS\n#endif\n\n#if (defined(__hpux) || defined(_AIX) || defined(SECUREC_ON_SOLARIS))\n#define SECUREC_ON_UNIX\n#endif\n\n/* codes should run under the macro SECUREC_COMPATIBLE_LINUX_FORMAT in unknow system on default,\n * and strtold. The function\n * strtold is referenced first at ISO9899:1999(C99), and some old compilers can\n * not support these functions. Here provides a macro to open these functions:\n * SECUREC_SUPPORT_STRTOLD  -- if defined, strtold will   be used\n */\n#ifndef SECUREC_SUPPORT_STRTOLD\n#define SECUREC_SUPPORT_STRTOLD 0\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT))\n#if defined(__USE_ISOC99)  || \\\n    (defined(_AIX) && defined(_ISOC99_SOURCE)) || \\\n    (defined(__hpux) && defined(__ia64)) || \\\n    (defined(SECUREC_ON_SOLARIS) && (!defined(_STRICT_STDC) && !defined(__XOPEN_OR_POSIX)) || \\\n    defined(_STDC_C99) || defined(__EXTENSIONS__))\n#undef  SECUREC_SUPPORT_STRTOLD\n#define SECUREC_SUPPORT_STRTOLD 1\n#endif\n#endif\n#if ((defined(SECUREC_WRLINUX_BELOW4) || defined(_WRLINUX_BELOW4_)))\n#undef  SECUREC_SUPPORT_STRTOLD\n#define SECUREC_SUPPORT_STRTOLD 0\n#endif\n#endif\n\n\n#if SECUREC_WITH_PERFORMANCE_ADDONS\n\n#ifndef SECUREC_TWO_MIN\n#define SECUREC_TWO_MIN(a, b) ((a) < (b) ? 
(a) : (b))\n#endif\n\n/* for strncpy_s performance optimization */\n#define SECUREC_STRNCPY_SM(dest, destMax, src, count) \\\n    (((void *)(dest) != NULL && (void *)(src) != NULL && (size_t)(destMax) > 0 && \\\n    (((unsigned long long)(destMax) & (unsigned long long)(-2)) < SECUREC_STRING_MAX_LEN) && \\\n    (SECUREC_TWO_MIN((size_t)(count), strlen(src)) + 1) <= (size_t)(destMax)) ? \\\n    (((size_t)(count) < strlen(src)) ? (memcpy((dest), (src), (count)), *((char *)(dest) + (count)) = '\\0', EOK) : \\\n    (memcpy((dest), (src), strlen(src) + 1), EOK)) : (strncpy_error((dest), (destMax), (src), (count))))\n\n#define SECUREC_STRCPY_SM(dest, destMax, src) \\\n    (((void *)(dest) != NULL && (void *)(src) != NULL && (size_t)(destMax) > 0 && \\\n    (((unsigned long long)(destMax) & (unsigned long long)(-2)) < SECUREC_STRING_MAX_LEN) && \\\n    (strlen(src) + 1) <= (size_t)(destMax)) ? (memcpy((dest), (src), strlen(src) + 1), EOK) : \\\n    (strcpy_error((dest), (destMax), (src))))\n\n/* for strcat_s performance optimization */\n#if defined(__GNUC__)\n#define SECUREC_STRCAT_SM(dest, destMax, src) ({ \\\n    int catRet = EOK; \\\n    if ((void *)(dest) != NULL && (void *)(src) != NULL && (size_t)(destMax) > 0 && \\\n        (((unsigned long long)(destMax) & (unsigned long long)(-2)) < SECUREC_STRING_MAX_LEN)) { \\\n        char *catTmpDst = (char *)(dest); \\\n        size_t catRestSize = (destMax); \\\n        while (catRestSize > 0 && *catTmpDst != '\\0') { \\\n            ++catTmpDst; \\\n            --catRestSize; \\\n        } \\\n        if (catRestSize == 0) { \\\n            catRet = EINVAL; \\\n        } else if ((strlen(src) + 1) <= catRestSize) { \\\n            memcpy(catTmpDst, (src), strlen(src) + 1); \\\n            catRet = EOK; \\\n        } else { \\\n            catRet = ERANGE; \\\n        } \\\n        if (catRet != EOK) { \\\n            catRet = strcat_s((dest), (destMax), (src)); \\\n        } \\\n    } else { \\\n        catRet = 
strcat_s((dest), (destMax), (src)); \\\n    } \\\n    catRet; \\\n})\n#else\n#define SECUREC_STRCAT_SM(dest, destMax, src) strcat_s((dest), (destMax), (src))\n#endif\n\n/* for strncat_s performance optimization */\n#if defined(__GNUC__)\n#define SECUREC_STRNCAT_SM(dest, destMax, src, count) ({ \\\n    int ncatRet = EOK; \\\n    if ((void *)(dest) != NULL && (void *)(src) != NULL && (size_t)(destMax) > 0 && \\\n        (((unsigned long long)(destMax) & (unsigned long long)(-2)) < SECUREC_STRING_MAX_LEN)  && \\\n        (((unsigned long long)(count) & (unsigned long long)(-2)) < SECUREC_STRING_MAX_LEN)) { \\\n        char *ncatTmpDest = (char *)(dest); \\\n        size_t ncatRestSize = (size_t)(destMax); \\\n        while (ncatRestSize > 0 && *ncatTmpDest != '\\0') { \\\n            ++ncatTmpDest; \\\n            --ncatRestSize; \\\n        } \\\n        if (ncatRestSize == 0) { \\\n            ncatRet = EINVAL; \\\n        } else if ((SECUREC_TWO_MIN((count), strlen(src)) + 1) <= ncatRestSize) { \\\n            if ((size_t)(count) < strlen(src)) { \\\n                memcpy(ncatTmpDest, (src), (count)); \\\n                *(ncatTmpDest + (count)) = '\\0'; \\\n            } else { \\\n                memcpy(ncatTmpDest, (src), strlen(src) + 1); \\\n            } \\\n        } else { \\\n            ncatRet = ERANGE; \\\n        } \\\n        if (ncatRet != EOK) { \\\n            ncatRet = strncat_s((dest), (destMax), (src), (count)); \\\n        } \\\n    } else { \\\n        ncatRet = strncat_s((dest), (destMax), (src), (count)); \\\n    } \\\n    ncatRet; \\\n})\n#else\n#define SECUREC_STRNCAT_SM(dest, destMax, src, count) strncat_s((dest), (destMax), (src), (count))\n#endif\n\n/* SECUREC_MEMCPY_SM do NOT check buffer overlap by default */\n#define  SECUREC_MEMCPY_SM(dest, destMax, src, count) \\\n    (!(((size_t)(destMax) == 0) || \\\n        (((unsigned long long)(destMax) & (unsigned long long)(-2)) > SECUREC_MEM_MAX_LEN) || \\\n        ((size_t)(count) > 
(size_t)(destMax)) || ((void *)(dest)) == NULL || ((void *)(src) == NULL))? \\\n        (memcpy((dest), (src), (count)), EOK) : \\\n        (memcpy_s((dest), (destMax), (src), (count))))\n\n#define  SECUREC_MEMSET_SM(dest, destMax, c, count) \\\n    (!(((size_t)(destMax) == 0) || \\\n        (((unsigned long long)(destMax) & (unsigned long long)(-2)) > SECUREC_MEM_MAX_LEN) || \\\n        ((void *)(dest) == NULL) || ((size_t)(count) > (size_t)(destMax))) ? \\\n        (memset((dest), (c), (count)), EOK) : \\\n        (memset_s((dest), (destMax), (c), (count))))\n\n#endif\n#endif /* __SECURECTYPE_H__A7BBB686_AADA_451B_B9F9_44DACDAE18A7 */\n\n"
  },
  {
    "path": "third_party/securec/src/CMakeLists.txt",
    "content": "if (CMAKE_SYSTEM_NAME MATCHES \"Windows\")\n    list(APPEND SECUREC_SRCS \"memset_s.c\")\nelse()\n    aux_source_directory(. SECUREC_SRCS)\nendif()\nadd_library(securec STATIC ${SECUREC_SRCS})\n"
  },
  {
    "path": "third_party/securec/src/fscanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securec.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The fscanf_s function is equivalent to fscanf except that the c, s,\n *    and [ conversion specifiers apply to a pair of arguments (unless assignment suppression is indicated by a*)\n *    The fscanf function reads data from the current position of stream into\n *    the locations given by argument (if any). Each argument must be a pointer\n *    to a variable of a type that corresponds to a type specifier in format.\n *    format controls the interpretation of the input fields and has the same\n *    form and function as the format argument for scanf.\n *\n * <INPUT PARAMETERS>\n *    stream              Pointer to FILE structure.\n *    format              Format control string, see Format Specifications.\n *    ...                 Optional arguments.\n *\n * <OUTPUT PARAMETERS>\n *    ...                 The convered value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Each of these functions returns the number of fields successfully converted\n *    and assigned; the return value does not include fields that were read but\n *    not assigned. 
A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\nint fscanf_s(FILE *stream, const char *format, ...)\n{\n    int ret;                    /* If initialization causes  e838 */\n    va_list argList;\n\n    va_start(argList, format);\n    ret = vfscanf_s(stream, format, argList);\n    va_end(argList);\n    (void)argList;              /* to clear e438 last value assigned not used , the compiler will optimize this code */\n\n    return ret;\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/fwscanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securec.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The fwscanf_s function  is  the  wide-character  equivalent  of the fscanf_s function\n *    The fwscanf_s function reads data from the current position of stream into\n *    the locations given by argument (if any). Each argument must be a pointer\n *    to a variable of a type that corresponds to a type specifier in format.\n *    format controls the interpretation of the input fields and has the same\n *    form and function as the format argument for scanf.\n *\n * <INPUT PARAMETERS>\n *    stream                   Pointer to FILE structure.\n *    format                   Format control string, see Format Specifications.\n *    ...                      Optional arguments.\n *\n * <OUTPUT PARAMETERS>\n *    ...                      The converted value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Each of these functions returns the number of fields successfully converted\n *    and assigned; the return value does not include fields that were read but\n *    not assigned. 
A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\nint fwscanf_s(FILE *stream, const wchar_t *format, ...)\n{\n    int ret;                    /* If initialization causes  e838 */\n    va_list argList;\n\n    va_start(argList, format);\n    ret = vfwscanf_s(stream, format, argList);\n    va_end(argList);\n    (void)argList;              /* to clear e438 last value assigned not used , the compiler will optimize this code */\n\n    return ret;\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/gets_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securecutil.h\"\n\nstatic void SecTrimCRLF(char *buffer, size_t len)\n{\n    int i;\n    /* No need to determine whether integer overflow exists */\n    for (i = (int)(len - 1); i >= 0 && (buffer[i] == '\\r' || buffer[i] == '\\n'); --i) {\n        buffer[i] = '\\0';\n    }\n    return;\n}\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The gets_s function reads at most one less than the number of characters\n *    specified by destMax from the stream pointed to by stdin, into the array pointed to by buffer\n *    The line consists of all characters up to and including\n *    the first newline character ('\\n'). 
gets_s then replaces the newline\n *    character with a null character ('\\0') before returning the line.\n *    If the first character read is the end-of-file character, a null character\n *    is stored at the beginning of buffer and NULL is returned.\n *\n * <INPUT PARAMETERS>\n *    buffer                         Storage location for input string.\n *    numberOfElements       The size of the buffer.\n *\n * <OUTPUT PARAMETERS>\n *    buffer                         is updated\n *\n * <RETURN VALUE>\n *    buffer                         Successful operation\n *    NULL                           Improper parameter or read fail\n */\nchar *gets_s(char *buffer, size_t numberOfElements)\n{\n    size_t len;\n#ifdef SECUREC_COMPATIBLE_WIN_FORMAT\n    size_t bufferSize = ((numberOfElements == (size_t)-1) ? SECUREC_STRING_MAX_LEN : numberOfElements);\n#else\n    size_t bufferSize = numberOfElements;\n#endif\n\n    if (buffer == NULL || bufferSize == 0 || bufferSize > SECUREC_STRING_MAX_LEN) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"gets_s\");\n        return NULL;\n    }\n\n    if (fgets(buffer, (int)bufferSize, stdin) == NULL) {\n        return NULL;\n    }\n\n    len = strlen(buffer);\n    if (len > 0 && len < bufferSize) {\n        SecTrimCRLF(buffer, len);\n    }\n\n    return buffer;\n}\n\n"
  },
  {
    "path": "third_party/securec/src/input.inl",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef INPUT_INL_5D13A042_DC3F_4ED9_A8D1_882811274C27\n#define INPUT_INL_5D13A042_DC3F_4ED9_A8D1_882811274C27\n\n#if SECUREC_IN_KERNEL\n#include <linux/ctype.h>\n#ifndef EOF\n#define EOF  (-1)\n#endif\n#else\n#if !defined(SECUREC_SYSAPI4VXWORKS) && !defined(SECUREC_CTYPE_MACRO_ADAPT)\n#include <ctype.h>\n#ifdef SECUREC_FOR_WCHAR\n#include <wctype.h>             /* for iswspace */\n#endif\n#endif\n#endif\n\n#define SECUREC_NUM_WIDTH_SHORT                 0\n#define SECUREC_NUM_WIDTH_INT                   1\n#define SECUREC_NUM_WIDTH_LONG                  2\n#define SECUREC_NUM_WIDTH_LONG_LONG             3 /* also long double */\n\n#define SECUREC_BUF_EXT_MUL                     2\n#define SECUREC_BUFFERED_BLOK_SIZE              1024\n\n#if defined(SECUREC_VXWORKS_PLATFORM) && !defined(va_copy) && !defined(__va_copy)\n/* the name is the same as system macro. 
*/\n#define __va_copy(d, s) do { \\\n    size_t size_of_d = (size_t)sizeof(d); \\\n    size_t size_of_s = (size_t)sizeof(s); \\\n    if (size_of_d != size_of_s) { \\\n        (void)memcpy((d), (s), sizeof(va_list)); \\\n    } else { \\\n        (void)memcpy(&(d), &(s), sizeof(va_list)); \\\n    } \\\n} SECUREC_WHILE_ZERO\n#endif\n\n\n#define SECUREC_MULTI_BYTE_MAX_LEN              6\n/* Record a flag for each bit */\n#define SECUREC_BRACKET_INDEX(x)                ((unsigned int)(x) >> 3)\n#define SECUREC_BRACKET_VALUE(x)                ((unsigned char)(1 << ((unsigned int)(x) & 7)))\n\n\n/* Compatibility macro name cannot be modifie */\n#ifndef UNALIGNED\n#if !(defined(_M_IA64)) && !(defined(_M_AMD64))\n#define UNALIGNED\n#else\n#define UNALIGNED __unaligned\n#endif\n#endif\n\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n/* Max 64bit value is 0xffffffffffffffff */\n#define SECUREC_MAX_64BITS_VALUE                18446744073709551615ULL\n#define SECUREC_MAX_64BITS_VALUE_DIV_TEN        1844674407370955161ULL\n#define SECUREC_MAX_64BITS_VALUE_CUT_LAST_DIGIT 18446744073709551610ULL\n#define SECUREC_MIN_64BITS_NEG_VALUE            9223372036854775808ULL\n#define SECUREC_MAX_64BITS_POS_VALUE            9223372036854775807ULL\n#define SECUREC_MIN_32BITS_NEG_VALUE            2147483648ULL\n#define SECUREC_MAX_32BITS_POS_VALUE            2147483647ULL\n#define SECUREC_MAX_32BITS_VALUE                4294967295ULL\n#define SECUREC_MAX_32BITS_VALUE_INC            4294967296ULL\n#define SECUREC_MAX_32BITS_VALUE_DIV_TEN        429496729ULL\n#define SECUREC_LONG_BIT_NUM                    ((unsigned int)(sizeof(long) << 3U))\n\n#define SECUREC_LONG_HEX_BEYOND_MAX(number)     (((number) >> (SECUREC_LONG_BIT_NUM - 4U)) > 0)\n#define SECUREC_LONG_OCTAL_BEYOND_MAX(number)   (((number) >> (SECUREC_LONG_BIT_NUM - 3U)) > 0)\n\n#define SECUREC_QWORD_HEX_BEYOND_MAX(number)    (((number) >> (64U - 4U)) > 0)\n#define SECUREC_QWORD_OCTAL_BEYOND_MAX(number) 
 (((number) >> (64U - 3U)) > 0)\n\n#define SECUREC_LP64_BIT_WIDTH                  64\n#define SECUREC_LP32_BIT_WIDTH                  32\n\n#endif\n\n#define SECUREC_CHAR(x)                         (x)\n#define SECUREC_BRACE                           '{'     /* [ to { */\n\n#ifdef SECUREC_FOR_WCHAR\n#define SECUREC_SCANF_BRACKET_CONDITION(comChr, ch, table, mask) ((comChr) == SECUREC_BRACE && \\\n    (table) != NULL && \\\n    (((table)[((unsigned int)(int)(ch) & SECUREC_CHAR_MASK) >> 3] ^ (mask)) & \\\n    (1 << ((unsigned int)(int)(ch) & 7))))\n#else\n#define SECUREC_SCANF_BRACKET_CONDITION(comChr, ch, table, mask) ((comChr) == SECUREC_BRACE && \\\n    (((table)[((unsigned char)(ch) & 0xff) >> 3] ^ (mask)) & (1 << ((unsigned char)(ch) & 7))))\n#endif\n#define SECUREC_SCANF_STRING_CONDITION(comChr, ch) ((comChr) == SECUREC_CHAR('s') && \\\n    (!((ch) >= SECUREC_CHAR('\\t') && (ch) <= SECUREC_CHAR('\\r')) && (ch) != SECUREC_CHAR(' ')))\n\n/* Do not use   |=   optimize this code, it will cause compiling warning */\n/* only supports  wide characters with a maximum length of two bytes */\n#define SECUREC_BRACKET_SET_BIT(table, ch) do { \\\n    unsigned int tableIndex = SECUREC_BRACKET_INDEX(((unsigned int)(int)(ch) & SECUREC_CHAR_MASK)); \\\n    unsigned int tableValue = SECUREC_BRACKET_VALUE(((unsigned int)(int)(ch) & SECUREC_CHAR_MASK)); \\\n    (table)[tableIndex] = (unsigned char)((table)[tableIndex] | tableValue); \\\n} SECUREC_WHILE_ZERO\n\n#ifdef SECUREC_FOR_WCHAR\n/* table size is 32 x 256 */\n#define SECUREC_BRACKET_TABLE_SIZE    8192\n#define SECUREC_EOF WEOF\n#define SECUREC_MB_LEN 16       /* max. 
# bytes in multibyte char  ,see MB_LEN_MAX */\n/* int to unsigned int clear  e571 */\n#define SECUREC_IS_DIGIT(chr)  (!((unsigned int)(int)(chr) & 0xff00) && isdigit(((unsigned int)(int)(chr) & 0x00ff)))\n#define SECUREC_IS_XDIGIT(chr) (!((unsigned int)(int)(chr) & 0xff00) && isxdigit(((unsigned int)(int)(chr) & 0x00ff)))\n#define SECUREC_IS_SPACE(chr)    iswspace((wint_t)(int)(chr))\n#else\n#define SECUREC_BRACKET_TABLE_SIZE    32\n#define SECUREC_EOF EOF\n#define SECUREC_IS_DIGIT(chr)    isdigit((unsigned char)(chr) & 0x00ff)\n#define SECUREC_IS_XDIGIT(chr)   isxdigit((unsigned char)(chr) & 0x00ff)\n#define SECUREC_IS_SPACE(chr)    isspace((unsigned char)(chr) & 0x00ff)\n#endif\n\n\nstatic SecInt SecSkipSpaceChar(SecFileStream *stream, int *counter);\nstatic SecInt SecGetChar(SecFileStream *stream, int *counter);\nstatic void SecUnGetChar(SecInt ch, SecFileStream *stream, int *counter);\n\ntypedef struct {\n#ifdef SECUREC_FOR_WCHAR\n    unsigned char *table; /* default NULL */\n#else\n    unsigned char table[SECUREC_BRACKET_TABLE_SIZE]; /* Array length is large enough in application scenarios */\n#endif\n    unsigned char mask; /* default 0 */\n} SecBracketTable;\n\n#ifdef SECUREC_FOR_WCHAR\n#define SECUREC_INIT_BRACKET_TABLE { NULL, 0 }\n#else\n#define SECUREC_INIT_BRACKET_TABLE { { 0 }, 0 }\n#endif\n\n#if SECUREC_ENABLE_SCANF_FLOAT\ntypedef struct {\n    size_t floatStrSize;           /* tialization must be length of buffer in charater */\n    size_t floatStrUsedLen;        /* store float string len */\n    SecChar buffer[SECUREC_FLOAT_BUFSIZE + 1];\n    SecChar *floatStr;            /* Initialization must point to buffer */\n    SecChar *allocatedFloatStr;   /* Initialization must be NULL  to store alloced point */\n} SecFloatSpec;\n#endif\n\ntypedef struct {\n    SecUnsignedInt64 number64;\n    unsigned long number;\n    int numberWidth;     /* 0 = SHORT, 1 = int, > 1  long or L_DOUBLE */\n    int isInt64Arg;      /* 1 for 64-bit integer, 0 otherwise */\n    
int negative;        /* 0 is positive */\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    int beyondMax;       /* Non-zero means beyond */\n#endif\n    void *argPtr;        /* Variable parameter pointer */\n    size_t arrayWidth;   /* length of pointer Variable parameter, in charaters */\n    int width;           /* width number in format */\n    int widthSet;        /* 0 is not set width in format */\n    int comChr;          /* Lowercase format conversion characters */\n    int oriComChr;       /* store number conversion */\n    signed char isWChar; /* -1/0 not wchar, 1 for wchar */\n    char suppress;       /* 0 is not have %* in format */\n} SecScanSpec;\n\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n#define SECUREC_INIT_NUMBER_SPEC { 0, 0, 0, 0, 0, 0, NULL, 0, 0, 0, 0, 0, 0 }\n#else\n#define SECUREC_INIT_NUMBER_SPEC { 0, 0, 0, 0, 0, 0, NULL, 0, 0, 0, 0, 0 }\n#endif\n\n#ifdef SECUREC_FOR_WCHAR\n#define SECUREC_GETC fgetwc\n#define SECUREC_UN_GETC ungetwc\n#define SECUREC_CHAR_MASK 0xffff\n#else\n#define SECUREC_GETC fgetc\n#define SECUREC_UN_GETC ungetc\n#define SECUREC_CHAR_MASK 0xff\n#endif\n\n/*\n * Determine if it is a 64-bit pointer  function\n * return 0 is not ,1 is 64bit pointer\n */\nstatic int SecIs64BitPtr(size_t sizeOfVoidStar)\n{\n    /* point size is 4 or 8 , Under the 64 bit system, the value not 0 */\n    /* to clear e778 */\n    if ((sizeOfVoidStar & sizeof(SecInt64)) != 0) {\n        return 1;\n    }\n    return 0;\n}\n\n#if SECUREC_ENABLE_SCANF_FLOAT\n\n/*\n * Convert a floating point string to a floating point number\n */\nstatic void SecAssignFloat(const char *floatStr, int numberWidth, void *argPtr)\n{\n    char *endPtr = NULL;\n    double d;\n#if SECUREC_SUPPORT_STRTOLD\n    if (numberWidth == SECUREC_NUM_WIDTH_LONG_LONG) {\n        long double d2 = strtold(floatStr, &endPtr);\n        *(long double UNALIGNED *)(argPtr) = d2;\n        return;\n    }\n#endif\n    d = 
strtod(floatStr, &endPtr);\n    if (numberWidth > SECUREC_NUM_WIDTH_INT) {\n        *(double UNALIGNED *)(argPtr) = (double)d;\n    } else {\n        *(float UNALIGNED *)(argPtr) = (float)d;\n    }\n}\n\n#ifdef SECUREC_FOR_WCHAR\n/*\n * Convert a floating point wchar string to a floating point number\n * Success  ret 0\n */\nstatic int SecAssignFloatW(const SecFloatSpec *floatSpec, const  SecScanSpec *spec)\n{\n    /* convert float string */\n    size_t mbsLen;\n    size_t tempFloatStrLen = (size_t)(floatSpec->floatStrSize + 1) * sizeof(wchar_t);\n    char *tempFloatStr = (char *)SECUREC_MALLOC(tempFloatStrLen);\n\n    if (tempFloatStr == NULL) {\n        return -1;\n    }\n    tempFloatStr[0] = '\\0';\n    SECUREC_MASK_MSVC_CRT_WARNING\n    mbsLen = wcstombs(tempFloatStr, floatSpec->floatStr, tempFloatStrLen - 1);\n    SECUREC_END_MASK_MSVC_CRT_WARNING\n    if (mbsLen != (size_t)-1) {\n        tempFloatStr[mbsLen] = '\\0';\n        SecAssignFloat(tempFloatStr, spec->numberWidth, spec->argPtr);\n    } else {\n        SECUREC_FREE(tempFloatStr);\n        return -1;\n    }\n    SECUREC_FREE(tempFloatStr);\n    return 0;\n}\n#endif\n/*\n * Splice floating point string\n * return 0 OK\n */\nstatic int SecUpdateFloatString(SecChar ch, SecFloatSpec *floatSpec)\n{\n    floatSpec->floatStr[floatSpec->floatStrUsedLen++] = ch;    /* ch must be '0' - '9' */\n    if (floatSpec->floatStrUsedLen < floatSpec->floatStrSize) {\n        return 0;\n    }\n    if (floatSpec->allocatedFloatStr == NULL) {\n        /* add 1 to clear ZERO LENGTH ALLOCATIONS warning */\n        size_t oriBufSize = floatSpec->floatStrSize* (SECUREC_BUF_EXT_MUL * sizeof(SecChar)) + 1;\n        void *tmpPointer = (void *)SECUREC_MALLOC(oriBufSize);\n        if (tmpPointer == NULL) {\n            return -1;\n        }\n        if (memcpy_s(tmpPointer, oriBufSize, floatSpec->floatStr, floatSpec->floatStrSize * sizeof(SecChar)) != EOK) {\n            SECUREC_FREE(tmpPointer);   /* This is a dead code, just to 
meet the coding requirements */\n            return -1;\n        }\n        floatSpec->floatStr = (SecChar *) (tmpPointer);\n        floatSpec->allocatedFloatStr = (SecChar *) (tmpPointer); /* use to clear free on stack warning */\n        floatSpec->floatStrSize *= SECUREC_BUF_EXT_MUL; /* this is OK, oriBufSize plus 1 just clear warning */\n        return 0;\n    } else {\n        /* LSD 2014.3.6 fix, replace realloc to malloc to avoid heap injection */\n        size_t oriBufSize = floatSpec->floatStrSize * sizeof(SecChar);\n        size_t nextSize = (oriBufSize * SECUREC_BUF_EXT_MUL) + 1; /* add 1 to clear satic check tool warning */\n        /* Prevents integer overflow when calculating the wide character length.\n         * The maximum length of SECUREC_MAX_WIDTH_LEN is enough\n         */\n        if (nextSize <= SECUREC_MAX_WIDTH_LEN) {\n            void *tmpPointer = (void *)SECUREC_MALLOC(nextSize);\n            if (tmpPointer == NULL) {\n                return -1;\n            }\n            if (memcpy_s(tmpPointer, nextSize, floatSpec->floatStr, oriBufSize) != EOK) {\n                SECUREC_FREE(tmpPointer);   /* This is a dead code, just to meet the coding requirements */\n                return -1;\n            }\n            if (memset_s(floatSpec->floatStr, oriBufSize, 0, oriBufSize) != EOK) {\n                SECUREC_FREE(tmpPointer);   /* This is a dead code, just to meet the coding requirements */\n                return -1;\n            }\n            SECUREC_FREE(floatSpec->floatStr);\n\n            floatSpec->floatStr = (SecChar *) (tmpPointer);\n            floatSpec->allocatedFloatStr = (SecChar *) (tmpPointer);    /* use to clear free on stack warning */\n            floatSpec->floatStrSize *= SECUREC_BUF_EXT_MUL; /* this is OK, oriBufSize plus 1 just clear warning */\n            return 0;\n        }\n    }\n    return -1;\n}\n#endif\n\n#ifndef SECUREC_FOR_WCHAR\n/* LSD only multi-bytes string need isleadbyte() function */\nstatic int 
SecIsLeadByte(SecInt ch)\n{\n    unsigned int c = (unsigned int)ch;\n#if !(defined(_MSC_VER) || defined(_INC_WCTYPE))\n    return (int)(c & 0x80);\n#else\n    return (int)isleadbyte((int)(c & 0xff));\n#endif\n}\n#endif\n\n/*\n * Parsing whether it is a wide character\n */\nstatic void SecUpdateWcharFlagByType(SecUnsignedChar ch, SecScanSpec *spec)\n{\n#if defined(SECUREC_FOR_WCHAR) && (defined(SECUREC_COMPATIBLE_WIN_FORMAT))\n    signed char flagForUpperType = -1;\n    signed char flagForLowerType = 1;\n#else\n    signed char flagForUpperType = 1;\n    signed char flagForLowerType = -1;\n#endif\n    /* if no  l or h flag  */\n    if (spec->isWChar == 0) {\n        if ((ch == SECUREC_CHAR('C')) || (ch == SECUREC_CHAR('S'))) {\n            spec->isWChar = flagForUpperType;\n        } else {\n            spec->isWChar = flagForLowerType;\n        }\n    }\n    return;\n}\n/*\n * decode  %l %ll\n */\nstatic void SecDecodeScanQualifierL(const SecUnsignedChar **format, SecScanSpec *spec)\n{\n    const SecUnsignedChar *fmt = *format;\n    if (*(fmt + 1) == SECUREC_CHAR('l')) {\n        spec->isInt64Arg = 1;\n        spec->numberWidth = SECUREC_NUM_WIDTH_LONG_LONG;\n        ++fmt;\n    } else {\n        spec->numberWidth = SECUREC_NUM_WIDTH_LONG;\n#if defined(SECUREC_ON_64BITS) && !(defined(SECUREC_COMPATIBLE_WIN_FORMAT))\n        /* on window 64 system sizeof long is 32bit */\n        spec->isInt64Arg = 1;\n#endif\n        spec->isWChar = 1;\n    }\n    *format = fmt;\n}\n\n/*\n * decode  %I %I43 %I64 %Id %Ii %Io ...\n * set finishFlag to  1  finish Flag\n */\nstatic void SecDecodeScanQualifierI(const SecUnsignedChar **format, SecScanSpec *spec, int *finishFlag)\n{\n    const SecUnsignedChar *fmt = *format;\n    if ((*(fmt + 1) == SECUREC_CHAR('6')) &&\n        (*(fmt + 2) == SECUREC_CHAR('4'))) { /* offset 2 for I64 */\n        spec->isInt64Arg = 1;\n        *format = *format + 2; /* add 2 to skip I64 point to '4' next loop will inc */\n    } else if ((*(fmt + 1) == 
SECUREC_CHAR('3')) &&\n                (*(fmt + 2) == SECUREC_CHAR('2'))) { /* offset 2 for I32 */\n        *format = *format + 2; /* add 2 to skip I32 point to '2' next loop will inc */\n    } else if ((*(fmt + 1) == SECUREC_CHAR('d')) ||\n                (*(fmt + 1) == SECUREC_CHAR('i')) ||\n                (*(fmt + 1) == SECUREC_CHAR('o')) ||\n                (*(fmt + 1) == SECUREC_CHAR('x')) ||\n                (*(fmt + 1) == SECUREC_CHAR('X'))) {\n        spec->isInt64Arg = SecIs64BitPtr(sizeof(void *));\n    } else {\n        /* for %I */\n        spec->isInt64Arg = SecIs64BitPtr(sizeof(void *));\n        *finishFlag = 1;\n    }\n}\n\nstatic int SecDecodeScanWidth(const SecUnsignedChar **format, SecScanSpec *spec)\n{\n    const SecUnsignedChar *fmt = *format;\n    while (SECUREC_IS_DIGIT(*fmt)) {\n        spec->widthSet = 1;\n        if (SECUREC_MUL_TEN_ADD_BEYOND_MAX(spec->width)) {\n            return -1;\n        }\n        spec->width = (int)SECUREC_MUL_TEN((unsigned int)spec->width) + (unsigned char)(*fmt - SECUREC_CHAR('0'));\n        ++fmt;\n    }\n    *format = fmt;\n    return 0;\n}\n\n/*\n * init default flags for each format\n */\nstatic void SecSetDefaultScanSpec(SecScanSpec *spec)\n{\n    spec->number64 = 0;\n    spec->number = 0;\n    spec->numberWidth = SECUREC_NUM_WIDTH_INT;    /* 0 = SHORT, 1 = int, > 1  long or L_DOUBLE */\n    spec->isInt64Arg = 0;                         /* 1 for 64-bit integer, 0 otherwise */\n    spec->negative = 0;\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    spec->beyondMax = 0;\n#endif\n    spec->argPtr = NULL;\n    spec->arrayWidth = 0;\n    spec->width = 0;\n    spec->widthSet = 0;\n    spec->comChr = 0;\n    spec->isWChar = 0;\n    spec->suppress = 0;\n}\n\n/*\n * decode qualifier %I %L %h ...\n * set finishFlag to  1  finish Flag\n */\nstatic void  SecDecodeScanQualifier(const SecUnsignedChar **format, SecScanSpec *spec, int *finishFlag)\n{\n    switch ((int)(unsigned 
char)(**(format))) {\n        case SECUREC_CHAR('F'):    /* fall-through */ /* FALLTHRU */\n        case SECUREC_CHAR('N'):\n            break;\n        case SECUREC_CHAR('h'):\n            --spec->numberWidth;  /* h for SHORT , hh for CHAR */\n            spec->isWChar = -1;\n            break;\n#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT\n        case SECUREC_CHAR('j'):\n            spec->numberWidth = SECUREC_NUM_WIDTH_LONG_LONG;  /* intmax_t or uintmax_t */\n            spec->isInt64Arg = 1;\n            break;\n        case SECUREC_CHAR('t'):    /* fall-through */ /* FALLTHRU */\n#endif\n        case SECUREC_CHAR('z'):\n#ifdef SECUREC_ON_64BITS\n            spec->numberWidth = SECUREC_NUM_WIDTH_LONG_LONG;\n            spec->isInt64Arg = 1;\n#else\n            spec->numberWidth = SECUREC_NUM_WIDTH_LONG;\n#endif\n            break;\n        case SECUREC_CHAR('L'):    /* long double */ /* fall-through */ /* FALLTHRU */\n        case SECUREC_CHAR('q'):\n            spec->numberWidth = SECUREC_NUM_WIDTH_LONG_LONG;\n            spec->isInt64Arg = 1;\n            break;\n        case SECUREC_CHAR('l'):\n            SecDecodeScanQualifierL(format, spec);\n            break;\n        case SECUREC_CHAR('w'):\n            spec->isWChar = 1;\n            break;\n        case SECUREC_CHAR('*'):\n            spec->suppress = 1;\n            break;\n        case SECUREC_CHAR('I'):\n            SecDecodeScanQualifierI(format, spec, finishFlag);\n            break;\n        default:\n            *finishFlag = 1;\n            break;\n    }\n\n}\n/*\n * decode width and qualifier in format\n */\nstatic int SecDecodeScanFlag(const SecUnsignedChar **format, SecScanSpec *spec)\n{\n    const SecUnsignedChar *fmt = *format;\n    int finishFlag = 0;\n\n    do {\n        ++fmt; /*  first skip % , next  seek fmt */\n        /* may %*6d , so put it inside the loop */\n        if (SecDecodeScanWidth(&fmt, spec) != 0) {\n            return -1;\n        }\n        SecDecodeScanQualifier(&fmt, 
spec, &finishFlag);\n    } while (finishFlag == 0);\n    *format = fmt;\n    return 0;\n}\n\n\n\n\n\n/*\n * Judging whether a zeroing buffer is needed according to different formats\n */\nstatic int SecDecodeClearFormat(const SecUnsignedChar *format, int *comChr)\n{\n    const SecUnsignedChar *fmt = format;\n    /* to lowercase */\n    int ch = (unsigned char)(*fmt) | (SECUREC_CHAR('a') - SECUREC_CHAR('A'));\n    if (!(ch == SECUREC_CHAR('c') || ch == SECUREC_CHAR('s') || ch == SECUREC_BRACE)) {\n        return -1;     /* first argument is not a string type */\n    }\n    if (ch == SECUREC_BRACE) {\n#if !(defined(SECUREC_COMPATIBLE_WIN_FORMAT))\n        if (*fmt == SECUREC_CHAR('{')) {\n            return -1;\n        }\n#endif\n        ++fmt;\n        if (*fmt == SECUREC_CHAR('^')) {\n            ++fmt;\n        }\n        if (*fmt == SECUREC_CHAR(']')) {\n            ++fmt;\n        }\n        while ((*fmt != SECUREC_CHAR('\\0')) && (*fmt != SECUREC_CHAR(']'))) {\n            ++fmt;\n        }\n        if (*fmt == SECUREC_CHAR('\\0')) {\n            return -1; /* trunc'd format string */\n        }\n    }\n    *comChr = ch;\n    return 0;\n}\n\n/*\n * add L'\\0' for wchar string , add '\\0' for char string\n */\nstatic void SecAddEndingZero(void *ptr, const SecScanSpec *spec)\n{\n    *(char *)ptr = '\\0';\n    (void)spec; /* clear not use */\n#if SECUREC_HAVE_WCHART\n    if (spec->isWChar > 0) {\n        *(wchar_t UNALIGNED *)ptr = L'\\0';\n    }\n#endif\n}\n\n#ifdef SECUREC_FOR_WCHAR\n/*\n *  Clean up the first %s %c buffer to zero for wchar version\n */\nvoid SecClearDestBufW(const wchar_t *buffer, const wchar_t *format, va_list argList)\n#else\n/*\n *  Clean up the first %s %c buffer to zero for char version\n */\nvoid SecClearDestBuf(const char *buffer, const char *format, va_list argList)\n#endif\n{\n\n    va_list argListSave;        /* backup for argList value, this variable don't need initialized */\n    SecScanSpec spec;\n    int comChr = 0;\n    const 
SecUnsignedChar *fmt = (const SecUnsignedChar *)format;\n    if (fmt == NULL) {\n        return;\n    }\n\n    /* find first % */\n    while (*fmt != SECUREC_CHAR('\\0') && *fmt != SECUREC_CHAR('%')) {\n        ++fmt;\n    }\n    if (*fmt == SECUREC_CHAR('\\0')) {\n        return;\n    }\n\n    SecSetDefaultScanSpec(&spec);\n    if (SecDecodeScanFlag(&fmt, &spec) != 0) {\n        return;\n    }\n\n    /* update wchar flag for %S %C */\n    SecUpdateWcharFlagByType(*fmt, &spec);\n\n    if (spec.suppress != 0 || SecDecodeClearFormat(fmt, &comChr) != 0) {\n        return;\n    }\n\n    if ((buffer != NULL) && (*buffer != SECUREC_CHAR('\\0')) && (comChr != SECUREC_CHAR('s'))) {\n        /* when buffer not empty just clear %s.\n         * example call sscanf by  argment of (\" \\n\", \"%s\", s, sizeof(s))\n         */\n        return;\n    }\n    (void)memset(&argListSave, 0, sizeof(va_list)); /* to clear e530 argListSave not initialized */\n#if defined(va_copy)\n    va_copy(argListSave, argList);\n#elif defined(__va_copy)        /* for vxworks */\n    __va_copy(argListSave, argList);\n#else\n    argListSave = argList;\n#endif\n    do {\n        void *argPtr = (void *)va_arg(argListSave, void *);\n        /* Get the next argument - size of the array in characters */\n        size_t arrayWidth = ((size_t)(va_arg(argListSave, size_t))) & 0xFFFFFFFFUL;\n        va_end(argListSave);\n        /* to clear e438 last value assigned not used , the compiler will optimize this code */\n        (void)argListSave;\n        /* There is no need to judge the upper limit */\n        if (arrayWidth == 0 || argPtr == NULL) {\n            return;\n        }\n\n        /* clear one char */\n        SecAddEndingZero(argPtr, &spec);\n    } SECUREC_WHILE_ZERO;\n    return;\n\n}\n\n/*\n *  Assign number  to output buffer\n */\nstatic void SecAssignNumber(const SecScanSpec *spec)\n{\n    void *argPtr = spec->argPtr;\n    if (spec->isInt64Arg != 0) {\n#if defined(SECUREC_VXWORKS_PLATFORM)\n#if 
defined(SECUREC_VXWORKS_PLATFORM_COMP)\n        *(SecInt64 UNALIGNED *)argPtr = (SecInt64)(spec->number64);\n#else\n         /* take number64 as unsigned number unsigned to int clear Compile warning */\n        *(SecInt64 UNALIGNED *)argPtr = *(SecUnsignedInt64 *)(&(spec->number64));\n#endif\n#else\n        /* take number64 as unsigned number */\n        *(SecInt64 UNALIGNED *)argPtr = (SecInt64)(spec->number64);\n#endif\n        return;\n    }\n    if (spec->numberWidth > SECUREC_NUM_WIDTH_INT) {\n        /* take number as unsigned number */\n        *(long UNALIGNED *)argPtr = (long)(spec->number);\n    } else if (spec->numberWidth == SECUREC_NUM_WIDTH_INT) {\n        *(int UNALIGNED *)argPtr = (int)(spec->number);\n    } else if (spec->numberWidth == SECUREC_NUM_WIDTH_SHORT) {\n        /* take number as unsigned number */\n        *(short UNALIGNED *)argPtr = (short)(spec->number);\n    } else {  /* < 0 for hh format modifier */\n        /* take number as unsigned number */\n        *(char UNALIGNED *)argPtr = (char)(spec->number);\n    }\n}\n\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n/*\n *  Judge the long bit width\n */\nstatic int SecIsLongBitEqual(int bitNum)\n{\n    return (unsigned int)bitNum == SECUREC_LONG_BIT_NUM;\n}\n#endif\n/*\n * Convert hexadecimal characters to decimal value\n */\nstatic int SecHexValueOfChar(SecInt ch)\n{\n    /* use isdigt Causing tool false alarms */\n    return (int)((ch >= '0' && ch <= '9') ? 
((unsigned char)ch - '0') :\n            ((((unsigned char)ch | (unsigned char)('a' - 'A')) - ('a')) + 10)); /* Adding 10 is to hex value */\n}\n\n\n\n/*\n * Parse decimal character to integer for 32bit .\n */\nstatic void SecDecodeNumberDecimal(SecInt ch, SecScanSpec *spec)\n{\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    unsigned long decimalEdge = SECUREC_MAX_32BITS_VALUE_DIV_TEN;\n#ifdef SECUREC_ON_64BITS\n    if (SecIsLongBitEqual(SECUREC_LP64_BIT_WIDTH)) {\n        decimalEdge = (unsigned long)SECUREC_MAX_64BITS_VALUE_DIV_TEN;\n    }\n#else\n    if (SecIsLongBitEqual(SECUREC_LP32_BIT_WIDTH)) {\n        decimalEdge = SECUREC_MAX_32BITS_VALUE_DIV_TEN;\n    }\n#endif\n    if (spec->number > decimalEdge) {\n        spec->beyondMax = 1;\n    }\n#endif\n    spec->number = SECUREC_MUL_TEN(spec->number);\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    if (spec->number == SECUREC_MUL_TEN(decimalEdge)) {\n        SecUnsignedInt64 number64As = (unsigned long)SECUREC_MAX_64BITS_VALUE - spec->number;\n        if (number64As < (SecUnsignedInt64)((SecUnsignedInt)ch - SECUREC_CHAR('0'))) {\n            spec->beyondMax = 1;\n        }\n    }\n#endif\n    spec->number += (unsigned long)((SecUnsignedInt)ch - SECUREC_CHAR('0'));\n\n}\n\n\n/*\n * Parse Hex character to integer for 32bit .\n */\nstatic void SecDecodeNumberHex(SecInt ch, SecScanSpec *spec)\n{\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    if (SECUREC_LONG_HEX_BEYOND_MAX(spec->number)) {\n        spec->beyondMax = 1;\n    }\n#endif\n    spec->number = SECUREC_MUL_SIXTEEN(spec->number);\n    spec->number += (unsigned long)(unsigned int)SecHexValueOfChar(ch);\n}\n\n\n/*\n * Parse Octal character to integer for 32bit .\n */\nstatic void SecDecodeNumberOctal(SecInt ch, SecScanSpec *spec)\n{\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    if 
(SECUREC_LONG_OCTAL_BEYOND_MAX(spec->number)) {\n        spec->beyondMax = 1;\n    }\n#endif\n    spec->number = SECUREC_MUL_EIGHT(spec->number);\n    spec->number += (unsigned long)((SecUnsignedInt)ch - SECUREC_CHAR('0'));\n}\n\n\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n/* Compatible with integer negative values other than int */\nstatic void SecFinishNumberNegativeOther(int comChr, int numberWidth, SecScanSpec *spec)\n{\n    if ((comChr == SECUREC_CHAR('d')) || (comChr == SECUREC_CHAR('i'))) {\n        if (spec->number > (unsigned long)(1ULL << (SECUREC_LONG_BIT_NUM - 1))) {\n            spec->number = (unsigned long)(1ULL << (SECUREC_LONG_BIT_NUM - 1));\n        } else {\n            spec->number = (unsigned long)(-(long)spec->number);\n        }\n        if (spec->beyondMax != 0) {\n            if (numberWidth < SECUREC_NUM_WIDTH_INT) {\n                spec->number = 0;\n            } else if (numberWidth == SECUREC_NUM_WIDTH_LONG) {\n                spec->number = ((unsigned long)(1UL << (SECUREC_LONG_BIT_NUM - 1)));\n            }\n        }\n    } else { /* o, u, x, X, p */\n        spec->number = (unsigned long)(-(long)spec->number);\n        if (spec->beyondMax != 0) {\n            spec->number |= (unsigned long)SECUREC_MAX_64BITS_VALUE;\n        }\n    }\n}\n/* Compatible processing of integer negative numbers */\nstatic void SecFinishNumberNegativeInt(int comChr, SecScanSpec *spec)\n{\n    if ((comChr == SECUREC_CHAR('d')) || (comChr == SECUREC_CHAR('i'))) {\n#ifdef SECUREC_ON_64BITS\n        if (SecIsLongBitEqual(SECUREC_LP64_BIT_WIDTH)) {\n            if ((spec->number > SECUREC_MIN_64BITS_NEG_VALUE)) {\n                spec->number = 0;\n            } else {\n                spec->number = (unsigned int)(-(int)spec->number);\n            }\n        }\n#else\n        if (SecIsLongBitEqual(SECUREC_LP32_BIT_WIDTH)) {\n            if ((spec->number > SECUREC_MIN_32BITS_NEG_VALUE)) {\n                spec->number = 
SECUREC_MIN_32BITS_NEG_VALUE;\n            } else {\n                spec->number = (unsigned int)(-(int)spec->number);\n            }\n        }\n#endif\n        if (spec->beyondMax != 0) {\n#ifdef SECUREC_ON_64BITS\n            if (SecIsLongBitEqual(SECUREC_LP64_BIT_WIDTH)) {\n                spec->number = 0;\n            }\n#else\n            if (SecIsLongBitEqual(SECUREC_LP32_BIT_WIDTH)) {\n                spec->number = SECUREC_MIN_32BITS_NEG_VALUE;\n            }\n#endif\n        }\n    } else {            /* o, u, x, X ,p */\n#ifdef SECUREC_ON_64BITS\n        if (spec->number > SECUREC_MAX_32BITS_VALUE_INC) {\n            spec->number = SECUREC_MAX_32BITS_VALUE;\n        } else {\n            spec->number = (unsigned int)(-(int)spec->number);\n        }\n#else\n        spec->number = (unsigned int)(-(int)spec->number);\n#endif\n        if (spec->beyondMax != 0) {\n            spec->number |= (unsigned long)SECUREC_MAX_64BITS_VALUE;\n        }\n    }\n}\n\n/* Compatible with integer positive values other than int */\nstatic void SecFinishNumberPositiveOther(int comChr, int numberWidth, SecScanSpec *spec)\n{\n    if (comChr == SECUREC_CHAR('d') || comChr == SECUREC_CHAR('i')) {\n        if (spec->number > ((unsigned long)(1UL << (SECUREC_LONG_BIT_NUM - 1)) - 1)) {\n            spec->number = ((unsigned long)(1UL << (SECUREC_LONG_BIT_NUM - 1)) - 1);\n        }\n        if ((spec->beyondMax != 0 && numberWidth < SECUREC_NUM_WIDTH_INT)) {\n            spec->number |= (unsigned long)SECUREC_MAX_64BITS_VALUE;\n        }\n        if (spec->beyondMax != 0 && numberWidth == SECUREC_NUM_WIDTH_LONG) {\n            spec->number = ((unsigned long)(1UL << (SECUREC_LONG_BIT_NUM - 1)) - 1);\n        }\n    } else {\n        if (spec->beyondMax != 0) {\n            spec->number |= (unsigned long)SECUREC_MAX_64BITS_VALUE;\n        }\n    }\n}\n\n/* Compatible processing of integer positive numbers */\nstatic void SecFinishNumberPositiveInt(int comChr, SecScanSpec *spec)\n{\n  
  if ((comChr == SECUREC_CHAR('d')) || (comChr == SECUREC_CHAR('i'))) {\n#ifdef SECUREC_ON_64BITS\n        if (SecIsLongBitEqual(SECUREC_LP64_BIT_WIDTH)) {\n            if (spec->number > SECUREC_MAX_64BITS_POS_VALUE) {\n                spec->number |= (unsigned long)SECUREC_MAX_64BITS_VALUE;\n            }\n        }\n        if (spec->beyondMax != 0 && SecIsLongBitEqual(SECUREC_LP64_BIT_WIDTH)) {\n            spec->number |= (unsigned long)SECUREC_MAX_64BITS_VALUE;\n        }\n#else\n        if (SecIsLongBitEqual(SECUREC_LP32_BIT_WIDTH)) {\n            if (spec->number > SECUREC_MAX_32BITS_POS_VALUE) {\n                spec->number = SECUREC_MAX_32BITS_POS_VALUE;\n            }\n        }\n        if (spec->beyondMax != 0 && SecIsLongBitEqual(SECUREC_LP32_BIT_WIDTH)) {\n            spec->number = SECUREC_MAX_32BITS_POS_VALUE;\n        }\n#endif\n    } else {            /* o,u,x,X,p */\n        if (spec->beyondMax != 0) {\n            spec->number = SECUREC_MAX_32BITS_VALUE;\n        }\n    }\n}\n\n#endif\n\n\n/*\n * Parse decimal character to integer for 64bit .\n */\nstatic void SecDecodeNumber64Decimal(SecInt ch, SecScanSpec *spec)\n{\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    if (spec->number64 > SECUREC_MAX_64BITS_VALUE_DIV_TEN) {\n        spec->beyondMax = 1;\n    }\n#endif\n    spec->number64 = SECUREC_MUL_TEN(spec->number64);\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    if (spec->number64 == SECUREC_MAX_64BITS_VALUE_CUT_LAST_DIGIT) {\n        SecUnsignedInt64 number64As = (SecUnsignedInt64)SECUREC_MAX_64BITS_VALUE - spec->number64;\n        if (number64As < (SecUnsignedInt64)((SecUnsignedInt)ch - SECUREC_CHAR('0'))) {\n            spec->beyondMax = 1;\n        }\n    }\n#endif\n    spec->number64 += (SecUnsignedInt64)((SecUnsignedInt)ch - SECUREC_CHAR('0'));\n}\n\n/*\n * Parse Hex character to integer for 64bit .\n */\nstatic void SecDecodeNumber64Hex(SecInt ch, SecScanSpec 
*spec)\n{\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    if (SECUREC_QWORD_HEX_BEYOND_MAX(spec->number64)) {\n        spec->beyondMax = 1;\n    }\n#endif\n    spec->number64 = SECUREC_MUL_SIXTEEN(spec->number64);\n    spec->number64 += (SecUnsignedInt64)(unsigned int)SecHexValueOfChar(ch);\n\n}\n\n/*\n * Parse Octal character to integer for 64bit .\n */\nstatic void SecDecodeNumber64Octal(SecInt ch, SecScanSpec *spec)\n{\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    if (SECUREC_QWORD_OCTAL_BEYOND_MAX(spec->number64)) {\n        spec->beyondMax = 1;\n    }\n#endif\n    spec->number64 = SECUREC_MUL_EIGHT(spec->number64);\n    spec->number64 += (SecUnsignedInt64)((SecUnsignedInt)ch - SECUREC_CHAR('0'));\n}\n\n#define SECUREC_DECODE_NUMBER_FUNC_NUM 2\n/* Function name cannot add address symbol, causing 546 alarm */\nstatic void (*g_secDecodeNumberHex[SECUREC_DECODE_NUMBER_FUNC_NUM])(SecInt ch, SecScanSpec *spec) = \\\n    { SecDecodeNumberHex, SecDecodeNumber64Hex };\nstatic void (*g_secDecodeNumberOctal[SECUREC_DECODE_NUMBER_FUNC_NUM])(SecInt ch, SecScanSpec *spec) = \\\n    { SecDecodeNumberOctal, SecDecodeNumber64Octal };\nstatic void (*g_secDecodeNumberDecimal[SECUREC_DECODE_NUMBER_FUNC_NUM])(SecInt ch, SecScanSpec *spec) = \\\n    { SecDecodeNumberDecimal, SecDecodeNumber64Decimal };\n\n/*\n * Parse 64-bit integer formatted input, return 0 when ch is a number.\n */\nstatic int SecDecodeNumber(SecInt ch, SecScanSpec *spec)\n{\n    if (spec->comChr == SECUREC_CHAR('x') || spec->comChr == SECUREC_CHAR('p')) {\n        if (SECUREC_IS_XDIGIT(ch)) {\n            (*g_secDecodeNumberHex[spec->isInt64Arg])(ch, spec);\n        } else {\n            return -1;\n        }\n        return 0;\n    }\n    if (!(SECUREC_IS_DIGIT(ch))) {\n        return -1;\n    }\n    if (spec->comChr == SECUREC_CHAR('o')) {\n        if (ch < SECUREC_CHAR('8')) {\n            (*g_secDecodeNumberOctal[spec->isInt64Arg])(ch, 
spec);\n        } else {\n            return -1;\n        }\n    } else { /* comChr is 'd' */\n        (*g_secDecodeNumberDecimal[spec->isInt64Arg])(ch, spec);\n    }\n    return 0;\n}\n\n\n/*\n * Complete the final 32-bit integer formatted input\n */\nstatic void SecFinishNumber(SecScanSpec *spec)\n{\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    if (spec->negative != 0) {\n        if (spec->numberWidth == SECUREC_NUM_WIDTH_INT) {\n            SecFinishNumberNegativeInt(spec->oriComChr, spec);\n        } else {\n            SecFinishNumberNegativeOther(spec->oriComChr, spec->numberWidth, spec);\n        }\n    } else {\n        if (spec->numberWidth == SECUREC_NUM_WIDTH_INT) {\n            SecFinishNumberPositiveInt(spec->oriComChr, spec);\n        } else {\n            SecFinishNumberPositiveOther(spec->oriComChr, spec->numberWidth, spec);\n        }\n    }\n#else\n    if (spec->negative != 0) {\n#if defined(__hpux)\n        if (spec->oriComChr != SECUREC_CHAR('p')) {\n            spec->number = (unsigned long)(-(long)spec->number);\n        }\n#else\n        spec->number = (unsigned long)(-(long)spec->number);\n#endif\n    }\n#endif\n    return;\n}\n\n/*\n * Complete the final 64-bit integer formatted input\n */\nstatic void SecFinishNumber64(SecScanSpec *spec)\n{\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))\n    if (spec->negative != 0) {\n        if (spec->oriComChr == (SECUREC_CHAR('d')) || (spec->oriComChr == SECUREC_CHAR('i'))) {\n            if (spec->number64 > SECUREC_MIN_64BITS_NEG_VALUE) {\n                spec->number64 = SECUREC_MIN_64BITS_NEG_VALUE;\n            } else {\n                spec->number64 = (SecUnsignedInt64)(-(SecInt64)spec->number64);\n            }\n            if (spec->beyondMax != 0) {\n                spec->number64 = SECUREC_MIN_64BITS_NEG_VALUE;\n            }\n        } else {                /* o, u, x, X, p */\n            spec->number64 = 
(SecUnsignedInt64)(-(SecInt64)spec->number64);\n            if (spec->beyondMax != 0) {\n                spec->number64 = SECUREC_MAX_64BITS_VALUE;\n            }\n        }\n    } else {\n        if ((spec->oriComChr == SECUREC_CHAR('d')) || (spec->oriComChr == SECUREC_CHAR('i'))) {\n            if (spec->number64 > SECUREC_MAX_64BITS_POS_VALUE) {\n                spec->number64 = SECUREC_MAX_64BITS_POS_VALUE;\n            }\n            if (spec->beyondMax != 0) {\n                spec->number64 = SECUREC_MAX_64BITS_POS_VALUE;\n            }\n        } else {\n            if (spec->beyondMax != 0) {\n                spec->number64 = SECUREC_MAX_64BITS_VALUE;\n            }\n        }\n    }\n#else\n    if (spec->negative != 0) {\n#if defined(__hpux)\n        if (spec->oriComChr != SECUREC_CHAR('p')) {\n            spec->number64 = (SecUnsignedInt64)(-(SecInt64)spec->number64);\n        }\n#else\n        spec->number64 = (SecUnsignedInt64)(-(SecInt64)spec->number64);\n#endif\n    }\n#endif\n    return;\n}\nstatic void (*g_secFinishNumber[SECUREC_DECODE_NUMBER_FUNC_NUM])(SecScanSpec *spec) = \\\n    { SecFinishNumber, SecFinishNumber64 };\n\n#if SECUREC_ENABLE_SCANF_FILE\n\n/*\n *  Adjust the pointer position of the file stream\n */\nstatic void SecSeekStream(SecFileStream *stream)\n{\n    if ((stream->count == 0) && feof(stream->pf)) {\n        /* file pointer at the end of file, don't need to seek back */\n        stream->base[0] = '\\0';\n        return;\n    }\n    /* LSD seek to original position, bug fix 2014 1 21 */\n    if (fseek(stream->pf, stream->oriFilePos, SEEK_SET)) {\n        /* seek failed, ignore it */\n        stream->oriFilePos = 0;\n        return;\n    }\n\n    if (stream->fileRealRead > 0) { /* LSD bug fix. 
when file reach to EOF, don't seek back */\n#if (defined(SECUREC_COMPATIBLE_WIN_FORMAT))\n        int loops;\n        for (loops = 0; loops < (stream->fileRealRead / SECUREC_BUFFERED_BLOK_SIZE); ++loops) {\n            if (fread(stream->base, (size_t)1, (size_t)SECUREC_BUFFERED_BLOK_SIZE,\n                stream->pf) != SECUREC_BUFFERED_BLOK_SIZE) {\n                break;\n            }\n        }\n        if ((stream->fileRealRead % SECUREC_BUFFERED_BLOK_SIZE) != 0) {\n            size_t ret = fread(stream->base, (size_t)((unsigned int)stream->fileRealRead % SECUREC_BUFFERED_BLOK_SIZE),\n                               (size_t)1, stream->pf);\n            if ((ret == 1 || ret == 0) && (ftell(stream->pf) < stream->oriFilePos + stream->fileRealRead)) {\n                (void)fseek(stream->pf, stream->oriFilePos + stream->fileRealRead, SEEK_SET);\n            }\n        }\n\n#else\n        /* in linux like system */\n        if (fseek(stream->pf, stream->oriFilePos + stream->fileRealRead, SEEK_SET)) {\n            /* seek failed, ignore it */\n            stream->oriFilePos = 0;\n        }\n#endif\n    }\n\n    return;\n}\n\n/*\n *  Adjust the pointer position of the file stream and free memory\n */\nstatic void SecAdjustStream(SecFileStream *stream)\n{\n    if (stream != NULL && (stream->flag & SECUREC_FILE_STREAM_FLAG) && stream->base != NULL) {\n        SecSeekStream(stream);\n        SECUREC_FREE(stream->base);\n        stream->base = NULL;\n    }\n    return;\n}\n#endif\n\nstatic void SecSkipSpaceFormat(const SecUnsignedChar **format)\n{\n    const SecUnsignedChar *fmt = *format;\n    while (SECUREC_IS_SPACE(*fmt)) {\n        ++fmt;\n    }\n    *format = fmt;\n}\n#ifndef SECUREC_FOR_WCHAR\n/*\n * Handling multi-character characters\n */\nstatic int SecDecodeLeadByte(SecInt ch, const SecUnsignedChar **format, SecFileStream *stream, int *counter)\n{\n#if SECUREC_HAVE_MBTOWC\n    char temp[SECUREC_MULTI_BYTE_MAX_LEN];\n    const SecUnsignedChar *fmt = *format;\n    
wchar_t tempWChar = L'\\0';\n    int ch2 = SecGetChar(stream, counter);\n    if (*fmt == SECUREC_CHAR('\\0') || (int)(*fmt) != (ch2)) {\n        /* LSD in console mode, ungetc twice may cause problem */\n        SecUnGetChar(ch2, stream, counter);\n        SecUnGetChar(ch, stream, counter);\n        return -1;\n    }\n    ++fmt;\n    if (MB_CUR_MAX >= SECUREC_UTF8_BOM_HEADER_SIZE &&\n        (((unsigned char)ch & SECUREC_UTF8_LEAD_1ST) == SECUREC_UTF8_LEAD_1ST) &&\n        (((unsigned char)ch2 & SECUREC_UTF8_LEAD_2ND) == SECUREC_UTF8_LEAD_2ND)) {\n        /* this char is very likely to be a UTF-8 char */\n        int ch3 = SecGetChar(stream, counter);\n        temp[0] = (char)ch;\n        temp[1] = (char)ch2; /* 1 index of second character */\n        temp[2] = (char)ch3; /* 2 index of third character */\n        temp[3] = '\\0';      /* 3 of string terminator position */\n\n        if (mbtowc(&tempWChar, temp, sizeof(temp)) > 0) {\n            /* succeed */\n            if (*fmt == SECUREC_CHAR('\\0') || (int)(*fmt) != (int)ch3) {\n                SecUnGetChar(ch3, stream, counter);\n                return -1;\n            }\n            ++fmt;\n            *counter = *counter - 1;\n        } else {\n            SecUnGetChar(ch3, stream, counter);\n        }\n    }\n    *counter = *counter - 1;    /* only count as one character read */\n    *format = fmt;\n    return 0;\n#else\n    SecUnGetChar(ch, stream, counter);\n    (void)format;\n    return -1;\n#endif\n}\n#endif\n\n\n\n/*\n *  Resolving sequence of characters from %[ format\n */\nstatic int SecSetupBracketTable(const SecUnsignedChar **format, SecBracketTable *bracketTable)\n{\n    const SecUnsignedChar *fmt = *format;\n    SecUnsignedChar prevChar = 0;\n    SecUnsignedChar expCh;\n    SecUnsignedChar last = 0;\n#if !(defined(SECUREC_COMPATIBLE_WIN_FORMAT))\n    if (*fmt == SECUREC_CHAR('{')) {\n        return -1;\n    }\n#endif\n    /* for building \"table\" data */\n    ++fmt; /* skip [ */\n    
bracketTable->mask = 0;\n    if (*fmt == SECUREC_CHAR('^')) {\n        ++fmt;\n        bracketTable->mask = (unsigned char)0xff;\n    }\n    if (*fmt == SECUREC_CHAR(']')) {\n        prevChar = SECUREC_CHAR(']');\n        ++fmt;\n        SECUREC_BRACKET_SET_BIT(bracketTable->table, SECUREC_CHAR(']'));\n    }\n    while (*fmt != SECUREC_CHAR('\\0') && *fmt != SECUREC_CHAR(']')) {\n        expCh = *fmt++;\n        if (expCh != SECUREC_CHAR('-') || prevChar == 0 || *fmt == SECUREC_CHAR(']')) {\n            /* normal character */\n            prevChar = expCh;\n            SECUREC_BRACKET_SET_BIT(bracketTable->table, expCh);\n        } else {\n            /* for %[a-z] */\n            expCh = *fmt++;   /* get end of range */\n            if (prevChar < expCh) { /* %[a-z] */\n                last = expCh;\n            } else {\n                prevChar = expCh;\n#if (defined(SECUREC_COMPATIBLE_WIN_FORMAT))\n                /* %[z-a] */\n                last = prevChar;\n\n#else\n                SECUREC_BRACKET_SET_BIT(bracketTable->table, SECUREC_CHAR('-'));\n                SECUREC_BRACKET_SET_BIT(bracketTable->table, expCh);\n                continue;\n#endif\n            }\n            /* format %[a-\\xff] last is 0xFF, condition (rnch <= last) cause dead loop */\n            for (expCh = prevChar; expCh < last; ++expCh) {\n                SECUREC_BRACKET_SET_BIT(bracketTable->table, expCh);\n            }\n            SECUREC_BRACKET_SET_BIT(bracketTable->table, last);\n            prevChar = 0;\n        }\n    }\n    *format = fmt;\n    return 0;\n}\n\n\n#ifdef SECUREC_FOR_WCHAR\nstatic int SecInputForWchar(SecInt ch, SecScanSpec *spec)\n{\n    void *endPtr = spec->argPtr;\n    if (spec->isWChar > 0) {\n        *(wchar_t UNALIGNED *)endPtr = (wchar_t)ch;\n        endPtr = (wchar_t *)endPtr + 1;\n        --spec->arrayWidth;\n    } else {\n#if SECUREC_HAVE_WCTOMB\n        int temp;\n        char tmpBuf[SECUREC_MB_LEN + 1];\n        SECUREC_MASK_MSVC_CRT_WARNING temp 
= wctomb(tmpBuf, (wchar_t)ch);\n        SECUREC_END_MASK_MSVC_CRT_WARNING\n        if (temp <= 0 || ((size_t)(unsigned int)temp) > sizeof(tmpBuf)) {\n            /* if wctomb  error, then ignore character */\n            return 0;\n        }\n        if (((size_t)(unsigned int)temp) > spec->arrayWidth) {\n            return -1;\n        }\n        if (memcpy_s(endPtr, spec->arrayWidth, tmpBuf, (size_t)(unsigned int)temp) != EOK) {\n            return -1;\n        }\n        endPtr = (char *)endPtr + temp;\n        spec->arrayWidth -= (size_t)(unsigned int)temp;\n#else\n        return -1;\n#endif\n    }\n    spec->argPtr = endPtr;\n    return 0;\n}\n#endif\n\n\n#ifndef SECUREC_FOR_WCHAR\nstatic int SecInputForChar(SecInt ch, SecScanSpec *spec, SecFileStream *stream, int *charCount)\n{\n    void *endPtr = spec->argPtr;\n    if (spec->isWChar > 0) {\n        wchar_t tempWChar = L'?';   /* set default char as ? */\n#if SECUREC_HAVE_MBTOWC\n        char temp[SECUREC_MULTI_BYTE_MAX_LEN + 1];\n        temp[0] = (char)ch;\n        temp[1] = '\\0';\n#if defined(SECUREC_COMPATIBLE_WIN_FORMAT)\n        if (SecIsLeadByte(ch)) {\n            temp[1] = (char)SecGetChar(stream, charCount);\n            temp[2] = '\\0'; /* 2 of string terminator position */\n        }\n        if (mbtowc(&tempWChar, temp, sizeof(temp)) <= 0) {\n            /* no string termination error for tool */\n            tempWChar = L'?';\n        }\n#else\n        if (SecIsLeadByte(ch)) {\n            int convRes = 0;\n            int di = 1;\n            /* in Linux like system, the string is encoded in UTF-8 */\n            while (convRes <= 0 && di < (int)MB_CUR_MAX && di < SECUREC_MULTI_BYTE_MAX_LEN) {\n                temp[di++] = (char)SecGetChar(stream, charCount);\n                temp[di] = '\\0';\n                convRes = mbtowc(&tempWChar, temp, sizeof(temp));\n            }\n            if (convRes <= 0) {\n                tempWChar = L'?';\n            }\n        } else {\n            if 
(mbtowc(&tempWChar, temp, sizeof(temp)) <= 0) {\n                /* no string termination error for tool */\n                tempWChar = L'?';\n            }\n        }\n#endif\n#endif /* SECUREC_HAVE_MBTOWC */\n        *(wchar_t UNALIGNED *)endPtr = tempWChar;\n        /* just copy L'?' if mbtowc fails, errno is set by mbtowc */\n        endPtr = (wchar_t *)endPtr + 1;\n        --spec->arrayWidth;\n        (void)charCount;\n        (void)stream;\n    } else {\n        *(char *)endPtr = (char)ch;\n        endPtr = (char *)endPtr + 1;\n        --spec->arrayWidth;\n    }\n    spec->argPtr = endPtr;\n    return 0;\n}\n#endif\n\n\n#if SECUREC_ENABLE_SCANF_FLOAT\n\n/* no not use localeconv()->decimal_pointif  onlay support  '.' */\n#define SECURE_IS_FLOAT_DECIMAL(ch) ((ch) == SECUREC_CHAR('.'))\n/*\n * init SecFloatSpec befor parse format\n */\nstatic void SecInitFloatSpec(SecFloatSpec *floatSpec)\n{\n    floatSpec->floatStr = floatSpec->buffer;\n    floatSpec->allocatedFloatStr = NULL;\n    floatSpec->floatStrSize = sizeof(floatSpec->buffer) / sizeof(floatSpec->buffer[0]);\n    floatSpec->floatStr = floatSpec->buffer;\n    floatSpec->floatStrUsedLen = 0;\n}\n\nstatic void SecClearFloatSpec(SecFloatSpec *floatSpec, int *doneCount)\n{\n     /* LSD 2014.3.6 add, clear the stack data */\n    if (memset_s(floatSpec->buffer, sizeof(floatSpec->buffer), 0,\n        sizeof(floatSpec->buffer)) != EOK) {\n        *doneCount = 0;  /* This is a dead code, just to meet the coding requirements */\n    }\n    if (floatSpec->allocatedFloatStr != NULL) {\n        /* pFloatStr can be alloced in SecUpdateFloatString function, clear and free it */\n        if (memset_s(floatSpec->allocatedFloatStr, floatSpec->floatStrSize * sizeof(SecChar), 0,\n            floatSpec->floatStrSize * sizeof(SecChar)) != EOK) {\n            *doneCount = 0; /* This is a dead code, just to meet the coding requirements */\n        }\n        SECUREC_FREE(floatSpec->allocatedFloatStr);\n        
floatSpec->allocatedFloatStr = NULL;\n        floatSpec->floatStr = NULL;\n    }\n}\n\n\n/*\n * scan value of exponent.\n * return 0 OK\n */\nstatic int SecInputFloatE(SecFileStream *stream, SecScanSpec *spec, SecFloatSpec *floatSpec, int *charCount)\n{\n    SecInt ch = SecGetChar(stream, charCount);\n    if (ch == SECUREC_CHAR('+') || ch == SECUREC_CHAR('-')) {\n        if (ch == SECUREC_CHAR('-') && SecUpdateFloatString((SecChar)'-', floatSpec) != 0) {\n            return -1;\n        }\n        if (spec->width != 0) {\n            ch = SecGetChar(stream, charCount);\n            --spec->width;\n        }\n    }\n\n    while (SECUREC_IS_DIGIT(ch) && spec->width-- != 0) {\n        if (SecUpdateFloatString((SecChar)ch, floatSpec) != 0) {\n            return -1;\n        }\n        ch = SecGetChar(stream, charCount);\n    }\n    return 0;\n}\n\n/*\n * scan %f.\n * return 0 OK\n */\nstatic int SecInputFloat(SecFileStream *stream, SecScanSpec *spec, SecFloatSpec *floatSpec, int *charCount)\n{\n    int started = -1;\n    SecInt ch = SecGetChar(stream, charCount);\n\n    floatSpec->floatStrUsedLen = 0;\n    if (ch == SECUREC_CHAR('-')) {\n        floatSpec->floatStr[floatSpec->floatStrUsedLen++] = SECUREC_CHAR('-');\n        --spec->width;\n        ch = SecGetChar(stream, charCount);\n    } else if (ch == SECUREC_CHAR('+')) {\n        --spec->width;\n        ch = SecGetChar(stream, charCount);\n    }\n\n    if (spec->widthSet == 0) {    /* must care width */\n        spec->width = -1; /* -1 is unlimited */\n    }\n\n    /* now get integral part */\n    while (SECUREC_IS_DIGIT(ch) && spec->width-- != 0) {\n        started = 0;\n        /* ch must be '0' - '9' */\n        if (SecUpdateFloatString((SecChar)ch, floatSpec) != 0) {\n            return -1;\n        }\n        ch = SecGetChar(stream, charCount);\n    }\n\n    /* now get fractional part */\n    if (SECURE_IS_FLOAT_DECIMAL((SecChar)ch) && spec->width-- != 0) {\n        /* now check for decimal */\n        if 
(SecUpdateFloatString((SecChar)ch, floatSpec) != 0) {\n            return -1;\n        }\n        ch = SecGetChar(stream, charCount);\n        while (SECUREC_IS_DIGIT(ch) && spec->width-- != 0) {\n            started = 0;\n            if (SecUpdateFloatString((SecChar)ch, floatSpec) != 0) {\n                return -1;\n            }\n            ch = SecGetChar(stream, charCount);\n        }\n    }\n\n    /* now get exponent part */\n    if (started == 0 && (ch == SECUREC_CHAR('e') || ch == SECUREC_CHAR('E')) && spec->width-- != 0) {\n        if (SecUpdateFloatString((SecChar)'e', floatSpec) != 0) {\n            return -1;\n        }\n        if (SecInputFloatE(stream, spec, floatSpec, charCount) != 0) {\n            return -1;\n        }\n    }\n    /* un set the last character that is not a floating point number */\n    SecUnGetChar(ch, stream, charCount);\n    /* Make sure  have a string terminator, buffer is large enough */\n    floatSpec->floatStr[floatSpec->floatStrUsedLen] = SECUREC_CHAR('\\0');\n    return started;\n\n}\n#endif\n\n/*\n * scan digital part of %d %i %o %u %x %p.\n * return 0 OK\n */\nstatic int SecInputNumberDigital(SecInt firstCh, SecFileStream *stream, SecScanSpec *spec, int *charCount)\n{\n    SecInt ch = firstCh;\n    int loopFlag = 0;\n    int started = -1;\n    while (loopFlag == 0) {\n        /* decode ch to number */\n        loopFlag = SecDecodeNumber(ch, spec);\n        if (loopFlag == 0) {\n            started = 0;\n            if (spec->widthSet != 0 && --spec->width == 0) {\n                loopFlag = 1;\n            } else {\n                ch = SecGetChar(stream, charCount);\n            }\n        } else {\n            SecUnGetChar(ch, stream, charCount);\n        }\n    }\n\n    /* Handling integer negative numbers and beyond max */\n    (*g_secFinishNumber[spec->isInt64Arg])(spec);\n    return started;\n\n}\n\n/*\n * scan %d %i %o %u %x %p.\n * return 0 OK\n */\nstatic int SecInputNumber(SecFileStream *stream, SecScanSpec 
*spec, int *charCount)\n{\n    SecInt ch = SecGetChar(stream, charCount);\n\n    if (ch == SECUREC_CHAR('+') || ch == SECUREC_CHAR('-')) {\n        if (ch == SECUREC_CHAR('-')) {\n            spec->negative = 1;\n        }\n        if (spec->widthSet != 0 && --spec->width == 0) {\n            return -1;\n        } else {\n            ch = SecGetChar(stream, charCount);\n        }\n    }\n\n    if (spec->oriComChr == SECUREC_CHAR('i')) {\n        /* i could be d, o, or x, use d as default */\n        spec->comChr = SECUREC_CHAR('d');\n    }\n\n    if (spec->oriComChr == SECUREC_CHAR('x') || spec->oriComChr == SECUREC_CHAR('i')) {\n        if (ch != SECUREC_CHAR('0')) {\n            /* scan number */\n            return SecInputNumberDigital(ch, stream, spec, charCount);\n        }\n        /* now input string may be 0x123 or 0X123 or just 0 */\n        /* get next char */\n        ch = SecGetChar(stream, charCount);\n        if ((SecChar)(ch) == SECUREC_CHAR('x') || (SecChar)ch == SECUREC_CHAR('X')) {\n            spec->comChr = SECUREC_CHAR('x');\n            ch = SecGetChar(stream, charCount);\n            /* length of 0x is 2 */\n            if (spec->widthSet != 0 && spec->width <= (1 + 1)) {\n                /* length not enough for \"0x\" */\n                return -1;\n            }\n            spec->width -= 2; /* Subtract 2 for the length of \"0x\" */\n        } else {\n            if (spec->oriComChr != SECUREC_CHAR('x')) {\n                spec->comChr = SECUREC_CHAR('o');\n            }\n            /* unset the character after 0 back to stream, input only '0' result is OK */\n            SecUnGetChar(ch, stream, charCount);\n            ch = SECUREC_CHAR('0');\n        }\n    }\n    /* scan number */\n    return SecInputNumberDigital(ch, stream, spec, charCount);\n}\n/*\n * scan %c %s %[\n * return 0 OK\n */\nstatic int SecInputString(SecFileStream *stream, SecScanSpec *spec,\n    const SecBracketTable *bracketTable, int *charCount, int 
*doneCount)\n{\n    void *startPtr = spec->argPtr;\n    int suppressed= 0;\n    int errNoMem = 0;\n\n    while (spec->widthSet == 0 || spec->width-- != 0) {\n        SecInt ch = SecGetChar(stream, charCount);\n        /* char  condition or string condition and bracket condition.\n         * only supports  wide characters with a maximum length of two bytes\n         */\n        if ((ch != SECUREC_EOF) && (spec->comChr == SECUREC_CHAR('c') ||\n            SECUREC_SCANF_STRING_CONDITION(spec->comChr, ch) ||\n            SECUREC_SCANF_BRACKET_CONDITION(spec->comChr, ch, bracketTable->table, bracketTable->mask))) {\n            if (spec->suppress != 0) {\n                /* Used to identify processed data for %*\n                 * use endPtr to identify will cause 613, so use suppressed\n                 */\n                suppressed = 1;\n                continue;\n            }\n            /* now suppress is not set */\n            if (spec->arrayWidth == 0) {\n                errNoMem = 1; /* We have exhausted the user's buffer */\n                break;\n            }\n#ifdef SECUREC_FOR_WCHAR\n            errNoMem = SecInputForWchar(ch, spec);\n#else\n            errNoMem = SecInputForChar(ch, spec, stream, charCount);\n#endif\n            if (errNoMem != 0) {\n                break;\n            }\n        } else {\n            SecUnGetChar(ch, stream, charCount);\n            break;\n        }\n    }\n\n    if (errNoMem != 0) {\n        /* In case of error, blank out the input buffer */\n        if (spec->suppress == 0) {\n            SecAddEndingZero(startPtr, spec);\n        }\n        return -1;\n    }\n\n    /* No input was scanned */\n    if ((spec->suppress != 0 && suppressed == 0) ||\n        (spec->suppress == 0 && startPtr == spec->argPtr)) {\n        return -1;\n    }\n\n    if (spec->suppress == 0) {\n        if (spec->comChr != 'c') {\n            /* null-terminate strings */\n            SecAddEndingZero(spec->argPtr, spec);\n        }\n        
*doneCount = *doneCount + 1;\n    }\n    return 0;\n}\n\n#ifdef SECUREC_FOR_WCHAR\n/*\n * alloce buffer for wchar version of %[.\n * return 0 OK\n */\nstatic int SecAllocBracketTable(SecBracketTable *bracketTable)\n{\n    if (bracketTable->table == NULL) {\n        /* table should be freed after use */\n        bracketTable->table = (unsigned char *)SECUREC_MALLOC(SECUREC_BRACKET_TABLE_SIZE);\n        if (bracketTable->table == NULL) {\n            return -1;\n        }\n    }\n    return 0;\n}\n\n/*\n * free buffer for wchar version of %[\n */\nstatic void SecFreeBracketTable(SecBracketTable *bracketTable)\n{\n    if (bracketTable->table != NULL) {\n        SECUREC_FREE(bracketTable->table);\n        bracketTable->table = NULL;\n    }\n}\n#endif\n\n#ifdef SECUREC_FOR_WCHAR\n/*\n *  Formatting input core functions for wchar version.Called by a function such as vsscanf_s\n */\nint SecInputSW(SecFileStream *stream, const wchar_t *cFormat, va_list argList)\n#else\n/*\n * Formatting input core functions for char version.Called by a function such as vswscanf_s\n */\nint SecInputS(SecFileStream *stream, const char *cFormat, va_list argList)\n#endif\n{\n    const SecUnsignedChar *format = (const SecUnsignedChar *)cFormat;\n    SecBracketTable bracketTable = SECUREC_INIT_BRACKET_TABLE;\n    SecScanSpec spec;\n    SecInt ch = 0;\n    int charCount = 0;\n    int doneCount = 0;\n    int formatError = 0;\n    int paraIsNull = 0;\n#if SECUREC_ENABLE_SCANF_FLOAT\n    SecFloatSpec floatSpec;\n#endif\n    int match = 0;\n    int errRet = 0;\n#if SECUREC_ENABLE_SCANF_FLOAT\n    SecInitFloatSpec(&floatSpec);\n#endif\n    /* format must not NULL */\n    /* use err < 1 to claer 845 */\n    while (errRet < 1 && *format != SECUREC_CHAR('\\0')) {\n        /* skip space in format and space in input */\n        if (SECUREC_IS_SPACE(*format)) {\n            SecInt nonSpaceChar = SecSkipSpaceChar(stream, &charCount);\n            /* eat all space chars and put fist no space char backup */\n  
          SecUnGetChar(nonSpaceChar, stream, &charCount);\n            SecSkipSpaceFormat(&format);\n            continue;\n        }\n\n        if (*format != SECUREC_CHAR('%')) {\n            ch = SecGetChar(stream, &charCount);\n            if ((int)(*format++) != (int)(ch)) {\n                SecUnGetChar(ch, stream, &charCount);\n                ++errRet; /* use plus to clear 845 */\n                continue;\n            }\n#ifndef SECUREC_FOR_WCHAR\n            if (SecIsLeadByte(ch) && SecDecodeLeadByte(ch, &format, stream, &charCount) != 0) {\n                ++errRet;\n                continue;\n            }\n#endif\n            /* for next %n */\n            if ((ch == SECUREC_EOF) && ((*format != SECUREC_CHAR('%')) || (*(format + 1) != SECUREC_CHAR('n')))) {\n                break;\n            }\n            continue;\n        }\n\n        /* now *format is % */\n        /* set default value for each % */\n        SecSetDefaultScanSpec(&spec);\n        if (SecDecodeScanFlag(&format, &spec) != 0) {\n            formatError = 1;\n            ++errRet;\n            continue;\n        }\n        /* update wchar flag for %S %C */\n        SecUpdateWcharFlagByType(*format, &spec);\n\n#if SECUREC_HAVE_WCHART == 0\n        /* in kernel not support wide char */\n        if (spec.isWChar > 0) {\n            formatError = 1;\n            ++errRet;\n            continue;\n        }\n#endif\n        if (spec.widthSet != 0 && spec.width == 0) {\n            /* 0 width in format */\n            ++errRet;\n            continue;\n        }\n\n        spec.comChr = (unsigned char)(*format) | (SECUREC_CHAR('a') - SECUREC_CHAR('A')); /* to lowercase */\n        spec.oriComChr = spec.comChr;\n\n        if (spec.comChr != SECUREC_CHAR('n')) {\n            if (spec.comChr != SECUREC_CHAR('c') && spec.comChr != SECUREC_BRACE) {\n                ch = SecSkipSpaceChar(stream, &charCount);\n            } else {\n                ch = SecGetChar(stream, &charCount);\n            
}\n            if (ch == SECUREC_EOF) {\n                ++errRet;\n                continue;\n            }\n        }\n\n        /* now no 0 width in format and get one char from input */\n        switch (spec.comChr) {\n            case SECUREC_CHAR('c'): /* also 'C' */\n                /* fall-through */ /* FALLTHRU */\n            case SECUREC_CHAR('s'): /* also 'S': */\n                /* fall-through */ /* FALLTHRU */\n            case SECUREC_BRACE:\n                /* check dest buffer and size */\n                if (spec.suppress == 0) {\n                    spec.argPtr = (void *)va_arg(argList, void *);\n                    if (spec.argPtr == NULL) {\n                        paraIsNull = 1;\n                        ++errRet;\n                        continue;\n                    }\n                    /* Get the next argument - size of the array in characters */\n#ifdef SECUREC_ON_64BITS\n                    spec.arrayWidth = ((size_t)(va_arg(argList, size_t))) & 0xFFFFFFFFUL;\n#else /* !SECUREC_ON_64BITS */\n                    spec.arrayWidth = (size_t)va_arg(argList, size_t);\n#endif\n                    if (spec.arrayWidth == 0 || (spec.isWChar <= 0 && spec.arrayWidth > SECUREC_STRING_MAX_LEN) ||\n                        (spec.isWChar > 0 && spec.arrayWidth > SECUREC_WCHAR_STRING_MAX_LEN)) {\n                        /* do not clear buffer just go error */\n                        ++errRet;\n                        continue;\n                    }\n                    /* One element is needed for '\\0' for %s and %[ */\n                    if (spec.comChr != SECUREC_CHAR('c')) {\n                        --spec.arrayWidth;\n                    }\n                } else {\n                    /*  Set argPtr to  NULL  is necessary, in supress mode we don't use argPtr to store data */\n                    spec.argPtr = NULL;\n                }\n\n                if (spec.comChr == 'c') {\n                    if (spec.widthSet == 0) {\n                   
     spec.widthSet = 1;\n                        spec.width = 1;\n                    }\n                } else if (spec.comChr == SECUREC_BRACE) {\n                    /* malloc  when  first %[ is meet  for wchar version */\n#ifdef SECUREC_FOR_WCHAR\n                    if (SecAllocBracketTable(&bracketTable) != 0) {\n                        ++errRet;\n                        continue;\n                    }\n\n#endif\n                    (void)memset(bracketTable.table, 0, (size_t)SECUREC_BRACKET_TABLE_SIZE);\n                    if (SecSetupBracketTable(&format, &bracketTable) != 0) {\n                        ++errRet;\n                        continue;\n                    }\n\n                    if (*format == SECUREC_CHAR('\\0')) {\n                        if (spec.suppress == 0 && spec.arrayWidth > 0) {\n                            SecAddEndingZero(spec.argPtr, &spec);\n                        }\n                        ++errRet;\n                        /* truncated format */\n                        continue;\n                    }\n\n                }\n                /* un set last char to stream */\n                SecUnGetChar(ch, stream, &charCount);\n                /* scanset completed.  
Now read string */\n                if (SecInputString(stream, &spec, &bracketTable, &charCount, &doneCount) != 0) {\n                    ++errRet;\n                    continue;\n                }\n                break;\n            case SECUREC_CHAR('p'):\n                /* make %hp same as %p */\n                spec.numberWidth = SECUREC_NUM_WIDTH_INT;\n#ifdef SECUREC_ON_64BITS\n                spec.isInt64Arg = 1;\n#endif\n                /* fall-through */ /* FALLTHRU */\n            case SECUREC_CHAR('o'):    /* fall-through */ /* FALLTHRU */\n            case SECUREC_CHAR('u'):    /* fall-through */ /* FALLTHRU */\n            case SECUREC_CHAR('d'):    /* fall-through */ /* FALLTHRU */\n            case SECUREC_CHAR('i'):    /* fall-through */ /* FALLTHRU */\n            case SECUREC_CHAR('x'):\n                /* un set last char to stream */\n                SecUnGetChar(ch, stream, &charCount);\n                if (SecInputNumber(stream, &spec, &charCount) != 0) {\n                    ++errRet;\n                    continue;\n                }\n                if (spec.suppress == 0) {\n                    spec.argPtr = (void *)va_arg(argList, void *);\n                    if (spec.argPtr == NULL) {\n                        paraIsNull = 1;\n                        ++errRet;\n                        continue;\n                    }\n                    SecAssignNumber(&spec);\n                    ++doneCount;\n                }\n                break;\n            case SECUREC_CHAR('n'):    /* char count */\n                if (spec.suppress == 0) {\n                    spec.argPtr = (void *)va_arg(argList, void *);\n                    if (spec.argPtr == NULL) {\n                        paraIsNull = 1;\n                        ++errRet;\n                        continue;\n                    }\n                    spec.number = (unsigned long)(unsigned int)charCount;\n                    spec.isInt64Arg = 0;\n                    
SecAssignNumber(&spec);\n                }\n                break;\n            case SECUREC_CHAR('e'):    /* fall-through */ /* FALLTHRU */\n            case SECUREC_CHAR('f'):    /* fall-through */ /* FALLTHRU */\n            case SECUREC_CHAR('g'):    /* scan a float */\n#if SECUREC_ENABLE_SCANF_FLOAT\n                /* un set last char to stream */\n                SecUnGetChar(ch, stream, &charCount);\n                if (SecInputFloat(stream, &spec, &floatSpec, &charCount) != 0) {\n                    ++errRet;\n                    continue;\n                }\n                if (spec.suppress == 0) {\n                    spec.argPtr = (void *)va_arg(argList, void *);\n                    if (spec.argPtr == NULL) {\n                        ++errRet;\n                        paraIsNull = 1;\n                        continue;\n                    }\n#ifdef SECUREC_FOR_WCHAR\n                    if (SecAssignFloatW(&floatSpec, &spec) != 0) {\n                        ++errRet;\n                        continue;\n                    }\n#else\n                    SecAssignFloat(floatSpec.floatStr, spec.numberWidth, spec.argPtr);\n#endif\n                    ++doneCount;\n                }\n\n                break;\n#else /* SECUREC_ENABLE_SCANF_FLOAT */\n                ++errRet;\n                continue;\n#endif\n            default:\n                if ((int)(*format) != (int)ch) {\n                    SecUnGetChar(ch, stream, &charCount);\n                    formatError = 1;\n                    ++errRet;\n                    continue;\n                } else {\n                    --match;\n                }\n        }\n\n        ++match;\n        ++format;\n        if ((ch == SECUREC_EOF) && ((*format != SECUREC_CHAR('%')) || (*(format + 1) != SECUREC_CHAR('n')))) {\n            break;\n        }\n    }\n\n#ifdef SECUREC_FOR_WCHAR\n    SecFreeBracketTable(&bracketTable);\n#endif\n\n#if SECUREC_ENABLE_SCANF_FLOAT\n    SecClearFloatSpec(&floatSpec, 
&doneCount);\n#endif\n\n#if SECUREC_ENABLE_SCANF_FILE\n    SecAdjustStream(stream);\n#endif\n\n    if (ch == SECUREC_EOF) {\n        return ((doneCount || match) ? doneCount : SECUREC_SCANF_EINVAL);\n    } else if (formatError != 0 || paraIsNull != 0) {\n        /* Invalid Input Format or parameter */\n        return SECUREC_SCANF_ERROR_PARA;\n    }\n\n    return doneCount;\n}\n\n#if SECUREC_ENABLE_SCANF_FILE\n\n#if defined(SECUREC_NO_STD_UNGETC)\n/*\n *  Get char  from stdin or buffer\n */\nstatic SecInt SecGetCharFromStdin(SecFileStream *stream)\n{\n    SecInt ch;\n    if (stream->fUnget == 1) {\n        ch = (SecInt) stream->lastChar;\n        stream->fUnget = 0;\n    } else {\n        ch = SECUREC_GETC(stream->pf);\n        stream->lastChar = (unsigned int)ch;\n    }\n    return ch;\n}\n#else\n/*\n *  Get char  from stdin or buffer use std function\n */\nstatic SecInt SecGetCharFromStdin(const SecFileStream *stream)\n{\n    SecInt ch;\n    ch = SECUREC_GETC(stream->pf);\n    return ch;\n}\n#endif\n\nstatic void SecSkipBomHeader(SecFileStream *stream)\n{\n#ifdef SECUREC_FOR_WCHAR\n    if (stream->count >= SECUREC_BOM_HEADER_SIZE &&\n        (((unsigned char)(stream->base[0]) == SECUREC_BOM_HEADER_LE_1ST &&\n        (unsigned char)(stream->base[1]) == SECUREC_BOM_HEADER_LE_2ST) ||\n        ((unsigned char)(stream->base[0]) == SECUREC_BOM_HEADER_BE_1ST &&\n        (unsigned char)(stream->base[1]) == SECUREC_BOM_HEADER_BE_2ST))) {\n\n        /* the stream->count must be a  multiple of  sizeof(SecChar),\n         * otherwise this function will return SECUREC_EOF when read the last character\n         */\n        if ((stream->count - SECUREC_BOM_HEADER_SIZE) % (int)sizeof(SecChar) != 0) {\n            int ret = (int)fread(stream->base + stream->count, (size_t)1,\n                                 (size_t)SECUREC_BOM_HEADER_SIZE, stream->pf);\n            if (ret > 0 && ret <= SECUREC_BUFFERED_BLOK_SIZE) {\n                stream->count += ret;\n            }\n        
}\n        /* it's BOM header, skip */\n        stream->count -= SECUREC_BOM_HEADER_SIZE;\n        stream->cur += SECUREC_BOM_HEADER_SIZE;\n    }\n#else\n    if (stream->count >= SECUREC_UTF8_BOM_HEADER_SIZE &&\n        (unsigned char)(stream->base[0]) == SECUREC_UTF8_BOM_HEADER_1ST &&\n        (unsigned char)(stream->base[1]) == SECUREC_UTF8_BOM_HEADER_2ND &&\n        (unsigned char)(stream->base[2]) == SECUREC_UTF8_BOM_HEADER_3RD) { /* 2 offset of third head character */\n        /* it's BOM header, skip */\n        stream->count -= SECUREC_UTF8_BOM_HEADER_SIZE;\n        stream->cur += SECUREC_UTF8_BOM_HEADER_SIZE;\n    }\n#endif\n}\n/*\n *  Get char  from file stream or buffer\n */\nstatic SecInt SecGetCharFromFile(SecFileStream *stream)\n{\n    SecInt ch;\n    if (stream->count == 0) {\n        int firstReadOnFile = 0;\n        /* load file to buffer */\n        if (stream->base == NULL) {\n            stream->base = (char *)SECUREC_MALLOC(SECUREC_BUFFERED_BLOK_SIZE + 1);\n            if (stream->base == NULL) {\n                return SECUREC_EOF;\n            }\n            stream->base[SECUREC_BUFFERED_BLOK_SIZE] = '\\0';   /* for tool Warning string null */\n        }\n        /* LSD add 2014.3.21 */\n        if (stream->oriFilePos == SECUREC_UNINITIALIZED_FILE_POS) {\n            stream->oriFilePos = ftell(stream->pf);   /* save original file read position */\n            firstReadOnFile = 1;\n        }\n        stream->count = (int)fread(stream->base, (size_t)1, (size_t)SECUREC_BUFFERED_BLOK_SIZE, stream->pf);\n        stream->base[SECUREC_BUFFERED_BLOK_SIZE] = '\\0';   /* for tool Warning string null */\n        if (stream->count == 0 || stream->count > SECUREC_BUFFERED_BLOK_SIZE) {\n            return SECUREC_EOF;\n        }\n        stream->cur = stream->base;\n        stream->flag |= SECUREC_LOAD_FILE_TO_MEM_FLAG;\n        if (firstReadOnFile != 0) {\n            SecSkipBomHeader(stream);\n        }\n    }\n    /* according  wchar_t has two bytes */\n 
   ch = (SecInt)((stream->count -= (int)sizeof(SecChar)) >= 0 ? \\\n                  (SecInt)(SECUREC_CHAR_MASK & \\\n                  (unsigned int)(int)(*((const SecChar *)(const void *)stream->cur))) : SECUREC_EOF);\n    stream->cur += sizeof(SecChar);\n\n    if (ch != SECUREC_EOF && stream->base != NULL) {\n        stream->fileRealRead += (int)sizeof(SecChar);\n    }\n    return ch;\n}\n#endif\n\n/*\n *  Get char  for wchar version\n */\nstatic SecInt SecGetChar(SecFileStream *stream, int *counter)\n{\n    SecInt ch = SECUREC_EOF;\n#if SECUREC_ENABLE_SCANF_FILE\n    if ((stream->flag & SECUREC_FROM_STDIN_FLAG) > 0) {\n        ch = SecGetCharFromStdin(stream);\n    } else if ((stream->flag & SECUREC_FILE_STREAM_FLAG) > 0) {\n        ch = SecGetCharFromFile(stream);\n    }\n#endif\n    if ((stream->flag & SECUREC_MEM_STR_FLAG) > 0) {\n        /* according  wchar_t has two bytes */\n        ch = (SecInt)((stream->count -= (int)sizeof(SecChar)) >= 0 ? \\\n                      (SecInt)(SECUREC_CHAR_MASK & \\\n                      (unsigned int)(int)(*((const SecChar *)(const void *)stream->cur))) : SECUREC_EOF);\n        stream->cur += sizeof(SecChar);\n    }\n    *counter = *counter + 1;\n    return ch;\n}\n\n/*\n *  Unget Public realizatio char  for wchar and char version\n */\nstatic void SecUnGetCharImpl(SecInt ch, SecFileStream *stream)\n{\n    if ((stream->flag & SECUREC_FROM_STDIN_FLAG) > 0) {\n#if SECUREC_ENABLE_SCANF_FILE\n#if defined(SECUREC_NO_STD_UNGETC)\n        stream->lastChar = (unsigned int)ch;\n        stream->fUnget = 1;\n#else\n        (void)SECUREC_UN_GETC(ch, stream->pf);\n#endif\n#else\n        (void)ch; /* to clear e438 last value assigned not used , the compiler will optimize this code */\n#endif\n    } else if ((stream->flag & SECUREC_MEM_STR_FLAG) || (stream->flag & SECUREC_LOAD_FILE_TO_MEM_FLAG) > 0) {\n        if (stream->cur > stream->base) {\n            stream->cur -= sizeof(SecChar);\n            stream->count += 
(int)sizeof(SecChar);\n        }\n    }\n#if SECUREC_ENABLE_SCANF_FILE\n    if ((stream->flag & SECUREC_FILE_STREAM_FLAG) > 0 && stream->base) {\n        stream->fileRealRead -= (int)sizeof(SecChar);\n    }\n#endif\n}\n\n/*\n *  Unget char  for char version\n */\nstatic void SecUnGetChar(SecInt ch, SecFileStream *stream, int *counter)\n{\n    if (ch != SECUREC_EOF) {\n        SecUnGetCharImpl(ch, stream);\n    }\n    *counter = *counter - 1;\n}\n\n/*\n *  Skip space char by isspace\n */\nstatic SecInt SecSkipSpaceChar(SecFileStream *stream, int *counter)\n{\n    SecInt ch;\n    do {\n        ch = SecGetChar(stream, counter);\n    } while (ch != SECUREC_EOF && SECUREC_IS_SPACE(ch));\n    return ch;\n}\n#endif /* __INPUT_INL__5D13A042_DC3F_4ED9_A8D1_882811274C27 */\n\n"
  },
  {
    "path": "third_party/securec/src/memcpy_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_INLINE_DO_MEMCPY   1\n#include \"securecutil.h\"\n\n#ifndef SECUREC_MEMCOPY_WITH_PERFORMANCE\n#define SECUREC_MEMCOPY_WITH_PERFORMANCE 0\n#endif\n\n#if SECUREC_WITH_PERFORMANCE_ADDONS || SECUREC_MEMCOPY_WITH_PERFORMANCE\n#ifndef SECUREC_MEMCOPY_THRESHOLD_SIZE\n#define SECUREC_MEMCOPY_THRESHOLD_SIZE 64UL\n#endif\n/*\n * Determine whether the address is 8-byte aligned, use static to increase performance\n * return 0 is aligned\n */\nstatic int SecIsAddrAligned8(const void *addr, const void *zeroAddr)\n{\n    return (int)(((size_t)((const char*)addr - (const char*)zeroAddr)) & 7); /* use 7 to check aligned 8 */\n}\n\n#define SECUREC_SMALL_MEM_COPY do { \\\n    if (SECUREC_ADDR_ALIGNED_8(dest) && SECUREC_ADDR_ALIGNED_8(src)) { \\\n        /* use struct assignment */ \\\n        switch (count) { \\\n            case 1: \\\n                *(SecStrBuf1 *)dest = *(const SecStrBuf1 *)src; \\\n                break; \\\n            case 2: \\\n                *(SecStrBuf2 *)dest = *(const SecStrBuf2 *)src; \\\n                break; \\\n            case 3: \\\n                *(SecStrBuf3 *)dest = *(const SecStrBuf3 *)src; \\\n                break; \\\n            case 4: \\\n                *(SecStrBuf4 *)dest = *(const SecStrBuf4 *)src; \\\n                break; \\\n            case 5: \\\n                *(SecStrBuf5 
*)dest = *(const SecStrBuf5 *)src; \\\n                break; \\\n            case 6: \\\n                *(SecStrBuf6 *)dest = *(const SecStrBuf6 *)src; \\\n                break; \\\n            case 7: \\\n                *(SecStrBuf7 *)dest = *(const SecStrBuf7 *)src; \\\n                break; \\\n            case 8: \\\n                *(SecStrBuf8 *)dest = *(const SecStrBuf8 *)src; \\\n                break; \\\n            case 9: \\\n                *(SecStrBuf9 *)dest = *(const SecStrBuf9 *)src; \\\n                break; \\\n            case 10: \\\n                *(SecStrBuf10 *)dest = *(const SecStrBuf10 *)src; \\\n                break; \\\n            case 11: \\\n                *(SecStrBuf11 *)dest = *(const SecStrBuf11 *)src; \\\n                break; \\\n            case 12: \\\n                *(SecStrBuf12 *)dest = *(const SecStrBuf12 *)src; \\\n                break; \\\n            case 13: \\\n                *(SecStrBuf13 *)dest = *(const SecStrBuf13 *)src; \\\n                break; \\\n            case 14: \\\n                *(SecStrBuf14 *)dest = *(const SecStrBuf14 *)src; \\\n                break; \\\n            case 15: \\\n                *(SecStrBuf15 *)dest = *(const SecStrBuf15 *)src; \\\n                break; \\\n            case 16: \\\n                *(SecStrBuf16 *)dest = *(const SecStrBuf16 *)src; \\\n                break; \\\n            case 17: \\\n                *(SecStrBuf17 *)dest = *(const SecStrBuf17 *)src; \\\n                break; \\\n            case 18: \\\n                *(SecStrBuf18 *)dest = *(const SecStrBuf18 *)src; \\\n                break; \\\n            case 19: \\\n                *(SecStrBuf19 *)dest = *(const SecStrBuf19 *)src; \\\n                break; \\\n            case 20: \\\n                *(SecStrBuf20 *)dest = *(const SecStrBuf20 *)src; \\\n                break; \\\n            case 21: \\\n                *(SecStrBuf21 *)dest = *(const SecStrBuf21 *)src; \\\n                
break; \\\n            case 22: \\\n                *(SecStrBuf22 *)dest = *(const SecStrBuf22 *)src; \\\n                break; \\\n            case 23: \\\n                *(SecStrBuf23 *)dest = *(const SecStrBuf23 *)src; \\\n                break; \\\n            case 24: \\\n                *(SecStrBuf24 *)dest = *(const SecStrBuf24 *)src; \\\n                break; \\\n            case 25: \\\n                *(SecStrBuf25 *)dest = *(const SecStrBuf25 *)src; \\\n                break; \\\n            case 26: \\\n                *(SecStrBuf26 *)dest = *(const SecStrBuf26 *)src; \\\n                break; \\\n            case 27: \\\n                *(SecStrBuf27 *)dest = *(const SecStrBuf27 *)src; \\\n                break; \\\n            case 28: \\\n                *(SecStrBuf28 *)dest = *(const SecStrBuf28 *)src; \\\n                break; \\\n            case 29: \\\n                *(SecStrBuf29 *)dest = *(const SecStrBuf29 *)src; \\\n                break; \\\n            case 30: \\\n                *(SecStrBuf30 *)dest = *(const SecStrBuf30 *)src; \\\n                break; \\\n            case 31: \\\n                *(SecStrBuf31 *)dest = *(const SecStrBuf31 *)src; \\\n                break; \\\n            case 32: \\\n                *(SecStrBuf32 *)dest = *(const SecStrBuf32 *)src; \\\n                break; \\\n            case 33: \\\n                *(SecStrBuf33 *)dest = *(const SecStrBuf33 *)src; \\\n                break; \\\n            case 34: \\\n                *(SecStrBuf34 *)dest = *(const SecStrBuf34 *)src; \\\n                break; \\\n            case 35: \\\n                *(SecStrBuf35 *)dest = *(const SecStrBuf35 *)src; \\\n                break; \\\n            case 36: \\\n                *(SecStrBuf36 *)dest = *(const SecStrBuf36 *)src; \\\n                break; \\\n            case 37: \\\n                *(SecStrBuf37 *)dest = *(const SecStrBuf37 *)src; \\\n                break; \\\n            case 38: \\\n            
    *(SecStrBuf38 *)dest = *(const SecStrBuf38 *)src; \\\n                break; \\\n            case 39: \\\n                *(SecStrBuf39 *)dest = *(const SecStrBuf39 *)src; \\\n                break; \\\n            case 40: \\\n                *(SecStrBuf40 *)dest = *(const SecStrBuf40 *)src; \\\n                break; \\\n            case 41: \\\n                *(SecStrBuf41 *)dest = *(const SecStrBuf41 *)src; \\\n                break; \\\n            case 42: \\\n                *(SecStrBuf42 *)dest = *(const SecStrBuf42 *)src; \\\n                break; \\\n            case 43: \\\n                *(SecStrBuf43 *)dest = *(const SecStrBuf43 *)src; \\\n                break; \\\n            case 44: \\\n                *(SecStrBuf44 *)dest = *(const SecStrBuf44 *)src; \\\n                break; \\\n            case 45: \\\n                *(SecStrBuf45 *)dest = *(const SecStrBuf45 *)src; \\\n                break; \\\n            case 46: \\\n                *(SecStrBuf46 *)dest = *(const SecStrBuf46 *)src; \\\n                break; \\\n            case 47: \\\n                *(SecStrBuf47 *)dest = *(const SecStrBuf47 *)src; \\\n                break; \\\n            case 48: \\\n                *(SecStrBuf48 *)dest = *(const SecStrBuf48 *)src; \\\n                break; \\\n            case 49: \\\n                *(SecStrBuf49 *)dest = *(const SecStrBuf49 *)src; \\\n                break; \\\n            case 50: \\\n                *(SecStrBuf50 *)dest = *(const SecStrBuf50 *)src; \\\n                break; \\\n            case 51: \\\n                *(SecStrBuf51 *)dest = *(const SecStrBuf51 *)src; \\\n                break; \\\n            case 52: \\\n                *(SecStrBuf52 *)dest = *(const SecStrBuf52 *)src; \\\n                break; \\\n            case 53: \\\n                *(SecStrBuf53 *)dest = *(const SecStrBuf53 *)src; \\\n                break; \\\n            case 54: \\\n                *(SecStrBuf54 *)dest = *(const SecStrBuf54 
*)src; \\\n                break; \\\n            case 55: \\\n                *(SecStrBuf55 *)dest = *(const SecStrBuf55 *)src; \\\n                break; \\\n            case 56: \\\n                *(SecStrBuf56 *)dest = *(const SecStrBuf56 *)src; \\\n                break; \\\n            case 57: \\\n                *(SecStrBuf57 *)dest = *(const SecStrBuf57 *)src; \\\n                break; \\\n            case 58: \\\n                *(SecStrBuf58 *)dest = *(const SecStrBuf58 *)src; \\\n                break; \\\n            case 59: \\\n                *(SecStrBuf59 *)dest = *(const SecStrBuf59 *)src; \\\n                break; \\\n            case 60: \\\n                *(SecStrBuf60 *)dest = *(const SecStrBuf60 *)src; \\\n                break; \\\n            case 61: \\\n                *(SecStrBuf61 *)dest = *(const SecStrBuf61 *)src; \\\n                break; \\\n            case 62: \\\n                *(SecStrBuf62 *)dest = *(const SecStrBuf62 *)src; \\\n                break; \\\n            case 63: \\\n                *(SecStrBuf63 *)dest = *(const SecStrBuf63 *)src; \\\n                break; \\\n            case 64: \\\n                *(SecStrBuf64 *)dest = *(const SecStrBuf64 *)src; \\\n                break; \\\n            default: \\\n                break; \\\n        } /* END switch */ \\\n    } else { \\\n        char *tmpDest = (char *)dest; \\\n        const char *tmpSrc = (const char *)src; \\\n        switch (count) { \\\n            case 64: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 63: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 62: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 61: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n           
 case 60: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 59: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 58: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 57: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 56: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 55: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 54: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 53: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 52: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 51: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 50: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 49: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 48: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 47: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 46: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 45: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through 
*/ /* FALLTHRU */ \\\n            case 44: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 43: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 42: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 41: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 40: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 39: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 38: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 37: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 36: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 35: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 34: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 33: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 32: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 31: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 30: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 29: \\\n                *(tmpDest++) = *(tmpSrc++); 
\\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 28: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 27: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 26: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 25: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 24: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 23: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 22: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 21: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 20: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 19: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 18: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 17: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 16: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 15: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 14: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 13: \\\n            
    *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 12: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 11: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 10: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 9: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 8: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 7: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 6: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 5: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 4: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 3: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 2: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 1: \\\n                *(tmpDest++) = *(tmpSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            default: \\\n                break; \\\n        } \\\n    } \\\n} SECUREC_WHILE_ZERO\n#endif\n\n/*\n * Handling errors\n */\nstatic errno_t SecMemcpyError(void *dest, size_t destMax, const void *src, size_t count)\n{\n    if (destMax == 0 || destMax > SECUREC_MEM_MAX_LEN) {\n        SECUREC_ERROR_INVALID_RANGE(\"memcpy_s\");\n        return ERANGE;\n    }\n    if (dest == NULL || 
src == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"memcpy_s\");\n        if (dest != NULL) {\n            (void)memset(dest, 0, destMax);\n            return EINVAL_AND_RESET;\n        }\n        return EINVAL;\n    }\n    if (count > destMax) {\n        (void)memset(dest, 0, destMax);\n        SECUREC_ERROR_INVALID_RANGE(\"memcpy_s\");\n        return ERANGE_AND_RESET;\n    }\n    if (dest == src) {\n        return EOK;\n    }\n    if ((dest > src && dest < (const void *)((const unsigned char *)src + count)) || \\\n        (src > dest && src < (void *)((unsigned char *)dest + count))) {\n        (void)memset(dest, 0, destMax);\n        SECUREC_ERROR_BUFFER_OVERLAP(\"memcpy_s\");\n        return EOVERLAP_AND_RESET;\n    }\n    /* count == 0 also return EOK */\n    return EOK;\n}\n\n#if SECUREC_WITH_PERFORMANCE_ADDONS || SECUREC_MEMCOPY_WITH_PERFORMANCE\n/*\n * Performance optimization\n */\nstatic void SecDoMemcpyOpt(void *dest, const void *src, size_t count)\n{\n    if (count > SECUREC_MEMCOPY_THRESHOLD_SIZE) {\n        SecDoMemcpy(dest, src, count);\n    } else {\n        SECUREC_SMALL_MEM_COPY;\n    }\n    return;\n}\n#endif\n\n#if defined(SECUREC_COMPATIBLE_WIN_FORMAT)\n    /* fread API in windows will call memcpy_s and pass 0xffffffff to destMax.\n     * To avoid the failure of fread, we don't check desMax limit.\n     */\n#define SECUREC_MEMCPY_PARAM_OK(dest, destMax, src, count) (SECUREC_LIKELY((count) <= (destMax) && \\\n    (dest) != NULL && (src) != NULL && \\\n    (count) > 0 && SECUREC_MEMORY_NO_OVERLAP((dest), (src), (count))))\n#else\n#define SECUREC_MEMCPY_PARAM_OK(dest, destMax, src, count) (SECUREC_LIKELY((count) <= (destMax) && \\\n    (dest) != NULL && (src) != NULL && \\\n    (destMax) <= SECUREC_MEM_MAX_LEN && \\\n    (count) > 0 && SECUREC_MEMORY_NO_OVERLAP((dest), (src), (count))))\n#endif\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The memcpy_s function copies n characters from the object pointed to by src into the object pointed to by 
dest\n *\n * <INPUT PARAMETERS>\n *    dest                      Destination buffer.\n *    destMax                   Size of the destination buffer.\n *    src                       Buffer to copy from.\n *    count                     Number of characters to copy\n *\n * <OUTPUT PARAMETERS>\n *    dest buffer               is updated.\n *\n * <RETURN VALUE>\n *    EOK                      Success\n *    EINVAL                   dest is  NULL and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN\n *    EINVAL_AND_RESET         dest != NULL and src is NULL and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN\n *    ERANGE                   destMax > SECUREC_MEM_MAX_LEN or destMax is 0\n *    ERANGE_AND_RESET         count > destMax and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN\n *                             and dest  !=  NULL  and src != NULL\n *    EOVERLAP_AND_RESET       dest buffer and source buffer are overlapped and\n *                             count <= destMax destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN and dest  !=  NULL\n *                             and src != NULL  and dest != src\n *\n *    if an error occurred, dest will be filled with 0.\n *    If the source and destination overlap, the behavior of memcpy_s is undefined.\n *    Use memmove_s to handle overlapping regions.\n */\nerrno_t memcpy_s(void *dest, size_t destMax, const void *src, size_t count)\n{\n    if (SECUREC_MEMCPY_PARAM_OK(dest, destMax, src, count)) {\n#if SECUREC_MEMCOPY_WITH_PERFORMANCE\n        SecDoMemcpyOpt(dest, src, count);\n#else\n        SecDoMemcpy(dest, src, count);\n#endif\n        return EOK;\n    }\n    /* meet some runtime violation, return error code */\n    return SecMemcpyError(dest, destMax, src, count);\n}\n\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(memcpy_s);\n#endif\n\n#if SECUREC_WITH_PERFORMANCE_ADDONS\n/*\n * Performance optimization\n */\nerrno_t memcpy_sOptAsm(void *dest, size_t destMax, const void *src, size_t count)\n{\n    if 
(SECUREC_MEMCPY_PARAM_OK(dest, destMax, src, count)) {\n        SecDoMemcpyOpt(dest, src, count);\n        return EOK;\n    }\n    /* meet some runtime violation, return error code */\n    return SecMemcpyError(dest, destMax, src, count);\n}\n\n/* trim judgement on \"destMax <= SECUREC_MEM_MAX_LEN\" */\nerrno_t memcpy_sOptTc(void *dest, size_t destMax, const void *src, size_t count)\n{\n    if (SECUREC_LIKELY(count <= destMax && dest != NULL && src != NULL && \\\n                       count > 0 && \\\n                       ((dest > src && (const void *)((const unsigned char *)src + count) <= dest) || \\\n                       (src > dest && (void *)((unsigned char *)dest + count) <= src)))) {\n        SecDoMemcpyOpt(dest, src, count);\n        return EOK;\n    }\n    /* meet some runtime violation, return error code */\n    return SecMemcpyError(dest, destMax, src, count);\n}\n#endif\n\n"
  },
  {
    "path": "third_party/securec/src/memmove_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securecutil.h\"\n\n#ifdef SECUREC_NOT_CALL_LIBC_CORE_API\n/*\n * Implementing memory data movement\n */\nstatic void SecUtilMemmove(void *dst, const void *src, size_t count)\n{\n    unsigned char *pDest = (unsigned char *)dst;\n    const unsigned char *pSrc = (const unsigned char *)src;\n    size_t maxCount = count;\n\n    if (dst <= src || pDest >= (pSrc + maxCount)) {\n        /*\n         * Non-Overlapping Buffers\n         * copy from lower addresses to higher addresses\n         */\n        while (maxCount--) {\n            *pDest = *pSrc;\n            ++pDest;\n            ++pSrc;\n        }\n    } else {\n        /*\n         * Overlapping Buffers\n         * copy from higher addresses to lower addresses\n         */\n        pDest = pDest + maxCount - 1;\n        pSrc = pSrc + maxCount - 1;\n\n        while (maxCount--) {\n            *pDest = *pSrc;\n\n            --pDest;\n            --pSrc;\n        }\n    }\n}\n#endif\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The memmove_s function copies count bytes of characters from src to dest.\n *    This function can be assigned correctly when memory overlaps.\n * <INPUT PARAMETERS>\n *    dest                                 Destination object.\n *    destMax                           Size of the destination buffer.\n *    src                                   Source 
object.\n *    count                                Number of characters to copy.\n *\n * <OUTPUT PARAMETERS>\n *    dest buffer                       is updated.\n *\n * <RETURN VALUE>\n *    EOK                                 Success\n *    EINVAL                            dest is  NULL and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN\n *    EINVAL_AND_RESET         dest != NULL and src is NULL and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN\n *    ERANGE                           destMax > SECUREC_MEM_MAX_LEN or destMax is 0\n *    ERANGE_AND_RESET        count > destMax and dest  !=  NULL and src != NULL and destMax != 0\n *                            and destMax <= SECUREC_MEM_MAX_LEN\n *\n *    If an error occurred, dest will  be filled with 0 when dest and destMax valid.\n *    If some regions of the source area and the destination overlap, memmove_s\n *    ensures that the original source bytes in the overlapping region are copied\n *    before being overwritten.\n */\nerrno_t memmove_s(void *dest, size_t destMax, const void *src, size_t count)\n{\n    if (destMax == 0 || destMax > SECUREC_MEM_MAX_LEN) {\n        SECUREC_ERROR_INVALID_RANGE(\"memmove_s\");\n        return ERANGE;\n    }\n    if (dest == NULL || src == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"memmove_s\");\n        if (dest != NULL) {\n            (void)memset(dest, 0, destMax);\n            return EINVAL_AND_RESET;\n        }\n        return EINVAL;\n    }\n    if (count > destMax) {\n        (void)memset(dest, 0, destMax);\n        SECUREC_ERROR_INVALID_RANGE(\"memmove_s\");\n        return ERANGE_AND_RESET;\n    }\n    if (dest == src) {\n        return EOK;\n    }\n\n    if (count > 0) {\n#ifdef SECUREC_NOT_CALL_LIBC_CORE_API\n        SecUtilMemmove(dest, src, count);\n#else\n        /* use underlying memmove for performance consideration */\n        (void)memmove(dest, src, count);\n#endif\n    }\n    return EOK;\n}\n\n#if 
SECUREC_IN_KERNEL\nEXPORT_SYMBOL(memmove_s);\n#endif\n\n"
  },
  {
    "path": "third_party/securec/src/memset_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_INLINE_DO_MEMSET   1\n\n#include \"securecutil.h\"\n\n#ifndef SECUREC_MEMSET_WITH_PERFORMANCE\n#define SECUREC_MEMSET_WITH_PERFORMANCE 0\n#endif\n\n#define SECUREC_MEMSET_PARAM_OK(dest, destMax, count) (SECUREC_LIKELY((count) <= (destMax) && \\\n    (dest) != NULL && (destMax) <= SECUREC_MEM_MAX_LEN))\n\n\n#if SECUREC_WITH_PERFORMANCE_ADDONS || SECUREC_MEMSET_WITH_PERFORMANCE\n/*\n * Determine whether the address is 8-byte aligned, use static to increase performance\n * return 0 is aligned\n */\nstatic int SecIsAddrAligned8(const void *addr, const void *zeroAddr)\n{\n    return (int)(((size_t)((const char*)addr - (const char*)zeroAddr)) & 7); /* use 7 to check aligned 8 */\n}\n\n/* use union to clear strict-aliasing warning */\ntypedef union {\n    SecStrBuf32 buf32;\n    SecStrBuf31 buf31;\n    SecStrBuf30 buf30;\n    SecStrBuf29 buf29;\n    SecStrBuf28 buf28;\n    SecStrBuf27 buf27;\n    SecStrBuf26 buf26;\n    SecStrBuf25 buf25;\n    SecStrBuf24 buf24;\n    SecStrBuf23 buf23;\n    SecStrBuf22 buf22;\n    SecStrBuf21 buf21;\n    SecStrBuf20 buf20;\n    SecStrBuf19 buf19;\n    SecStrBuf18 buf18;\n    SecStrBuf17 buf17;\n    SecStrBuf16 buf16;\n    SecStrBuf15 buf15;\n    SecStrBuf14 buf14;\n    SecStrBuf13 buf13;\n    SecStrBuf12 buf12;\n    SecStrBuf11 buf11;\n    SecStrBuf10 buf10;\n    SecStrBuf9 buf9;\n    
SecStrBuf8 buf8;\n    SecStrBuf7 buf7;\n    SecStrBuf6 buf6;\n    SecStrBuf5 buf5;\n    SecStrBuf4 buf4;\n    SecStrBuf3 buf3;\n    SecStrBuf2 buf2;\n    SecStrBuf1 buf1;\n} SecStrBuf32Union;\n/* C standard initializes the first member of the consortium. */\nstatic const SecStrBuf32 g_allZero = {{\n    '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0',\n    '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0',\n    '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0',\n    '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0'\n}};\nstatic const SecStrBuf32 g_allFF = {{\n    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF\n}};\n\nstatic const SecStrBuf32Union *SecStrictAliasingCast(const SecStrBuf32 *buf)\n{\n    return (const SecStrBuf32Union *)buf;\n}\n\n#ifndef SECUREC_MEMSET_THRESHOLD_SIZE\n#define SECUREC_MEMSET_THRESHOLD_SIZE 32UL\n#endif\n\n#define SECUREC_UNALIGNED_SET do { \\\n    char *pcDest = (char *)dest; \\\n    switch (count) { \\\n        case 32: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 31: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 30: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 29: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 28: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 27: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 26: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 25: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* 
FALLTHRU */ \\\n        case 24: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 23: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 22: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 21: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 20: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 19: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 18: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 17: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 16: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 15: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 14: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 13: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 12: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 11: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 10: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 9: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 8: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 7: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n       
 case 6: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 5: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 4: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 3: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 2: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        case 1: \\\n            *(pcDest++) = (char)c; \\\n            /* fall-through */ /* FALLTHRU */ \\\n        default: \\\n            break; \\\n    } \\\n} SECUREC_WHILE_ZERO\n\n#define SECUREC_ALIGNED_SET_OPT_ZERO_FF do { \\\n    switch (c) { \\\n        case 0: \\\n            switch (count) { \\\n                case 1: \\\n                    *(SecStrBuf1 *)dest = *(const SecStrBuf1 *)(&((SecStrictAliasingCast(&g_allZero))->buf1)); \\\n                    break; \\\n                case 2: \\\n                    *(SecStrBuf2 *)dest = *(const SecStrBuf2 *)(&((SecStrictAliasingCast(&g_allZero))->buf2)); \\\n                    break; \\\n                case 3: \\\n                    *(SecStrBuf3 *)dest = *(const SecStrBuf3 *)(&((SecStrictAliasingCast(&g_allZero))->buf3)); \\\n                    break; \\\n                case 4: \\\n                    *(SecStrBuf4 *)dest = *(const SecStrBuf4 *)(&((SecStrictAliasingCast(&g_allZero))->buf4)); \\\n                    break; \\\n                case 5: \\\n                    *(SecStrBuf5 *)dest = *(const SecStrBuf5 *)(&((SecStrictAliasingCast(&g_allZero))->buf5)); \\\n                    break; \\\n                case 6: \\\n                    *(SecStrBuf6 *)dest = *(const SecStrBuf6 *)(&((SecStrictAliasingCast(&g_allZero))->buf6)); \\\n                    break; \\\n                case 7: \\\n                    *(SecStrBuf7 *)dest = *(const SecStrBuf7 
*)(&((SecStrictAliasingCast(&g_allZero))->buf7)); \\\n                    break; \\\n                case 8: \\\n                    *(SecStrBuf8 *)dest = *(const SecStrBuf8 *)(&((SecStrictAliasingCast(&g_allZero))->buf8)); \\\n                    break; \\\n                case 9: \\\n                    *(SecStrBuf9 *)dest = *(const SecStrBuf9 *)(&((SecStrictAliasingCast(&g_allZero))->buf9)); \\\n                    break; \\\n                case 10: \\\n                    *(SecStrBuf10 *)dest = *(const SecStrBuf10 *)(&((SecStrictAliasingCast(&g_allZero))->buf10)); \\\n                    break; \\\n                case 11: \\\n                    *(SecStrBuf11 *)dest = *(const SecStrBuf11 *)(&((SecStrictAliasingCast(&g_allZero))->buf11)); \\\n                    break; \\\n                case 12: \\\n                    *(SecStrBuf12 *)dest = *(const SecStrBuf12 *)(&((SecStrictAliasingCast(&g_allZero))->buf12)); \\\n                    break; \\\n                case 13: \\\n                    *(SecStrBuf13 *)dest = *(const SecStrBuf13 *)(&((SecStrictAliasingCast(&g_allZero))->buf13)); \\\n                    break; \\\n                case 14: \\\n                    *(SecStrBuf14 *)dest = *(const SecStrBuf14 *)(&((SecStrictAliasingCast(&g_allZero))->buf14)); \\\n                    break; \\\n                case 15: \\\n                    *(SecStrBuf15 *)dest = *(const SecStrBuf15 *)(&((SecStrictAliasingCast(&g_allZero))->buf15)); \\\n                    break; \\\n                case 16: \\\n                    *(SecStrBuf16 *)dest = *(const SecStrBuf16 *)(&((SecStrictAliasingCast(&g_allZero))->buf16)); \\\n                    break; \\\n                case 17: \\\n                    *(SecStrBuf17 *)dest = *(const SecStrBuf17 *)(&((SecStrictAliasingCast(&g_allZero))->buf17)); \\\n                    break; \\\n                case 18: \\\n                    *(SecStrBuf18 *)dest = *(const SecStrBuf18 *)(&((SecStrictAliasingCast(&g_allZero))->buf18)); 
\\\n                    break; \\\n                case 19: \\\n                    *(SecStrBuf19 *)dest = *(const SecStrBuf19 *)(&((SecStrictAliasingCast(&g_allZero))->buf19)); \\\n                    break; \\\n                case 20: \\\n                    *(SecStrBuf20 *)dest = *(const SecStrBuf20 *)(&((SecStrictAliasingCast(&g_allZero))->buf20)); \\\n                    break; \\\n                case 21: \\\n                    *(SecStrBuf21 *)dest = *(const SecStrBuf21 *)(&((SecStrictAliasingCast(&g_allZero))->buf21)); \\\n                    break; \\\n                case 22: \\\n                    *(SecStrBuf22 *)dest = *(const SecStrBuf22 *)(&((SecStrictAliasingCast(&g_allZero))->buf22)); \\\n                    break; \\\n                case 23: \\\n                    *(SecStrBuf23 *)dest = *(const SecStrBuf23 *)(&((SecStrictAliasingCast(&g_allZero))->buf23)); \\\n                    break; \\\n                case 24: \\\n                    *(SecStrBuf24 *)dest = *(const SecStrBuf24 *)(&((SecStrictAliasingCast(&g_allZero))->buf24)); \\\n                    break; \\\n                case 25: \\\n                    *(SecStrBuf25 *)dest = *(const SecStrBuf25 *)(&((SecStrictAliasingCast(&g_allZero))->buf25)); \\\n                    break; \\\n                case 26: \\\n                    *(SecStrBuf26 *)dest = *(const SecStrBuf26 *)(&((SecStrictAliasingCast(&g_allZero))->buf26)); \\\n                    break; \\\n                case 27: \\\n                    *(SecStrBuf27 *)dest = *(const SecStrBuf27 *)(&((SecStrictAliasingCast(&g_allZero))->buf27)); \\\n                    break; \\\n                case 28: \\\n                    *(SecStrBuf28 *)dest = *(const SecStrBuf28 *)(&((SecStrictAliasingCast(&g_allZero))->buf28)); \\\n                    break; \\\n                case 29: \\\n                    *(SecStrBuf29 *)dest = *(const SecStrBuf29 *)(&((SecStrictAliasingCast(&g_allZero))->buf29)); \\\n                    break; \\\n       
         case 30: \\\n                    *(SecStrBuf30 *)dest = *(const SecStrBuf30 *)(&((SecStrictAliasingCast(&g_allZero))->buf30)); \\\n                    break; \\\n                case 31: \\\n                    *(SecStrBuf31 *)dest = *(const SecStrBuf31 *)(&((SecStrictAliasingCast(&g_allZero))->buf31)); \\\n                    break; \\\n                case 32: \\\n                    *(SecStrBuf32 *)dest = *(const SecStrBuf32 *)(&((SecStrictAliasingCast(&g_allZero))->buf32)); \\\n                    break; \\\n                default: \\\n                    break; \\\n            } \\\n            break; \\\n        case 0xFF: \\\n            switch (count) { \\\n                case 1: \\\n                    *(SecStrBuf1 *)dest = *(const SecStrBuf1 *)(&((SecStrictAliasingCast(&g_allFF))->buf1)); \\\n                    break; \\\n                case 2: \\\n                    *(SecStrBuf2 *)dest = *(const SecStrBuf2 *)(&((SecStrictAliasingCast(&g_allFF))->buf2)); \\\n                    break; \\\n                case 3: \\\n                    *(SecStrBuf3 *)dest = *(const SecStrBuf3 *)(&((SecStrictAliasingCast(&g_allFF))->buf3)); \\\n                    break; \\\n                case 4: \\\n                    *(SecStrBuf4 *)dest = *(const SecStrBuf4 *)(&((SecStrictAliasingCast(&g_allFF))->buf4)); \\\n                    break; \\\n                case 5: \\\n                    *(SecStrBuf5 *)dest = *(const SecStrBuf5 *)(&((SecStrictAliasingCast(&g_allFF))->buf5)); \\\n                    break; \\\n                case 6: \\\n                    *(SecStrBuf6 *)dest = *(const SecStrBuf6 *)(&((SecStrictAliasingCast(&g_allFF))->buf6)); \\\n                    break; \\\n                case 7: \\\n                    *(SecStrBuf7 *)dest = *(const SecStrBuf7 *)(&((SecStrictAliasingCast(&g_allFF))->buf7)); \\\n                    break; \\\n                case 8: \\\n                    *(SecStrBuf8 *)dest = *(const SecStrBuf8 
*)(&((SecStrictAliasingCast(&g_allFF))->buf8)); \\\n                    break; \\\n                case 9: \\\n                    *(SecStrBuf9 *)dest = *(const SecStrBuf9 *)(&((SecStrictAliasingCast(&g_allFF))->buf9)); \\\n                    break; \\\n                case 10: \\\n                    *(SecStrBuf10 *)dest = *(const SecStrBuf10 *)(&((SecStrictAliasingCast(&g_allFF))->buf10)); \\\n                    break; \\\n                case 11: \\\n                    *(SecStrBuf11 *)dest = *(const SecStrBuf11 *)(&((SecStrictAliasingCast(&g_allFF))->buf11)); \\\n                    break; \\\n                case 12: \\\n                    *(SecStrBuf12 *)dest = *(const SecStrBuf12 *)(&((SecStrictAliasingCast(&g_allFF))->buf12)); \\\n                    break; \\\n                case 13: \\\n                    *(SecStrBuf13 *)dest = *(const SecStrBuf13 *)(&((SecStrictAliasingCast(&g_allFF))->buf13)); \\\n                    break; \\\n                case 14: \\\n                    *(SecStrBuf14 *)dest = *(const SecStrBuf14 *)(&((SecStrictAliasingCast(&g_allFF))->buf14)); \\\n                    break; \\\n                case 15: \\\n                    *(SecStrBuf15 *)dest = *(const SecStrBuf15 *)(&((SecStrictAliasingCast(&g_allFF))->buf15)); \\\n                    break; \\\n                case 16: \\\n                    *(SecStrBuf16 *)dest = *(const SecStrBuf16 *)(&((SecStrictAliasingCast(&g_allFF))->buf16)); \\\n                    break; \\\n                case 17: \\\n                    *(SecStrBuf17 *)dest = *(const SecStrBuf17 *)(&((SecStrictAliasingCast(&g_allFF))->buf17)); \\\n                    break; \\\n                case 18: \\\n                    *(SecStrBuf18 *)dest = *(const SecStrBuf18 *)(&((SecStrictAliasingCast(&g_allFF))->buf18)); \\\n                    break; \\\n                case 19: \\\n                    *(SecStrBuf19 *)dest = *(const SecStrBuf19 *)(&((SecStrictAliasingCast(&g_allFF))->buf19)); \\\n                
    break; \\\n                case 20: \\\n                    *(SecStrBuf20 *)dest = *(const SecStrBuf20 *)(&((SecStrictAliasingCast(&g_allFF))->buf20)); \\\n                    break; \\\n                case 21: \\\n                    *(SecStrBuf21 *)dest = *(const SecStrBuf21 *)(&((SecStrictAliasingCast(&g_allFF))->buf21)); \\\n                    break; \\\n                case 22: \\\n                    *(SecStrBuf22 *)dest = *(const SecStrBuf22 *)(&((SecStrictAliasingCast(&g_allFF))->buf22)); \\\n                    break; \\\n                case 23: \\\n                    *(SecStrBuf23 *)dest = *(const SecStrBuf23 *)(&((SecStrictAliasingCast(&g_allFF))->buf23)); \\\n                    break; \\\n                case 24: \\\n                    *(SecStrBuf24 *)dest = *(const SecStrBuf24 *)(&((SecStrictAliasingCast(&g_allFF))->buf24)); \\\n                    break; \\\n                case 25: \\\n                    *(SecStrBuf25 *)dest = *(const SecStrBuf25 *)(&((SecStrictAliasingCast(&g_allFF))->buf25)); \\\n                    break; \\\n                case 26: \\\n                    *(SecStrBuf26 *)dest = *(const SecStrBuf26 *)(&((SecStrictAliasingCast(&g_allFF))->buf26)); \\\n                    break; \\\n                case 27: \\\n                    *(SecStrBuf27 *)dest = *(const SecStrBuf27 *)(&((SecStrictAliasingCast(&g_allFF))->buf27)); \\\n                    break; \\\n                case 28: \\\n                    *(SecStrBuf28 *)dest = *(const SecStrBuf28 *)(&((SecStrictAliasingCast(&g_allFF))->buf28)); \\\n                    break; \\\n                case 29: \\\n                    *(SecStrBuf29 *)dest = *(const SecStrBuf29 *)(&((SecStrictAliasingCast(&g_allFF))->buf29)); \\\n                    break; \\\n                case 30: \\\n                    *(SecStrBuf30 *)dest = *(const SecStrBuf30 *)(&((SecStrictAliasingCast(&g_allFF))->buf30)); \\\n                    break; \\\n                case 31: \\\n                    
*(SecStrBuf31 *)dest = *(const SecStrBuf31 *)(&((SecStrictAliasingCast(&g_allFF))->buf31)); \\\n                    break; \\\n                case 32: \\\n                    *(SecStrBuf32 *)dest = *(const SecStrBuf32 *)(&((SecStrictAliasingCast(&g_allFF))->buf32)); \\\n                    break; \\\n                default: \\\n                    break; \\\n            } \\\n            break; \\\n        default: \\\n            SECUREC_UNALIGNED_SET; \\\n    } /* END switch */ \\\n} SECUREC_WHILE_ZERO\n#endif\n\n/*\n * Handling errors\n */\nstatic errno_t SecMemsetError(void *dest, size_t destMax, int c, size_t count)\n{\n    if (destMax == 0 || destMax > SECUREC_MEM_MAX_LEN) {\n        SECUREC_ERROR_INVALID_RANGE(\"memset_s\");\n        return ERANGE;\n    }\n    if (dest == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"memset_s\");\n        return EINVAL;\n    }\n    if (count > destMax) {\n        (void)memset(dest, c, destMax); /* set entire buffer to value c */\n        SECUREC_ERROR_INVALID_RANGE(\"memset_s\");\n        return ERANGE_AND_RESET;\n    }\n    return EOK;\n}\n\n#if SECUREC_WITH_PERFORMANCE_ADDONS || SECUREC_MEMSET_WITH_PERFORMANCE\n/*\n * Performance optimization\n */\nstatic void SecDoMemsetOpt(void *dest, int c, size_t count)\n{\n    if (count > SECUREC_MEMSET_THRESHOLD_SIZE) {\n        SecDoMemset(dest, c, count);\n    } else {\n        if (SECUREC_ADDR_ALIGNED_8(dest)) {\n            /* use struct assignment */\n            SECUREC_ALIGNED_SET_OPT_ZERO_FF;\n        } else {\n            SECUREC_UNALIGNED_SET;\n        }\n    }\n    return;\n}\n#endif\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The memset_s function copies the value of c (converted to an unsigned char)\n *     into each of the first count characters of the object pointed to by dest.\n *\n * <INPUT PARAMETERS>\n *    dest                           Pointer to destination.\n *    destMax                     The size of the buffer.\n *    c                               
Character to set.\n *    count                          Number of characters.\n *\n * <OUTPUT PARAMETERS>\n *    dest buffer                   is uptdated.\n *\n * <RETURN VALUE>\n *    EOK                            Success\n *    EINVAL                        dest == NULL and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN\n *    ERANGE                       destMax is  0 or destMax > SECUREC_MEM_MAX_LEN\n *    ERANGE_AND_RESET    count > destMax and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN and dest != NULL\n *\n *    if return ERANGE_AND_RESET then fill dest to c ,fill length is destMax\n */\nerrno_t memset_s(void *dest, size_t destMax, int c, size_t count)\n{\n    if (SECUREC_MEMSET_PARAM_OK(dest, destMax, count)) {\n#if SECUREC_MEMSET_WITH_PERFORMANCE\n        SecDoMemsetOpt(dest, c, count);\n#else\n        SecDoMemset(dest, c, count);\n#endif\n        return EOK;\n    } else {\n        /* meet some runtime violation, return error code */\n        return SecMemsetError(dest, destMax, c, count);\n    }\n}\n\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(memset_s);\n#endif\n\n#if SECUREC_WITH_PERFORMANCE_ADDONS\n/*\n * Performance optimization\n */\nerrno_t memset_sOptAsm(void *dest, size_t destMax, int c, size_t count)\n{\n    if (SECUREC_MEMSET_PARAM_OK(dest, destMax, count)) {\n        SecDoMemsetOpt(dest, c, count);\n        return EOK;\n    }\n    /* meet some runtime violation, return error code */\n    return SecMemsetError(dest, destMax, c, count);\n}\n\n/*\n * Performance optimization\n */\nerrno_t memset_sOptTc(void *dest, size_t destMax, int c, size_t count)\n{\n    if (SECUREC_LIKELY(count <= destMax && dest != NULL)) {\n        SecDoMemsetOpt(dest, c, count);\n        return EOK;\n    }\n    /* meet some runtime violation, return error code */\n    return SecMemsetError(dest, destMax, c, count);\n}\n#endif\n\n"
  },
  {
    "path": "third_party/securec/src/output.inl",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef OUTPUT_INL_2B263E9C_43D8_44BB_B17A_6D2033DECEE5\n#define OUTPUT_INL_2B263E9C_43D8_44BB_B17A_6D2033DECEE5\n\n#define SECUREC_NULL_STRING_SIZE            8\n#define SECUREC_STATE_TABLE_SIZE              337\n#define SECUREC_OFFSET_BITS_WORD            16\n#define SECUREC_OFFSET_BITS_DWORD           32\n\n#define SECUREC_OFFSET_DIV_OCTAL            3\n#define SECUREC_OFFSET_DIV_HEX              4\n#define SECUREC_RADIX_OCTAL                 8\n#define SECUREC_RADIX_DECIMAL               10\n#define SECUREC_RADIX_HEX                   16\n/* Use two displacements to eliminate compilation warnings */\n#define SECUREC_SHR_DWORD(x)                (((x) >> 16) >> 16)\n#define SECUREC_PREFIX_LEN                  2\n/* size include '+' and '\\0' */\n#define SECUREC_FLOAT_BUF_EXT               2\n\n\n#ifdef SECUREC_STACK_SIZE_LESS_THAN_1K\n#define SECUREC_FMT_STR_LEN                 8\n#else\n#define SECUREC_FMT_STR_LEN                 16\n#endif\n\ntypedef struct {\n    unsigned int flags;\n    int fldWidth;\n    int precision;\n    int bufferIsWide;           /* flag for buffer contains wide chars ;0 is not wide char */\n    int dynWidth;               /* %*   1 width from variable parameter ;0 not */\n    int dynPrecision;           /* %.*  1 precision from variable parameter ;0 not */\n} SecFormatAttr;\n\ntypedef union {\n    char 
*str;                  /* not a null terminated  string */\n#if SECUREC_HAVE_WCHART\n    wchar_t *wStr;\n#endif\n} SecFormatBuf;\n\ntypedef union {\n    char str[SECUREC_BUFFER_SIZE + 1];\n#ifdef SECUREC_FOR_WCHAR\n    wchar_t wStr[SECUREC_BUFFER_SIZE + 1];\n#endif\n} SecBuffer;\n\n#if SECUREC_ENABLE_SPRINTF_FLOAT\n/* call system sprintf to format float value */\nstatic int SecIndirectSprintf(char *strDest, const char *format, ...)\n{\n    int ret;                    /* If initialization causes  e838 */\n    va_list argList;\n\n    va_start(argList, format);\n    SECUREC_MASK_MSVC_CRT_WARNING\n    ret = vsprintf(strDest, format, argList);\n    SECUREC_END_MASK_MSVC_CRT_WARNING\n    va_end(argList);\n    (void)argList; /* to clear e438 last value assigned not used , the compiler will optimize this code */\n\n    return ret;\n}\n\n#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT\n/* out put long double value to dest */\nstatic int SecFormatLongDboule(char *strDest,const SecFormatAttr *formatAttr, const char *fmt, long double ldValue)\n{\n    int fldWidth = ((formatAttr->flags & SECUREC_FLAG_LEFT) ? (-(formatAttr->fldWidth)) : formatAttr->fldWidth);\n    if (formatAttr->dynWidth && formatAttr->dynPrecision) {\n        return SecIndirectSprintf(strDest, fmt, fldWidth, formatAttr->precision, ldValue);\n    } else if (formatAttr->dynWidth) {\n        return SecIndirectSprintf(strDest, fmt, fldWidth, ldValue);\n    } else if (formatAttr->dynPrecision) {\n        return SecIndirectSprintf(strDest, fmt, formatAttr->precision, ldValue);\n    }\n    return SecIndirectSprintf(strDest, fmt, ldValue);\n}\n#endif\n\n/* out put double value to dest */\nstatic int SecFormatDboule(char *strDest, const SecFormatAttr *formatAttr, const char *fmt, double dValue)\n{\n    int fldWidth = ((formatAttr->flags & SECUREC_FLAG_LEFT) ? 
(-(formatAttr->fldWidth)) : formatAttr->fldWidth);\n    if (formatAttr->dynWidth && formatAttr->dynPrecision) {\n        return SecIndirectSprintf(strDest, fmt, fldWidth, formatAttr->precision, dValue);\n    } else if (formatAttr->dynWidth) {\n        return SecIndirectSprintf(strDest, fmt, fldWidth, dValue);\n    } else if (formatAttr->dynPrecision) {\n        return SecIndirectSprintf(strDest, fmt, formatAttr->precision, dValue);\n    }\n    return SecIndirectSprintf(strDest, fmt, dValue);\n}\n#endif\n\n#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT\n/* to clear e506 warning */\nstatic int SecIsSameSize(size_t sizeA, size_t sizeB)\n{\n    return sizeA == sizeB;\n}\n#endif\n\n#define SECUREC_SPECIAL_DWORD(val32, numBase) do { \\\n    --formatBuf.str; \\\n    *(formatBuf.str) = digits[(val32) % (numBase)]; \\\n} while (((val32) /= (numBase)) != 0)\n\n#if defined(SECUREC_USE_SPECIAL_DIV64) || (defined(SECUREC_VXWORKS_VERSION_5_4) && !defined(SECUREC_ON_64BITS))\n/*\n * Fast divide by 10 algorithm.\n * Calculation divisor multiply  0xcccccccccccccccdULL, resultHi64 >> 3 as quotient\n */\nstatic void SecU64Div10(SecUnsignedInt64 divisor, SecUnsignedInt64 *quotient, SecUnsignedInt32 *remainder)\n{\n    SecUnsignedInt64 mask = 0xffffffffULL; /* use 0xffffffffULL as 32 bit mask */\n    SecUnsignedInt64 magicHi = 0xccccccccULL; /* fast divide 10 magic numbers high 32bit 0xccccccccULL */\n    SecUnsignedInt64 magicLow = 0xcccccccdULL; /* fast divide 10 magic numbers low 32bit  0xcccccccdULL */\n    SecUnsignedInt64 divisorHi = (SecUnsignedInt64)(SECUREC_SHR_DWORD(divisor)); /* hig 32 bit use  */\n    SecUnsignedInt64 divisorLow = (SecUnsignedInt64)(divisor & mask); /* low 32 bit mask */\n    SecUnsignedInt64 factorHi = divisorHi * magicHi;\n    SecUnsignedInt64 factorLow1 = divisorHi * magicLow;\n    SecUnsignedInt64 factorLow2 = divisorLow * magicHi;\n    SecUnsignedInt64 factorLow3 = divisorLow * magicLow;\n    SecUnsignedInt64 carry = (factorLow1 & mask) + (factorLow2 & mask) + 
SECUREC_SHR_DWORD(factorLow3);\n    SecUnsignedInt64 resultHi64 = factorHi + SECUREC_SHR_DWORD(factorLow1) + \\\n                                   SECUREC_SHR_DWORD(factorLow2) + SECUREC_SHR_DWORD(carry);\n\n    *quotient = resultHi64 >> 3; /* fast divide 10 magic numbers 3 */\n    *remainder = (SecUnsignedInt32)(divisor - ((*quotient) * 10)); /* quotient mul 10 */\n    return;\n}\n#if defined(SECUREC_VXWORKS_VERSION_5_4) && !defined(SECUREC_ON_64BITS)\n/*\n * Divide function for VXWORKS\n */\nstatic int SecU64Div32(SecUnsignedInt64 divisor, SecUnsignedInt32 radix,\n    SecUnsignedInt64 *quotient, SecUnsignedInt32 *remainder)\n{\n    switch (radix) {\n        case SECUREC_RADIX_DECIMAL:\n            SecU64Div10(divisor, quotient, remainder);\n            break;\n        case SECUREC_RADIX_HEX:\n            *quotient = divisor >> SECUREC_OFFSET_DIV_HEX;\n            *remainder = divisor & 0xfULL; /* mask one hex number by 0xfULL */\n            break;\n        case SECUREC_RADIX_OCTAL:\n            *quotient = divisor >> SECUREC_OFFSET_DIV_OCTAL;\n            *remainder = divisor & 0x7ULL; /* mask one hex number by 0x7ULL */\n            break;\n        default:\n            return -1;\n    }\n    return 0;\n}\n#endif\n#endif\n\n#if defined(SECUREC_USE_SPECIAL_DIV64)\n/* The compiler does not provide 64 bit division problems */\n#define SECUREC_SPECIAL_QWORD_BASE10(val64) do { \\\n    SecUnsignedInt64 quotient = 0; \\\n    SecUnsignedInt32 digit = 0; \\\n    SecU64Div10((val64), &(quotient), &(digit)); \\\n    --formatBuf.str; \\\n    *(formatBuf.str) = digits[digit]; \\\n    (val64) = quotient; \\\n} while ((val64) != 0)\n#else\n#define SECUREC_SPECIAL_QWORD_BASE10(val64) do { \\\n    --formatBuf.str; \\\n    *(formatBuf.str) = digits[(val64) % SECUREC_RADIX_DECIMAL]; \\\n} while (((val64) /= SECUREC_RADIX_DECIMAL) != 0)\n#endif\n#define SECUREC_SPECIAL_QWORD(val64, numBase) do { \\\n    --formatBuf.str; \\\n    *(formatBuf.str) = digits[(val64) % (numBase)]; 
\\\n} while (((val64) /= (numBase)) != 0)\n\n\n#define SECUREC_SAFE_WRITE_STR_OPT(src, txtLen, outStream, outChars) do { \\\n    int ii_; \\\n    for (ii_ = 0; ii_ < (txtLen); ++ii_) { \\\n        *((SecChar *)(void *)((outStream)->cur)) = *(SecChar *)(src); \\\n        (outStream)->cur += sizeof(SecChar); \\\n        (src) = (src) + 1; \\\n    } \\\n    (outStream)->count -= (txtLen) * (int)(sizeof(SecChar)); \\\n    *(outChars) = *(outChars) + (txtLen); \\\n} SECUREC_WHILE_ZERO\n\n#define SECUREC_SAFE_WRITE_STR(src, txtLen, outStream, outChars) do { \\\n    if ((txtLen) < 12) { /* performance optimization for mobile number length 12 */ \\\n        SECUREC_SAFE_WRITE_STR_OPT((src), (txtLen), (outStream), (outChars)); \\\n    } else { \\\n        SecDoMemcpy((outStream)->cur, (src), ((size_t)(unsigned int)(txtLen) * (sizeof(SecChar)))); \\\n        (outStream)->cur += (size_t)((size_t)(unsigned int)(txtLen) * (sizeof(SecChar))); \\\n        (outStream)->count -= (txtLen) * (int)(sizeof(SecChar)); \\\n        *(outChars) = *(outChars) + (txtLen); \\\n    } \\\n} SECUREC_WHILE_ZERO\n\n#define SECUREC_SAFE_WRITE_CHAR(c, outStream, outChars) do { \\\n    *((SecChar *)(void *)((outStream)->cur)) = (SecChar)(c); \\\n    (outStream)->cur += sizeof(SecChar); \\\n    (outStream)->count -= (int)(sizeof(SecChar)); \\\n    *(outChars) = *(outChars) + 1; \\\n} SECUREC_WHILE_ZERO\n\n#define SECUREC_SAFE_PADDING(padChar, padLen, outStream, outChars) do { \\\n    int ii_; \\\n    for (ii_ = 0; ii_ < (padLen); ++ii_) { \\\n        *((SecChar *)(void *)((outStream)->cur)) = (SecChar)(padChar); \\\n        (outStream)->cur += sizeof(SecChar); \\\n    } \\\n    (outStream)->count -= (padLen) * (int)(sizeof(SecChar)); \\\n    *(outChars) = *(outChars) + (padLen); \\\n} SECUREC_WHILE_ZERO\n\n/* The count variable can be reduced to 0, and the external function complements the \\0 terminator. 
*/\n#define SECUREC_IS_REST_BUF_ENOUGH(stream, needLen) ((int)((stream)->count - \\\n    (int)(needLen) * (int)(sizeof(SecChar))) >= 0)\n\n#define SECUREC_FMT_STATE_OFFSET  256\n#ifdef SECUREC_FOR_WCHAR\n#define SECUREC_FMT_TYPE(c, fmtTable)  ((((unsigned int)(int)(c)) <= (unsigned int)(int)SECUREC_CHAR('~')) ? \\\n    ((fmtTable)[(unsigned char)(c)]) : 0)\n#define SECUREC_DECODE_STATE(c, fmtTable, lastState) (SecFmtState)((((fmtTable)[(SECUREC_FMT_TYPE(c, (fmtTable))) * \\\n    ((unsigned char)STAT_INVALID + 1) + \\\n    (unsigned char)(lastState) + \\\n    SECUREC_FMT_STATE_OFFSET])))\n#else\n#define SECUREC_DECODE_STATE(c, fmtTable, lastState) (SecFmtState)(((fmtTable)[((fmtTable)[(unsigned char)(c)]) * \\\n    ((unsigned char)STAT_INVALID + 1) + \\\n    (unsigned char)(lastState) + \\\n    SECUREC_FMT_STATE_OFFSET]))\n#endif\n\nstatic void SecDecodeFlags(SecChar ch, SecFormatAttr *attr)\n{\n    switch (ch) {\n        case SECUREC_CHAR(' '):\n            attr->flags |= SECUREC_FLAG_SIGN_SPACE;\n            break;\n        case SECUREC_CHAR('+'):\n            attr->flags |= SECUREC_FLAG_SIGN;\n            break;\n        case SECUREC_CHAR('-'):\n            attr->flags |= SECUREC_FLAG_LEFT;\n            break;\n        case SECUREC_CHAR('0'):\n            attr->flags |= SECUREC_FLAG_LEADZERO;   /* add zero th the front */\n            break;\n        case SECUREC_CHAR('#'):\n            attr->flags |= SECUREC_FLAG_ALTERNATE;  /* output %x with 0x */\n            break;\n        default:\n            break;\n    }\n    return;\n}\n\n\n/*\n * Decoded size identifier in format string to Reduce the number of lines of function code\n */\nstatic int SecDecodeSizeI(SecFormatAttr *attr, const SecChar **format)\n{\n#ifdef SECUREC_ON_64BITS\n    attr->flags |= SECUREC_FLAG_I64;    /* %I  to  INT64 */\n#endif\n    if ((**format == SECUREC_CHAR('6')) && (*((*format) + 1) == SECUREC_CHAR('4'))) {\n        (*format) += 2; /* add 2 to skip I64 */\n        attr->flags |= 
SECUREC_FLAG_I64;    /* %I64  to  INT64 */\n    } else if ((**format == SECUREC_CHAR('3')) && (*((*format) + 1) == SECUREC_CHAR('2'))) {\n        (*format) += 2; /* add 2 to skip I32 */\n        attr->flags &= ~SECUREC_FLAG_I64;   /* %I64  to  INT32 */\n    } else if ((**format == SECUREC_CHAR('d')) || (**format == SECUREC_CHAR('i')) ||\n        (**format == SECUREC_CHAR('o')) || (**format == SECUREC_CHAR('u')) ||\n        (**format == SECUREC_CHAR('x')) || (**format == SECUREC_CHAR('X'))) {\n        /* do nothing */\n    } else {\n        /* Compatibility  code for \"%I\" just print I */\n        return -1;\n    }\n    return 0;\n}\n/*\n * Decoded size identifier in format string\n */\nstatic int SecDecodeSize(SecChar ch, SecFormatAttr *attr, const SecChar **format)\n{\n    switch (ch) {\n#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT\n        case SECUREC_CHAR('j'):\n            attr->flags |= SECUREC_FLAG_INTMAX;\n            break;\n#endif\n        case SECUREC_CHAR('q'):\n            /* fall-through */ /* FALLTHRU */\n        case SECUREC_CHAR('L'):\n            attr->flags |= SECUREC_FLAG_LONGLONG | SECUREC_FLAG_LONG_DOUBLE;\n            break;\n        case SECUREC_CHAR('l'):\n            if (**format == SECUREC_CHAR('l')) {\n                *format = *format + 1;\n                attr->flags |= SECUREC_FLAG_LONGLONG;   /* long long */\n            } else {\n                attr->flags |= SECUREC_FLAG_LONG;   /* long int or wchar_t */\n            }\n            break;\n        case SECUREC_CHAR('t'):\n            attr->flags |= SECUREC_FLAG_PTRDIFF;\n            break;\n#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT\n        case SECUREC_CHAR('z'):\n            /* fall-through */ /* FALLTHRU */\n        case SECUREC_CHAR('Z'):\n            attr->flags |= SECUREC_FLAG_SIZE;\n            break;\n#endif\n        case SECUREC_CHAR('I'):\n            if (SecDecodeSizeI(attr, format) != 0) {\n                /* Compatibility  code for \"%I\" just print I */\n                
return -1;\n            }\n            break;\n        case SECUREC_CHAR('h'):\n            if (**format == SECUREC_CHAR('h')) {\n                attr->flags |= SECUREC_FLAG_CHAR;   /* char */\n            } else {\n                attr->flags |= SECUREC_FLAG_SHORT;  /* short int */\n            }\n            break;\n        case SECUREC_CHAR('w'):\n            attr->flags |= SECUREC_FLAG_WIDECHAR;   /* wide char */\n            break;\n        default:\n            break;\n    }\n    return 0;\n}\n\n/*\n * Decoded char type identifier\n */\nstatic int SecDecodeTypeC(SecFormatAttr *attr, unsigned int cValue, SecFormatBuf *formatBuf, SecBuffer *buffer)\n{\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT)) && !(defined(__hpux)) && !(defined(SECUREC_ON_SOLARIS))\n    attr->flags &= ~SECUREC_FLAG_LEADZERO;\n#endif\n\n#ifdef SECUREC_FOR_WCHAR\n    attr->bufferIsWide = 1;\n    if (attr->flags & SECUREC_FLAG_SHORT) {\n#if SECUREC_HAVE_MBTOWC\n        /* multibyte character to wide  character */\n        char tmpChar[2]; /* One character string, length is 2 */\n        tmpChar[0] = (char)(cValue & 0x00ff);\n        tmpChar[1] = '\\0';\n\n        if (mbtowc(buffer->wStr, tmpChar, sizeof(tmpChar)) < 0) {\n            return -1;\n        }\n#else\n        return -1;\n#endif\n    } else {\n        buffer->wStr[0] = (wchar_t)cValue;\n    }\n    formatBuf->wStr = buffer->wStr;\n    return 1;                /* only 1 wide character */\n#else /* SECUREC_FOR_WCHAR */\n    attr->bufferIsWide = 0;\n    if (attr->flags & (SECUREC_FLAG_LONG | SECUREC_FLAG_WIDECHAR)) {\n#if SECUREC_HAVE_WCTOMB\n        wchar_t wChar = (wchar_t)cValue;\n        int textLen;\n        /* wide  character  to multibyte character */\n        SECUREC_MASK_MSVC_CRT_WARNING\n        textLen = wctomb(buffer->str, wChar);\n        SECUREC_END_MASK_MSVC_CRT_WARNING\n        if (textLen < 0) {\n            return -1;\n        }\n        formatBuf->str = buffer->str;\n        return textLen;\n#else\n        return 
-1;\n#endif\n    } else {\n        /* get  multibyte character from argument */\n        unsigned short temp;\n        temp = (unsigned short)cValue;\n        buffer->str[0] = (char)temp;\n        formatBuf->str = buffer->str;\n        return 1; /* only 1 character */\n    }\n#endif\n\n}\n\n/* literal string to print null ptr, define it as array rather than const text area\n * is to avoid gcc warning with pointing const text with variable\n */\n#if SECUREC_HAVE_WCHART\nstatic wchar_t g_wStrNullString[SECUREC_NULL_STRING_SIZE] = { L'(', L'n', L'u', L'l', L'l', L')', L'\\0', L'\\0' };\n#endif\nstatic char g_strNullString[SECUREC_NULL_STRING_SIZE] = \"(null)\";\n\nstatic int SecDecodeTypeSchar(const SecFormatAttr *attr, SecFormatBuf *formatBuf)\n{\n    int finalPrecision = (attr->precision == -1) ? SECUREC_INT_MAX : attr->precision;\n    int textLen;\n\n    if (formatBuf->str == NULL) {   /* NULL passed, use special string */\n        formatBuf->str = g_strNullString;\n    }\n    if (finalPrecision == SECUREC_INT_MAX) {\n        /* precision NOT assigned */\n        /* The strlen performance is high when the string length is greater than 32 */\n        textLen = (int)strlen(formatBuf->str);\n    } else {\n        /* precision assigned */\n        size_t tmpLen;\n        SECUREC_CALC_STR_LEN(formatBuf->str, (size_t)(unsigned int)finalPrecision, &tmpLen);\n        textLen = (int)tmpLen;\n    }\n    return textLen;\n}\n\n#if SECUREC_HAVE_WCHART\nstatic int SecDecodeTypeSwchar(SecFormatAttr *attr, SecFormatBuf *formatBuf)\n{\n    int finalPrecision = (attr->precision == -1) ? 
SECUREC_INT_MAX : attr->precision;\n    int textLen;\n\n    attr->bufferIsWide = 1;\n    if (formatBuf->wStr == NULL) {  /* NULL passed, use special string */\n        formatBuf->wStr = g_wStrNullString;\n    }\n    /* textLen in wchar_t */\n    SECUREC_CALC_WSTR_LEN(formatBuf->wStr, finalPrecision, &textLen);\n\n    return textLen;\n}\n#endif\n\n/*\n * Decoded string identifier\n */\nstatic int SecDecodeTypeS(SecFormatAttr *attr, char *argPtr, SecFormatBuf *formatBuf)\n{\n    int textLen;\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT)) && (!defined(SECUREC_ON_UNIX))\n    attr->flags &= ~SECUREC_FLAG_LEADZERO;\n#endif\n    formatBuf->str = argPtr;\n#ifdef SECUREC_FOR_WCHAR\n#if defined(SECUREC_COMPATIBLE_LINUX_FORMAT)\n    if (!(attr->flags & SECUREC_FLAG_LONG)) {\n        attr->flags |= SECUREC_FLAG_SHORT;\n    }\n#endif\n    if (attr->flags & SECUREC_FLAG_SHORT) {\n        /* textLen now contains length in multibyte chars */\n        textLen = SecDecodeTypeSchar(attr, formatBuf);\n    } else {\n        /* textLen now contains length in wide chars */\n        textLen = SecDecodeTypeSwchar(attr, formatBuf);\n    }\n#else /* SECUREC_FOR_WCHAR */\n    if (attr->flags & (SECUREC_FLAG_LONG | SECUREC_FLAG_WIDECHAR)) {\n        /* textLen now contains length in wide chars */\n#if SECUREC_HAVE_WCHART\n        textLen = SecDecodeTypeSwchar(attr, formatBuf);\n#else\n        textLen = 0;\n#endif\n    } else {\n        /* textLen now contains length in multibyte chars */\n        textLen = SecDecodeTypeSchar(attr, formatBuf);\n    }\n#endif /* SECUREC_FOR_WCHAR */\n    return textLen;\n}\n\n/*\n * Write one character to dest buffer\n */\nstatic void SecOutputOneChar(SecChar ch, SecPrintfStream *stream, int *counter)\n{\n    /* normal state, write character */\n    if (SECUREC_IS_REST_BUF_ENOUGH(stream, 1)) { /* only one char */\n        SECUREC_SAFE_WRITE_CHAR(ch, stream, counter); /* char * cast to wchar * */\n    } else {\n#ifdef SECUREC_FOR_WCHAR\n        SecWriteCharW(ch, 
stream, counter);\n#else\n        /* optimize function call to code */\n        *counter = -1;\n        stream->count = -1;\n#endif\n    }\n}\n\n/*\n * Check precison in format\n */\nstatic int SecDecodePrecision(SecChar ch, SecFormatAttr *formatAttr)\n{\n    if (formatAttr->dynPrecision == 0) {\n        /* add digit to current precision */\n        if (SECUREC_MUL_TEN_ADD_BEYOND_MAX(formatAttr->precision)) {\n            return -1;\n        }\n        formatAttr->precision = (int)SECUREC_MUL_TEN((unsigned int)formatAttr->precision) +\n            (unsigned char)(ch - SECUREC_CHAR('0'));\n    } else {\n        if (formatAttr->precision < 0) {\n            formatAttr->precision = -1;\n        }\n        if (formatAttr->precision > SECUREC_MAX_WIDTH_LEN) {\n            return -1;\n        }\n    }\n    return 0;\n}\n\n\n/*\n * Check width in format\n */\nstatic int SecDecodeWidth(SecChar ch, SecFormatAttr *formatAttr, SecFmtState lastState)\n{\n    if (formatAttr->dynWidth == 0) {\n        if (lastState != STAT_WIDTH) {\n            formatAttr->fldWidth = 0;\n        }\n        if (SECUREC_MUL_TEN_ADD_BEYOND_MAX(formatAttr->fldWidth)) {\n            return -1;\n        }\n        formatAttr->fldWidth = (int)SECUREC_MUL_TEN((unsigned int)formatAttr->fldWidth) +\n            (unsigned char)(ch - SECUREC_CHAR('0'));\n    } else {\n        if (formatAttr->fldWidth < 0) {\n            formatAttr->flags |= SECUREC_FLAG_LEFT;\n            formatAttr->fldWidth = (-formatAttr->fldWidth);\n            if (formatAttr->fldWidth > SECUREC_MAX_WIDTH_LEN) {\n                return -1;\n            }\n        }\n    }\n    return 0;\n}\n#ifdef SECUREC_FOR_WCHAR\n/*\n * Formatting output core functions for wchar version.Called by a function such as vswprintf_s\n * argList must not be declare as const\n */\nstatic int SecOutputSW(SecPrintfStream *stream, const wchar_t *cFormat, va_list argList)\n#else\n/*\n * Formatting output core functions for char version.Called by a function such 
as vsnprintf_s\n */\nstatic int SecOutputS(SecPrintfStream *stream, const char *cFormat, va_list argList)\n#endif\n{\n    const SecChar *format = cFormat;\n#if SECUREC_ENABLE_SPRINTF_FLOAT\n    char *floatBuf = NULL;\n#endif\n    SecFormatBuf formatBuf;\n    static const char *itoaUpperDigits = \"0123456789ABCDEFX\";\n    static const char *itoaLowerDigits = \"0123456789abcdefx\";\n    const char *digits = itoaUpperDigits;\n    unsigned int radix = SECUREC_RADIX_DECIMAL;\n    int charsOut;               /* characters written */\n    int prefixLen = 0;  /* Must be initialized or compiler alerts */\n    int padding = 0;\n    int textLen;                /* length of the text */\n    int noOutput = 0; /* Must be initialized or compiler alerts */\n    SecFmtState state;\n    SecFmtState lastState;\n    SecChar prefix[SECUREC_PREFIX_LEN] = { 0 };\n    SecChar ch;                 /* currently read character */\n    static const unsigned char stateTable[SECUREC_STATE_TABLE_SIZE] = {\n        /* type 0:    nospecial meanin;\n         *  1:   '%';\n         *  2:    '.'\n         *  3:    '*'\n         *  4:    '0'\n         *  5:    '1' ... 
'9'\n         *  6:    ' ', '+', '-', '#'\n         *  7:     'h', 'l', 'L', 'F', 'w' , 'N','z','q','t','j'\n         *  8:     'd','o','u','i','x','X','e','f','g'\n         */\n        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        0x06, 0x00, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x00, 0x06, 0x02, 0x00,\n        0x04, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x08, 0x08, 0x00, 0x07, 0x00, 0x00, 0x07, 0x00, 0x07, 0x00,\n        0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00,\n        0x00, 0x00, 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x07, 0x08, 0x07, 0x00, 0x07, 0x00, 0x00, 0x08,\n        0x08, 0x07, 0x00, 0x08, 0x07, 0x08, 0x00, 0x07, 0x08, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00,\n        /* fill zero  for normal char 128 byte for 0x80 - 0xff */\n        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        /* state  0: normal\n         *  1: percent\n         *  2: flag\n 
        *  3: width\n         *  4: dot\n         *  5: precis\n         *  6: size\n         *  7: type\n         *  8: invalid\n         */\n        0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00, 0x01, 0x00, 0x08, 0x08, 0x08, 0x08, 0x08,\n        0x01, 0x00, 0x00, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x03, 0x03, 0x08, 0x05,\n        0x08, 0x08, 0x00, 0x00, 0x00, 0x02, 0x02, 0x03, 0x05, 0x05, 0x08, 0x00, 0x00, 0x00, 0x03, 0x03,\n        0x03, 0x05, 0x05, 0x08, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00,\n        0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x00,\n        0x00\n    };\n\n    SecFormatAttr formatAttr;\n    SecBuffer buffer;\n    formatAttr.flags = 0;\n    formatAttr.bufferIsWide = 0;    /* flag for buffer contains wide chars */\n    formatAttr.fldWidth = 0;\n    formatAttr.precision = 0;\n    formatAttr.dynWidth = 0;\n    formatAttr.dynPrecision = 0;\n    charsOut = 0;\n    textLen = 0;\n    state = STAT_NORMAL;        /* starting state */\n    formatBuf.str = NULL;\n\n    /* loop each format character */\n    /* remove format != NULL */\n    while ((ch = *format) != SECUREC_CHAR('\\0') && charsOut >= 0) {\n        ++format;\n        lastState = state;\n        state = SECUREC_DECODE_STATE(ch, stateTable, lastState);\n        switch (state) {\n            case STAT_NORMAL:\n                SecOutputOneChar(ch, stream, &charsOut);\n                continue;\n            case STAT_PERCENT:\n                /* set default values */\n                prefixLen = 0;\n                noOutput = 0;\n                formatAttr.flags = 0;\n                formatAttr.fldWidth = 0;\n                formatAttr.precision = -1;\n                formatAttr.bufferIsWide = 0;\n                formatAttr.dynWidth = 0;\n                formatAttr.dynPrecision = 0;\n                break;\n            case STAT_FLAG:\n                /* set flag based on which flag character 
*/\n                SecDecodeFlags(ch, &formatAttr);\n                break;\n            case STAT_WIDTH:\n                /* update width value */\n                if (ch == SECUREC_CHAR('*')) {\n                    /* get width */\n                    formatAttr.fldWidth = (int)va_arg(argList, int);\n                    formatAttr.dynWidth = 1;\n                } else {\n                    formatAttr.dynWidth = 0;\n                }\n                if (SecDecodeWidth(ch, &formatAttr, lastState) != 0) {\n                    return -1;\n                }\n                break;\n            case STAT_DOT:\n                formatAttr.precision = 0;\n                break;\n            case STAT_PRECIS:\n                /* update precison value */\n                if (ch == SECUREC_CHAR('*')) {\n                    /* get precision from arg list */\n                    formatAttr.precision = (int)va_arg(argList, int);\n                    formatAttr.dynPrecision = 1;\n                } else {\n                    formatAttr.dynPrecision = 0;\n                }\n                if (SecDecodePrecision(ch, &formatAttr) != 0) {\n                    return -1;\n                }\n                break;\n            case STAT_SIZE:\n                /* read a size specifier, set the formatAttr.flags based on it */\n                if (SecDecodeSize(ch, &formatAttr, &format) != 0) {\n                    /* Compatibility  code for \"%I\" just print I */\n                    SecOutputOneChar(ch, stream, &charsOut);\n                    state = STAT_NORMAL;\n                    continue;\n                }\n                break;\n            case STAT_TYPE:\n                switch (ch) {\n                    case SECUREC_CHAR('C'):\n                        /* wide char */\n                        if (!(formatAttr.flags & (SECUREC_FLAG_SHORT | SECUREC_FLAG_LONG | SECUREC_FLAG_WIDECHAR))) {\n#ifdef SECUREC_FOR_WCHAR\n                            formatAttr.flags |= 
SECUREC_FLAG_SHORT;\n#else\n                            formatAttr.flags |= SECUREC_FLAG_WIDECHAR;\n#endif\n                        }\n                        /* fall-through */\n                        /* FALLTHRU */\n                    case SECUREC_CHAR('c'):\n                        do {\n                            unsigned int cValue = (unsigned int)va_arg(argList, int);\n                            textLen = SecDecodeTypeC(&formatAttr, cValue, &formatBuf, &buffer);\n                            if (textLen < 0) {\n                                noOutput = 1;\n                            }\n                        } SECUREC_WHILE_ZERO;\n                        break;\n                    case SECUREC_CHAR('S'):    /* wide char string */\n                        if (!(formatAttr.flags & (SECUREC_FLAG_SHORT | SECUREC_FLAG_LONG | SECUREC_FLAG_WIDECHAR))) {\n#ifndef SECUREC_FOR_WCHAR\n                            formatAttr.flags |= SECUREC_FLAG_WIDECHAR;\n#else\n                            formatAttr.flags |= SECUREC_FLAG_SHORT;\n#endif\n                        }\n                        /* fall-through */\n                        /* FALLTHRU */\n                    case SECUREC_CHAR('s'):\n                        do {\n                            char *argPtr = (char *)va_arg(argList, char *);\n                            textLen = SecDecodeTypeS(&formatAttr, argPtr, &formatBuf);\n                        } SECUREC_WHILE_ZERO;\n                        break;\n                    case SECUREC_CHAR('n'):\n                        /* higher risk disable it */\n                        return -1;\n                    case SECUREC_CHAR('E'):    /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('F'):    /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('G'):    /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('A'):    /* fall-through */ /* FALLTHRU */\n                        /* convert format char to 
lower , use Explicit conversion to clean up compilation warning */\n                        ch = (SecChar)(ch + ((SecChar)(SECUREC_CHAR('a')) - (SECUREC_CHAR('A'))));\n                        /* fall-through */\n                        /* FALLTHRU */\n                    case SECUREC_CHAR('e'):    /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('f'):    /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('g'):    /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('a'):\n#if SECUREC_ENABLE_SPRINTF_FLOAT\n                        do {\n                            int bufferSize = 0;         /* size of formatBuf.str */\n                            /* floating point conversion */\n                            formatBuf.str = buffer.str; /* output buffer for float string with default size */\n\n                            /* compute the precision value */\n                            if (formatAttr.precision < 0) {\n                                formatAttr.precision = SECUREC_FLOAT_DEFAULT_PRECISION;\n                            } else if (formatAttr.precision == 0 && ch == SECUREC_CHAR('g')) {\n                                formatAttr.precision = 1;\n                            }\n\n                            /* calc buffer size to store double value\n                             * The maximum length of SECUREC_MAX_WIDTH_LEN is enough\n                             */\n                            if (formatAttr.flags & SECUREC_FLAG_LONG_DOUBLE) {\n                                if (formatAttr.precision > (SECUREC_MAX_WIDTH_LEN - SECUREC_FLOAT_BUFSIZE_LB)) {\n                                    noOutput = 1;\n                                    break;\n                                }\n                                /* Long double needs to meet the basic print length */\n                                bufferSize = SECUREC_FLOAT_BUFSIZE_LB + formatAttr.precision + SECUREC_FLOAT_BUF_EXT;\n           
                 } else {\n                                if (formatAttr.precision > (SECUREC_MAX_WIDTH_LEN - SECUREC_FLOAT_BUFSIZE)) {\n                                    noOutput = 1;\n                                    break;\n                                }\n                                /* Double needs to meet the basic print length */\n                                bufferSize = SECUREC_FLOAT_BUFSIZE + formatAttr.precision + SECUREC_FLOAT_BUF_EXT;\n                            }\n                            if (formatAttr.fldWidth > bufferSize) {\n                                bufferSize = formatAttr.fldWidth + SECUREC_FLOAT_BUF_EXT;\n                            }\n\n                            if (bufferSize > SECUREC_BUFFER_SIZE) {\n                                /* the current vlaue of SECUREC_BUFFER_SIZE could NOT store the\n                                 * formatted float string\n                                 */\n                                floatBuf = (char *)SECUREC_MALLOC(((size_t)(unsigned int)bufferSize));\n                                if (floatBuf != NULL) {\n                                    formatBuf.str = floatBuf;\n                                } else {\n                                    noOutput = 1;\n                                    break;\n                                }\n                            }\n\n                            do {\n                                /* add following code to call system sprintf API for float number */\n                                const SecChar *pFloatFmt = format - 2;  /* sub 2 to the position before 'f' or 'g' */\n                                int k;\n                                int fFmtStrLen;\n                                char fFmtBuf[SECUREC_FMT_STR_LEN];\n                                char *fFmtStr = fFmtBuf;\n                                char *fFmtHeap = NULL;    /* to clear warning */\n\n                                while (SECUREC_CHAR('%') != 
*pFloatFmt) { /* must meet '%' */\n                                    --pFloatFmt;\n                                }\n                                fFmtStrLen = (int)((format - pFloatFmt) + 1);   /* with ending terminator */\n                                if (fFmtStrLen > SECUREC_FMT_STR_LEN) {\n                                    /* if SECUREC_FMT_STR_LEN is NOT enough, alloc a new buffer */\n                                    fFmtHeap = (char *)SECUREC_MALLOC((size_t)((unsigned int)fFmtStrLen));\n                                    if (fFmtHeap == NULL) {\n                                        noOutput = 1;\n                                        break;\n                                    } else {\n                                        for (k = 0; k < fFmtStrLen - 1; ++k) {\n                                            /* convert wchar to char */\n                                            fFmtHeap[k] = (char)(pFloatFmt[k]); /* copy the format string */\n                                        }\n                                        fFmtHeap[k] = '\\0';\n\n                                        fFmtStr = fFmtHeap;\n                                    }\n                                } else {\n                                    /* purpose of the repeat code is to solve the tool alarm  Redundant_Null_Check */\n                                    for (k = 0; k < fFmtStrLen - 1; ++k) {\n                                        /* convert wchar to char */\n                                        fFmtBuf[k] = (char)(pFloatFmt[k]);  /* copy the format string */\n                                    }\n                                    fFmtBuf[k] = '\\0';\n                                }\n\n                                if (formatAttr.flags & SECUREC_FLAG_LONG_DOUBLE) {\n#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT\n                                    long double tmp = (long double)va_arg(argList, long double);\n                                    
textLen = SecFormatLongDboule(formatBuf.str, &formatAttr, fFmtStr, tmp);\n#else\n                                    double tmp = (double)va_arg(argList, double);\n                                    textLen = SecFormatDboule(formatBuf.str, &formatAttr, fFmtStr, tmp);\n#endif\n                                } else {\n                                    double tmp = (double)va_arg(argList, double);\n                                    textLen = SecFormatDboule(formatBuf.str, &formatAttr, fFmtStr, tmp);\n                                }\n\n                                if (fFmtHeap != NULL) {\n                                    /* if buffer is alloced on heap, free it */\n                                    SECUREC_FREE(fFmtHeap);\n                                    fFmtHeap = NULL;\n                                    /* to clear e438 last value assigned not used , the compiler will\n                                     * optimize this code\n                                     */\n                                    (void)fFmtHeap;\n                                }\n                                if (textLen < 0 || textLen >= bufferSize) {\n                                    /* bufferSize is large enough, just validation the return value */\n                                    noOutput = 1;\n                                    break;\n                                }\n\n                                /* no padding ,this variable to calculate amount of padding */\n                                formatAttr.fldWidth = textLen;\n                                prefixLen = 0;  /* no padding ,this variable to  calculate amount of padding */\n                                formatAttr.flags = 0;   /* clear all internal formatAttr.flags */\n                                break;\n                            } SECUREC_WHILE_ZERO;\n                        } SECUREC_WHILE_ZERO;\n                        break;\n#else\n                        return -1;\n#endif\n    
                case SECUREC_CHAR('p'): /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('X'): /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('x'):\n                        /* unsigned lower hex output */\n                        digits = itoaLowerDigits;\n                        radix = SECUREC_RADIX_HEX;\n                        switch (ch) {\n                            case SECUREC_CHAR('p'):\n                                /* print a pointer */\n#if defined(SECUREC_COMPATIBLE_WIN_FORMAT)\n                                formatAttr.flags &= ~SECUREC_FLAG_LEADZERO;\n#else\n                                formatAttr.flags |= SECUREC_FLAG_POINTER;\n#endif\n#ifdef SECUREC_ON_64BITS\n                                formatAttr.flags |= SECUREC_FLAG_I64;   /* converting an int64 */\n#else\n                                formatAttr.flags |= SECUREC_FLAG_LONG;  /* converting a long */\n#endif\n\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) || defined(SECUREC_VXWORKS_PLATFORM)) && (!defined(SECUREC_ON_UNIX))\n#if defined(SECUREC_VXWORKS_PLATFORM)\n                                formatAttr.precision = 1;\n#else\n                                formatAttr.precision = 0;\n#endif\n                                formatAttr.flags |= SECUREC_FLAG_ALTERNATE; /* \"0x\" is not default prefix in UNIX */\n                                break;\n#else\n                /* not linux vxwoks */\n#if defined(_AIX) || defined(SECUREC_ON_SOLARIS)\n                                formatAttr.precision = 1;\n#else\n                                formatAttr.precision = 2 * sizeof(void *);  /* 2 precision of different systems */\n#endif\n#endif\n\n#if defined(SECUREC_ON_UNIX)\n                                break;\n#endif\n                                /* fall-through */ /* FALLTHRU */\n                            case SECUREC_CHAR('X'): /* fall-through */ /* FALLTHRU */\n                                /* unsigned upper hex output */\n  
                              digits = itoaUpperDigits;\n                                break;\n                            default:\n                                break;\n                        }\n\n                        if (formatAttr.flags & SECUREC_FLAG_ALTERNATE) {\n                            /* alternate form means '0x' prefix */\n                            prefix[0] = SECUREC_CHAR('0');\n                            prefix[1] = (SecChar)(digits[16]); /* 16 for 'x' or 'X' */\n\n#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) || defined(SECUREC_VXWORKS_PLATFORM))\n                            if (ch == 'p') {\n                                prefix[1] = SECUREC_CHAR('x');\n                            }\n#endif\n#if defined(_AIX) || defined(SECUREC_ON_SOLARIS)\n                            if (ch == 'p') {\n                                prefixLen = 0;\n                            } else {\n                                prefixLen = SECUREC_PREFIX_LEN;\n                            }\n#else\n                            prefixLen = SECUREC_PREFIX_LEN;\n#endif\n\n                        }\n                        /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('i'):    /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('d'):    /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('u'):    /* fall-through */ /* FALLTHRU */\n                    case SECUREC_CHAR('o'):    /* fall-through */ /* FALLTHRU */\n                        switch (ch) {\n                            case SECUREC_CHAR('i'): /* fall-through */ /* FALLTHRU */\n                            case SECUREC_CHAR('d'): /* fall-through */ /* FALLTHRU */\n                                /* signed decimal output */\n                                formatAttr.flags |= SECUREC_FLAG_SIGNED;\n                                /* fall-through */ /* FALLTHRU */\n                            case SECUREC_CHAR('u'):\n                                
radix = SECUREC_RADIX_DECIMAL;\n                                break;\n                            case SECUREC_CHAR('o'):\n                                /* unsigned octal output */\n                                radix = SECUREC_RADIX_OCTAL;\n                                if (formatAttr.flags & SECUREC_FLAG_ALTERNATE) {\n                                    /* alternate form means force a leading 0 */\n                                    formatAttr.flags |= SECUREC_FLAG_FORCE_OCTAL;\n                                }\n                                break;\n                            default:\n                                break;\n                        }\n\n                        do {\n\n                            SecUnsignedInt64 number = 0;    /* number to convert */\n                            SecInt64 l; /* temp long value */\n\n                            /* read argument into variable l */\n                            if (formatAttr.flags & SECUREC_FLAG_I64) {\n                                l = (SecInt64)va_arg(argList, SecInt64);\n                            } else if (formatAttr.flags & SECUREC_FLAG_LONGLONG) {\n                                l = (SecInt64)va_arg(argList, SecInt64);\n                            } else\n#ifdef SECUREC_ON_64BITS\n                            if (formatAttr.flags & SECUREC_FLAG_LONG) {\n                                l = (long)va_arg(argList, long);\n                            } else\n#endif /* SECUREC_ON_64BITS */\n                            if (formatAttr.flags & SECUREC_FLAG_CHAR) {\n                                if (formatAttr.flags & SECUREC_FLAG_SIGNED) {\n                                    l = (char)va_arg(argList, int); /* sign extend */\n                                    if (l >= 128) { /* 128 on some platform, char is always unsigned */\n                                        SecUnsignedInt64 tmpL = (SecUnsignedInt64)l;\n                                        unsigned char tmpCh = (unsigned 
char)(~(tmpL));\n                                        l = tmpCh + 1;\n                                        formatAttr.flags |= SECUREC_FLAG_NEGATIVE;\n                                    }\n                                } else {\n                                    l = (unsigned char)va_arg(argList, int);    /* zero-extend */\n                                }\n\n                            } else if (formatAttr.flags & SECUREC_FLAG_SHORT) {\n                                if (formatAttr.flags & SECUREC_FLAG_SIGNED) {\n                                    l = (short)va_arg(argList, int);    /* sign extend */\n                                } else {\n                                    l = (unsigned short)va_arg(argList, int);   /* zero-extend */\n                                }\n\n                            }\n#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT\n                            else if (formatAttr.flags & SECUREC_FLAG_PTRDIFF) {\n                                l = (ptrdiff_t)va_arg(argList, ptrdiff_t);  /* sign extend */\n                            } else if (formatAttr.flags & SECUREC_FLAG_SIZE) {\n                                if (formatAttr.flags & SECUREC_FLAG_SIGNED) {\n                                    /* No suitable macros were found to handle the branch */\n                                    if (SecIsSameSize(sizeof(size_t), sizeof(long))) {\n                                        l = va_arg(argList, long);  /* sign extend */\n                                    } else if (SecIsSameSize(sizeof(size_t), sizeof(long long))) {\n                                        l = va_arg(argList, long long); /* sign extend */\n                                    } else {\n                                        l = va_arg(argList, int);   /* sign extend */\n                                    }\n                                } else {\n                                    l = (SecInt64)(size_t)va_arg(argList, size_t);  /* sign extend */\n              
                  }\n                            } else if (formatAttr.flags & SECUREC_FLAG_INTMAX) {\n                                if (formatAttr.flags & SECUREC_FLAG_SIGNED) {\n                                    l = va_arg(argList, SecInt64);  /* sign extend */\n                                } else {\n                                    /* sign extend */\n                                    l = (SecInt64)(SecUnsignedInt64)va_arg(argList, SecUnsignedInt64);\n                                }\n                            }\n#endif\n                            else {\n                                if (formatAttr.flags & SECUREC_FLAG_SIGNED) {\n                                    l = va_arg(argList, int);   /* sign extend */\n                                } else {\n                                    l = (unsigned int)va_arg(argList, int); /* zero-extend */\n                                }\n\n                            }\n\n                            /* check for negative; copy into number */\n                            if ((formatAttr.flags & SECUREC_FLAG_SIGNED) && l < 0) {\n                                number = (SecUnsignedInt64)(-l);\n                                formatAttr.flags |= SECUREC_FLAG_NEGATIVE;\n                            } else {\n                                number = (SecUnsignedInt64)l;\n                            }\n\n                            if (((formatAttr.flags & SECUREC_FLAG_I64) == 0) &&\n#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT\n                                ((formatAttr.flags & SECUREC_FLAG_INTMAX) == 0) &&\n#endif\n#ifdef SECUREC_ON_64BITS\n                                ((formatAttr.flags & SECUREC_FLAG_PTRDIFF) == 0) &&\n                                ((formatAttr.flags & SECUREC_FLAG_SIZE) == 0) &&\n#if !defined(SECUREC_COMPATIBLE_WIN_FORMAT)  /* on window 64 system sizeof long is 32bit */\n                                ((formatAttr.flags & SECUREC_FLAG_LONG) == 0) &&\n#endif\n#endif\n                    
            ((formatAttr.flags & SECUREC_FLAG_LONGLONG) == 0)) {\n\n                                    number &= 0xffffffff;  /* use 0xffffffff as 32 bit mask */\n                            }\n\n                            /* check precision value for default */\n                            if (formatAttr.precision < 0) {\n                                formatAttr.precision = 1;   /* default precision */\n                            } else {\n#if defined(SECUREC_COMPATIBLE_WIN_FORMAT)\n                                formatAttr.flags &= ~SECUREC_FLAG_LEADZERO;\n#else\n                                if (!(formatAttr.flags & SECUREC_FLAG_POINTER)) {\n                                    formatAttr.flags &= ~SECUREC_FLAG_LEADZERO;\n                                }\n#endif\n                                if (formatAttr.precision > SECUREC_MAX_PRECISION) {\n                                    formatAttr.precision = SECUREC_MAX_PRECISION;\n                                }\n                            }\n\n                            /* Check if data is 0; if so, turn off hex prefix,\n                             * 'p' add 0x prefix, otherwise not add prefix\n                             */\n                            if (number == 0) {\n#if !(defined(SECUREC_VXWORKS_PLATFORM) || defined(__hpux))\n                                prefixLen = 0;\n#else\n                                if ((ch == 'p') && (formatAttr.flags & SECUREC_FLAG_ALTERNATE)) {\n                                    prefixLen = SECUREC_PREFIX_LEN;\n                                } else {\n                                    prefixLen = 0;\n                                }\n#endif\n                            }\n\n                            /* Convert data to ASCII */\n                            formatBuf.str = &buffer.str[SECUREC_BUFFER_SIZE];\n\n                            if (number > 0) {\n#ifdef SECUREC_ON_64BITS\n                                switch (radix) {\n                           
         /* the compiler will optimize each one */\n                                    case SECUREC_RADIX_DECIMAL:\n                                        SECUREC_SPECIAL_QWORD_BASE10(number);\n                                        break;\n                                    case SECUREC_RADIX_HEX:\n                                        SECUREC_SPECIAL_QWORD(number, SECUREC_RADIX_HEX);\n                                        break;\n                                    case SECUREC_RADIX_OCTAL:\n                                        SECUREC_SPECIAL_QWORD(number, SECUREC_RADIX_OCTAL);\n                                        break;\n                                    default:\n                                        break;\n                                }\n#else /* for 32 bits system */\n                                if (number <= 0xFFFFFFFFUL) {\n                                    /* in most case, the value to be converted is small value */\n                                    SecUnsignedInt32 n32Tmp = (SecUnsignedInt32)number;\n                                    switch (radix) {\n                                        case SECUREC_RADIX_HEX:\n                                            SECUREC_SPECIAL_DWORD(n32Tmp, SECUREC_RADIX_HEX);\n                                            break;\n                                        case SECUREC_RADIX_OCTAL:\n                                            SECUREC_SPECIAL_DWORD(n32Tmp, SECUREC_RADIX_OCTAL);\n                                            break;\n\n#ifdef _AIX\n                                        /* the compiler will optimize div 10 */\n                                        case SECUREC_RADIX_DECIMAL:\n                                            SECUREC_SPECIAL_DWORD(n32Tmp, SECUREC_RADIX_DECIMAL);\n                                            break;\n#else\n                                        case SECUREC_RADIX_DECIMAL:\n                                            do {\n                 
                               /* fast div 10 */\n                                                SecUnsignedInt32 q;\n                                                SecUnsignedInt32 r;\n                                                do {\n                                                    *--formatBuf.str = digits[n32Tmp % SECUREC_RADIX_DECIMAL];\n                                                    q = (n32Tmp >> 1) + (n32Tmp >> 2); /* fast div  magic 2 */\n                                                    q = q + (q >> 4); /* fast div  magic 4 */\n                                                    q = q + (q >> 8); /* fast div  magic 8 */\n                                                    q = q + (q >> 16); /* fast div  magic 16 */\n                                                    q = q >> 3; /* fast div  magic 3 */\n                                                    r = n32Tmp - SECUREC_MUL_TEN(q);\n                                                    n32Tmp = (r > 9) ? (q + 1) : q; /* fast div  magic 9 */\n                                                } while (n32Tmp != 0);\n                                            } SECUREC_WHILE_ZERO;\n                                            break;\n#endif\n                                        default:\n                                            break;\n                                    }   /* end switch */\n                                } else {\n                                    /* the value to be converted is greater than 4G */\n#if defined(SECUREC_VXWORKS_VERSION_5_4)\n                                    do {\n                                        SecUnsignedInt32 digit = 0; /* ascii value of digit */\n                                        SecUnsignedInt64 quotient = 0;\n                                        if (SecU64Div32(number,(SecUnsignedInt32)radix, &quotient, &digit) != 0) {\n                                            noOutput = 1;\n                                            
break;\n                                        }\n                                        *--formatBuf.str = digits[digit];\n                                        number = quotient;\n                                    } while (number != 0);\n#else\n                                    switch (radix) {\n                                        /* the compiler will optimize div 10 */\n                                        case SECUREC_RADIX_DECIMAL:\n                                            SECUREC_SPECIAL_QWORD_BASE10(number);\n                                            break;\n                                        case SECUREC_RADIX_OCTAL:\n                                            SECUREC_SPECIAL_QWORD(number, SECUREC_RADIX_OCTAL);\n                                            break;\n                                        case SECUREC_RADIX_HEX:\n                                            SECUREC_SPECIAL_QWORD(number, SECUREC_RADIX_HEX);\n                                            break;\n                                        default:\n                                            break;\n                                    }\n#endif\n                                }\n#endif\n\n                            }\n                            /* compute length of number,.if textLen > 0, then formatBuf.str must be in buffer.str */\n                            textLen = (int)(size_t)((char *)&buffer.str[SECUREC_BUFFER_SIZE] - formatBuf.str);\n                            if (formatAttr.precision > textLen) {\n                                int ii;\n                                for (ii = 0; ii < formatAttr.precision - textLen; ++ii) {\n                                    *--formatBuf.str = '0';\n                                }\n                                textLen = formatAttr.precision;\n                            }\n\n                            /* Force a leading zero if FORCEOCTAL flag set */\n                            if ((formatAttr.flags & 
SECUREC_FLAG_FORCE_OCTAL) &&\n                                (textLen == 0 || formatBuf.str[0] != '0')) {\n                                *--formatBuf.str = '0';\n                                ++textLen;  /* add a zero */\n                            }\n                        } SECUREC_WHILE_ZERO;\n                        break;\n                    default:\n                        break;\n                }\n\n                while (noOutput < 1) {\n                    if (formatAttr.flags & SECUREC_FLAG_SIGNED) {\n                        if (formatAttr.flags & SECUREC_FLAG_NEGATIVE) {\n                            /* prefix is a '-' */\n                            prefix[0] = SECUREC_CHAR('-');\n                            prefixLen = 1;\n                        } else if (formatAttr.flags & SECUREC_FLAG_SIGN) {\n                            /* prefix is '+' */\n                            prefix[0] = SECUREC_CHAR('+');\n                            prefixLen = 1;\n                        } else if (formatAttr.flags & SECUREC_FLAG_SIGN_SPACE) {\n                            /* prefix is ' ' */\n                            prefix[0] = SECUREC_CHAR(' ');\n                            prefixLen = 1;\n                        }\n                    }\n\n#if defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && (!defined(SECUREC_ON_UNIX))\n                    if ((formatAttr.flags & SECUREC_FLAG_POINTER) && (textLen == 0)) {\n                        formatAttr.flags &= ~SECUREC_FLAG_LEADZERO;\n                        formatBuf.str = &buffer.str[SECUREC_BUFFER_SIZE - 1];\n                        *formatBuf.str-- = '\\0';\n                        *formatBuf.str-- = ')';\n                        *formatBuf.str-- = 'l';\n                        *formatBuf.str-- = 'i';\n                        *formatBuf.str-- = 'n';\n                        *formatBuf.str = '(';\n                        textLen = 5; /* length of (nil) is 5 */\n                    }\n#endif\n\n                    /* 
calculate amount of padding */\n                    padding = (formatAttr.fldWidth - textLen) - prefixLen;\n\n                    /* put out the padding, prefix, and text, in the correct order */\n\n                    if (!(formatAttr.flags & (SECUREC_FLAG_LEFT | SECUREC_FLAG_LEADZERO)) && padding > 0) {\n                        /* pad on left with blanks */\n                        if (SECUREC_IS_REST_BUF_ENOUGH(stream, padding)) {\n                            /* char * cast to wchar * */\n                            SECUREC_SAFE_PADDING(SECUREC_CHAR(' '), padding, stream, &charsOut);\n                        } else {\n                            SECUREC_WRITE_MULTI_CHAR(SECUREC_CHAR(' '), padding, stream, &charsOut);\n                        }\n                    }\n\n                    /* write prefix */\n                    if (prefixLen > 0) {\n                        SecChar *pPrefix = prefix;\n                        if (SECUREC_IS_REST_BUF_ENOUGH(stream, prefixLen)) {\n                            /* max prefix len is 2, use loop copy */ /* char * cast to wchar * in WCHAR version */\n                            SECUREC_SAFE_WRITE_STR_OPT(pPrefix, prefixLen, stream, &charsOut);\n                        } else {\n                            SECUREC_WRITE_STRING(prefix, prefixLen, stream, &charsOut);\n                        }\n                    }\n\n                    if ((formatAttr.flags & SECUREC_FLAG_LEADZERO) && !(formatAttr.flags & SECUREC_FLAG_LEFT)\n                        && padding > 0) {\n                        /* write leading zeros */\n                        if (SECUREC_IS_REST_BUF_ENOUGH(stream, padding)) {\n                            /* char * cast to wchar * */\n                            SECUREC_SAFE_PADDING(SECUREC_CHAR('0'), padding, stream, &charsOut);\n                        } else {\n                            SECUREC_WRITE_MULTI_CHAR(SECUREC_CHAR('0'), padding, stream, &charsOut);\n                        }\n                  
  }\n\n                    /* write text */\n#ifndef SECUREC_FOR_WCHAR\n                    if (formatAttr.bufferIsWide != 0 && (textLen > 0)) {\n#if SECUREC_HAVE_WCTOMB\n                        wchar_t *p = formatBuf.wStr;\n                        int count = textLen;\n                        while (count > 0) {\n                            char tmpBuf[SECUREC_MB_LEN + 1];\n                            SECUREC_MASK_MSVC_CRT_WARNING\n                            int retVal = wctomb(tmpBuf, *p);\n                            SECUREC_END_MASK_MSVC_CRT_WARNING\n                            if (retVal <= 0) {\n                                charsOut = -1;\n                                break;\n                            }\n                            SECUREC_WRITE_STRING(tmpBuf, retVal, stream, &charsOut);\n                            --count;\n                            ++p;\n                        }\n#else\n                        charsOut = -1;\n                        break;\n#endif\n                    } else {\n                        if (SECUREC_IS_REST_BUF_ENOUGH(stream, textLen)) {\n                            SECUREC_SAFE_WRITE_STR(formatBuf.str, textLen, stream, &charsOut);\n                        } else {\n                            SECUREC_WRITE_STRING(formatBuf.str, textLen, stream, &charsOut);\n                        }\n                    }\n#else /* SECUREC_FOR_WCHAR */\n                    if (formatAttr.bufferIsWide == 0 && textLen > 0) {\n#if SECUREC_HAVE_MBTOWC\n                        int count = textLen;\n                        char *p = formatBuf.str;\n\n                        while (count > 0) {\n                            wchar_t wChar = L'\\0';\n                            int retVal = mbtowc(&wChar, p, (size_t)MB_CUR_MAX);\n                            if (retVal <= 0) {\n                                charsOut = -1;\n                                break;\n                            }\n                            
SecWriteCharW(wChar, stream, &charsOut);\n                            p += retVal;\n                            count -= retVal;\n                        }\n#else\n                        charsOut = -1;\n                        break;\n#endif\n                    } else {\n                        if (SECUREC_IS_REST_BUF_ENOUGH(stream, textLen)) {\n                            /* char * cast to wchar * */\n                            SECUREC_SAFE_WRITE_STR(formatBuf.wStr, textLen, stream, &charsOut);\n                        } else {\n                            SECUREC_WRITE_STRING(formatBuf.wStr, textLen, stream, &charsOut);\n                        }\n                    }\n#endif /* SECUREC_FOR_WCHAR */\n\n                    if (charsOut >= 0 && (formatAttr.flags & SECUREC_FLAG_LEFT) && padding > 0) {\n                        /* pad on right with blanks */\n                        if (SECUREC_IS_REST_BUF_ENOUGH(stream, padding)) {\n                            /* char * cast to wchar * */\n                            SECUREC_SAFE_PADDING(SECUREC_CHAR(' '), padding, stream, &charsOut);\n                        } else {\n                            SECUREC_WRITE_MULTI_CHAR(SECUREC_CHAR(' '), padding, stream, &charsOut);\n                        }\n                    }\n                    break;\n                }\n#if SECUREC_ENABLE_SPRINTF_FLOAT\n                if (floatBuf != NULL) {\n                    SECUREC_FREE(floatBuf);\n                    floatBuf = NULL;\n                }\n#endif\n                break;\n            case STAT_INVALID:\n                return -1;\n            default:\n                return -1;          /* input format is wrong, directly return */\n        }\n    }\n\n    if (state != STAT_NORMAL && state != STAT_TYPE) {\n        return -1;\n    }\n\n    return charsOut;            /* the number of characters written */\n}\n#endif /* OUTPUT_INL_2B263E9C_43D8_44BB_B17A_6D2033DECEE5 */\n\n"
  },
  {
    "path": "third_party/securec/src/scanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securec.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The scanf_s function is equivalent to fscanf_s with the argument stdin interposed before the arguments to scanf_s\n *     The scanf_s function reads data from the standard input stream stdin and\n *    writes the data into the location that's given by argument. Each argument\n *    must be a pointer to a variable of a type that corresponds to a type specifier\n *    in format. If copying occurs between strings that overlap, the behavior is\n *    undefined.\n *\n * <INPUT PARAMETERS>\n *    format                  Format control string.\n *    ...                       Optional arguments.\n *\n * <OUTPUT PARAMETERS>\n *    ...                       
The converted value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Returns the number of fields successfully converted and assigned;\n *    the return value does not include fields that were read but not assigned.\n *    A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\n\nint scanf_s(const char *format, ...)\n{\n    int ret;                    /* If initialization causes  e838 */\n    va_list argList;\n\n    va_start(argList, format);\n    ret = vscanf_s(format, argList);\n    va_end(argList);\n    (void)argList;              /* to clear e438 last value assigned not used , the compiler will optimize this code */\n\n    return ret;\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/secinput.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef SEC_INPUT_H_E950DA2C_902F_4B15_BECD_948E99090D9C\n#define SEC_INPUT_H_E950DA2C_902F_4B15_BECD_948E99090D9C\n#include \"securecutil.h\"\n\n#define SECUREC_SCANF_EINVAL             (-1)\n#define SECUREC_SCANF_ERROR_PARA         (-2)\n\n/* for internal stream flag */\n#define SECUREC_MEM_STR_FLAG             0X01\n#define SECUREC_FILE_STREAM_FLAG         0X02\n#define SECUREC_FROM_STDIN_FLAG          0X04\n#define SECUREC_LOAD_FILE_TO_MEM_FLAG    0X08\n\n#define SECUREC_UNINITIALIZED_FILE_POS   (-1)\n#define SECUREC_BOM_HEADER_SIZE          2\n#define SECUREC_BOM_HEADER_BE_1ST        0xFEU\n#define SECUREC_BOM_HEADER_BE_2ST        0xFFU\n#define SECUREC_BOM_HEADER_LE_1ST        0xFFU\n#define SECUREC_BOM_HEADER_LE_2ST        0xFEU\n#define SECUREC_UTF8_BOM_HEADER_SIZE     3\n#define SECUREC_UTF8_BOM_HEADER_1ST      0xEFU\n#define SECUREC_UTF8_BOM_HEADER_2ND      0xBBU\n#define SECUREC_UTF8_BOM_HEADER_3RD      0xBFU\n#define SECUREC_UTF8_LEAD_1ST            0xE0\n#define SECUREC_UTF8_LEAD_2ND            0x80\n\ntypedef struct {\n    unsigned int flag;          /* mark the properties of input stream */\n    int count;                  /* the size of buffered string in bytes */\n    const char *cur;            /* the pointer to next read position */\n    char *base;                 /* the pointer to the header of buffered string 
*/\n#if SECUREC_ENABLE_SCANF_FILE\n    FILE *pf;                   /* the file pointer */\n    long oriFilePos;            /* the original position of file offset when fscanf is called */\n    int fileRealRead;\n#if defined(SECUREC_NO_STD_UNGETC)\n    unsigned int lastChar;      /* the char code of last input */\n    int fUnget;                 /* the boolean flag of pushing a char back to read stream */\n#endif\n#endif\n} SecFileStream;\n\n\n#define SECUREC_INIT_SEC_FILE_STREAM_COMMON(fileStream, streamFlag, curPtr, strCount) do { \\\n    (fileStream).flag = (streamFlag); \\\n    (fileStream).count = (strCount); \\\n    (fileStream).cur = (curPtr); \\\n    (fileStream).base = NULL; \\\n} SECUREC_WHILE_ZERO\n\n#if SECUREC_ENABLE_SCANF_FILE\n#if defined(SECUREC_NO_STD_UNGETC)\n/* This initialization for eliminating redundant initialization.\n * Compared with the previous version initialization 0,\n * the current code causes the binary size to increase by some bytes\n */\n#define SECUREC_INIT_SEC_FILE_STREAM(fileStream, streamFlag, stream, filePos, curPtr, strCount) do { \\\n    SECUREC_INIT_SEC_FILE_STREAM_COMMON((fileStream), (streamFlag), (curPtr), (strCount)); \\\n    (fileStream).pf = (stream); \\\n    (fileStream).oriFilePos = (filePos); \\\n    (fileStream).fileRealRead = 0; \\\n    (fileStream).lastChar = 0; \\\n    (fileStream).fUnget = 0; \\\n} SECUREC_WHILE_ZERO\n#else\n#define SECUREC_INIT_SEC_FILE_STREAM(fileStream, streamFlag, stream, filePos, curPtr, strCount) do { \\\n    SECUREC_INIT_SEC_FILE_STREAM_COMMON((fileStream), (streamFlag), (curPtr), (strCount)); \\\n    (fileStream).pf = (stream); \\\n    (fileStream).oriFilePos = (filePos); \\\n    (fileStream).fileRealRead = 0; \\\n} SECUREC_WHILE_ZERO\n#endif\n#else /* No SECUREC_ENABLE_SCANF_FILE */\n#define SECUREC_INIT_SEC_FILE_STREAM(fileStream, streamFlag, stream, filePos, curPtr, strCount) do { \\\n    SECUREC_INIT_SEC_FILE_STREAM_COMMON((fileStream), (streamFlag), (curPtr), (strCount)); \\\n} 
SECUREC_WHILE_ZERO\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n    extern int SecInputS(SecFileStream *stream, const char *cFormat, va_list argList);\n    extern void SecClearDestBuf(const char *buffer, const char *format, va_list argList);\n#if SECUREC_IN_KERNEL == 0\n    extern int SecInputSW(SecFileStream *stream, const wchar_t *cFormat, va_list argList);\n    extern void SecClearDestBufW(const wchar_t *buffer, const wchar_t *format, va_list argList);\n#endif\n/* 20150105 For software and hardware decoupling,such as UMG */\n#if defined(SECUREC_SYSAPI4VXWORKS)\n#ifdef feof\n#undef feof\n#endif\n    extern int feof(FILE *stream);\n#endif\n\n#if defined(SECUREC_SYSAPI4VXWORKS) || defined(SECUREC_CTYPE_MACRO_ADAPT)\n#ifndef isspace\n#define isspace(c) (((c) == ' ') || ((c) == '\\t') || ((c) == '\\r') || ((c) == '\\n'))\n#endif\n#ifndef iswspace\n#define iswspace(c) (((c) == L' ') || ((c) == L'\\t') || ((c) == L'\\r') || ((c) == L'\\n'))\n#endif\n#ifndef isascii\n#define isascii(c) (((unsigned char)(c)) <= 0x7f)\n#endif\n#ifndef isupper\n#define isupper(c) ((c) >= 'A' && (c) <= 'Z')\n#endif\n#ifndef islower\n#define islower(c) ((c) >= 'a' && (c) <= 'z')\n#endif\n#ifndef isalpha\n#define isalpha(c) (isupper(c) || (islower(c)))\n#endif\n#ifndef isdigit\n#define isdigit(c) ((c) >= '0' && (c) <= '9')\n#endif\n#ifndef isxupper\n#define isxupper(c) ((c) >= 'A' && (c) <= 'F')\n#endif\n#ifndef isxlower\n#define isxlower(c) ((c) >= 'a' && (c) <= 'f')\n#endif\n#ifndef isxdigit\n#define isxdigit(c) (isdigit(c) || isxupper(c) || isxlower(c))\n#endif\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n/* Reserved file operation macro interface */\n#define SECUREC_LOCK_FILE(s)\n#define SECUREC_UNLOCK_FILE(s)\n#define SECUREC_LOCK_STDIN(i, s)\n#define SECUREC_UNLOCK_STDIN(i, s)\n#endif\n\n\n"
  },
  {
    "path": "third_party/securec/src/securecutil.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/* Avoid duplicate header files,not include securecutil.h */\n#include \"securecutil.h\"\n\n\n#if defined(ANDROID) && (SECUREC_HAVE_WCTOMB || SECUREC_HAVE_MBTOWC)\n#include <wchar.h>\n#if SECUREC_HAVE_WCTOMB\n/*\n * Convert wide characters to narrow multi-bytes\n */\nint wctomb(char *s, wchar_t wc)\n{\n    return wcrtomb(s, wc, NULL);\n}\n#endif\n\n#if SECUREC_HAVE_MBTOWC\n/*\n * Converting narrow multi-byte characters to wide characters\n */\nint mbtowc(wchar_t *pwc, const char *s, size_t n)\n{\n    return mbrtowc(pwc, s, n, NULL);\n}\n#endif\n#endif\n\n/* high Num << 8 | num of SPC Ver */\n#define SECUREC_C_VERSION     (0x5 << 8)\n#define SECUREC_SPC_VERSION   7\n#define SECUREC_VERSION_STR   \"Huawei Secure C V100R001C01SPC007B002\"\n\n/* SPC verNumber<->verStr like:\n * 0X201<->C01\n * 0X202<->SPC001   Redefine numbers after this version\n * 0X502<->SPC002\n * 0X503<->SPC003\n * ...\n * 0X50a<->SPC010\n * 0X50b<->SPC011\n * ...\n */\n/* CP  verNumber<->verStr like:\n * 0X601<->CP0001\n * 0X602<->CP0002\n * ...\n */\nconst char *GetHwSecureCVersion(unsigned short *verNumber)\n{\n    if (verNumber != NULL) {\n        *verNumber = (unsigned short)(SECUREC_C_VERSION | SECUREC_SPC_VERSION);\n    }\n    return SECUREC_VERSION_STR;\n}\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(GetHwSecureCVersion);\n#endif\n\n"
  },
  {
    "path": "third_party/securec/src/securecutil.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef SECURECUTIL_H_46C86578_F8FF_4E49_8E64_9B175241761F\n#define SECURECUTIL_H_46C86578_F8FF_4E49_8E64_9B175241761F\n#include \"securec.h\"\n\n#if (defined(_MSC_VER)) && (_MSC_VER >= 1400)\n#define SECUREC_MASK_MSVC_CRT_WARNING __pragma(warning(push)) \\\n    __pragma(warning(disable:4996 4127))\n#define SECUREC_END_MASK_MSVC_CRT_WARNING  __pragma(warning(pop))\n#else\n#define SECUREC_MASK_MSVC_CRT_WARNING\n#define SECUREC_END_MASK_MSVC_CRT_WARNING\n#endif\n#define SECUREC_WHILE_ZERO SECUREC_MASK_MSVC_CRT_WARNING while (0) SECUREC_END_MASK_MSVC_CRT_WARNING\n\n#ifndef SECUREC_HAVE_STRNLEN\n#if (defined(_XOPEN_SOURCE) && _XOPEN_SOURCE >= 700) || (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 200809L)\n#if SECUREC_IN_KERNEL\n#define SECUREC_HAVE_STRNLEN 0\n#else\n#if defined(__GLIBC__) && __GLIBC__ >= 2 && defined(__GLIBC_MINOR__) && __GLIBC_MINOR__ >= 10\n#define SECUREC_HAVE_STRNLEN 1\n#else\n#define SECUREC_HAVE_STRNLEN 0\n#endif\n#endif\n#else\n#define SECUREC_HAVE_STRNLEN 0\n#endif\n#endif\n\n#if SECUREC_IN_KERNEL\n/* in kernel disbale functions */\n#ifndef SECUREC_ENABLE_SCANF_FILE\n#define SECUREC_ENABLE_SCANF_FILE 0\n#endif\n#ifndef SECUREC_ENABLE_SCANF_FLOAT\n#define SECUREC_ENABLE_SCANF_FLOAT 0\n#endif\n#ifndef SECUREC_ENABLE_SPRINTF_FLOAT\n#define SECUREC_ENABLE_SPRINTF_FLOAT 0\n#endif\n#ifndef 
SECUREC_HAVE_MBTOWC\n#define SECUREC_HAVE_MBTOWC 0\n#endif\n#ifndef SECUREC_HAVE_WCTOMB\n#define SECUREC_HAVE_WCTOMB 0\n#endif\n#ifndef SECUREC_HAVE_WCHART\n#define SECUREC_HAVE_WCHART 0\n#endif\n#else /* no in kernel */\n/* Systems that do not support file, can define this macro to 0. */\n#ifndef SECUREC_ENABLE_SCANF_FILE\n#define SECUREC_ENABLE_SCANF_FILE 1\n#endif\n#ifndef SECUREC_ENABLE_SCANF_FLOAT\n#define SECUREC_ENABLE_SCANF_FLOAT 1\n#endif\n/* Systems that do not support float, can define this macro to 0. */\n#ifndef SECUREC_ENABLE_SPRINTF_FLOAT\n#define SECUREC_ENABLE_SPRINTF_FLOAT 1\n#endif\n#ifndef SECUREC_HAVE_MBTOWC\n#define SECUREC_HAVE_MBTOWC 1\n#endif\n#ifndef SECUREC_HAVE_WCTOMB\n#define SECUREC_HAVE_WCTOMB 1\n#endif\n#ifndef SECUREC_HAVE_WCHART\n#define SECUREC_HAVE_WCHART 1\n#endif\n#endif\n\n\n#define SECUREC_INT_MAX                     2147483647\n#define SECUREC_MUL_SIXTEEN(x)              ((x) << 4)\n#define SECUREC_MUL_EIGHT(x)                ((x) << 3)\n#define SECUREC_MUL_TEN(x)                  ((((x) << 2) + (x)) << 1)\n/* Limited format input and output width */\n#define SECUREC_MAX_WIDTH_LEN_DIV_TEN       21474836\n#define SECUREC_MAX_WIDTH_LEN               SECUREC_MUL_TEN(SECUREC_MAX_WIDTH_LEN_DIV_TEN)\n/* Is the x multiplied by 10 greater than */\n#define SECUREC_MUL_TEN_ADD_BEYOND_MAX(x)   (((x) > SECUREC_MAX_WIDTH_LEN_DIV_TEN))\n\n#define SECUREC_FLOAT_BUFSIZE               (309 + 40)  /* Max length of double value */\n#define SECUREC_FLOAT_BUFSIZE_LB            (4932 + 40) /* Max length of long double value */\n#define SECUREC_FLOAT_DEFAULT_PRECISION     6\n\n/* This macro does not handle pointer equality or integer overflow */\n#define SECUREC_MEMORY_NO_OVERLAP(dest, src, count) \\\n    (((src) < (dest) && ((const char *)(src) + (count)) <= (char *)(dest)) || \\\n    ((dest) < (src) && ((char *)(dest) + (count)) <= (const char *)(src)))\n\n#define SECUREC_MEMORY_IS_OVERLAP(dest, src, count) \\\n    (((src) < (dest) && ((const 
char *)(src) + (count)) > (char *)(dest)) || \\\n    ((dest) < (src) && ((char *)(dest) + (count)) > (const char *)(src)))\n\n/*\n * Check whether the strings overlap, len is the length of the string not include terminator\n * Length is related to data type char or wchar , do not force conversion of types\n */\n#define SECUREC_STRING_NO_OVERLAP(dest, src, len) \\\n    (((src) < (dest) && ((src) + (len)) < (dest)) || \\\n    ((dest) < (src) && ((dest) + (len)) < (src)))\n\n/*\n * Check whether the strings overlap for strcpy wcscpy function, dest len and src Len are not include terminator\n * Length is related to data type char or wchar , do not force conversion of types\n */\n#define SECUREC_STRING_IS_OVERLAP(dest, src, len) \\\n    (((src) < (dest) && ((src) + (len)) >= (dest)) || \\\n    ((dest) < (src) && ((dest) + (len)) >= (src)))\n\n/*\n * Check whether the strings overlap for strcat wcscat function, dest len and src Len are not include terminator\n * Length is related to data type char or wchar , do not force conversion of types\n */\n#define SECUREC_CAT_STRING_IS_OVERLAP(dest, destLen, src, srcLen) \\\n    (((dest) < (src) && ((dest) + (destLen) + (srcLen)) >= (src)) || \\\n    ((src) < (dest) && ((src) + (srcLen)) >= (dest)))\n\n\n#if SECUREC_HAVE_STRNLEN\n#define SECUREC_CALC_STR_LEN(str, maxLen, outLen) do { \\\n    *(outLen) = strnlen((str), (maxLen)); \\\n} SECUREC_WHILE_ZERO\n#define SECUREC_CALC_STR_LEN_OPT(str, maxLen, outLen) do { \\\n    if ((maxLen) > 8) { \\\n        /* Optimization or len less then 8 */ \\\n        if (*((str) + 0) == '\\0') { \\\n            *(outLen) = 0; \\\n        } else if (*((str) + 1) == '\\0') { \\\n            *(outLen) = 1; \\\n        } else if (*((str) + 2) == '\\0') { \\\n            *(outLen) = 2; \\\n        } else if (*((str) + 3) == '\\0') { \\\n            *(outLen) = 3; \\\n        } else if (*((str) + 4) == '\\0') { \\\n            *(outLen) = 4; \\\n        } else if (*((str) + 5) == '\\0') { \\\n           
 *(outLen) = 5; \\\n        } else if (*((str) + 6) == '\\0') { \\\n            *(outLen) = 6; \\\n        } else if (*((str) + 7) == '\\0') { \\\n            *(outLen) = 7; \\\n        } else if (*((str) + 8) == '\\0') { \\\n            /* Optimization with a length of 8 */ \\\n            *(outLen) = 8; \\\n        } else { \\\n            /* The offset is 8 because the performance of 8 byte alignment is high */ \\\n            *(outLen) = 8 + strnlen((str) + 8, (maxLen) - 8); \\\n        } \\\n    } else { \\\n        SECUREC_CALC_STR_LEN((str), (maxLen), (outLen)); \\\n    } \\\n} SECUREC_WHILE_ZERO\n#else\n#define SECUREC_CALC_STR_LEN(str, maxLen, outLen) do { \\\n    const char *strEnd = (const char *)(str); \\\n    size_t availableSize = (size_t)(maxLen); \\\n    while (availableSize > 0 && *strEnd != '\\0') { \\\n        --availableSize; \\\n        ++strEnd; \\\n    } \\\n    *(outLen) = (size_t)(strEnd - (str)); \\\n} SECUREC_WHILE_ZERO\n#define SECUREC_CALC_STR_LEN_OPT SECUREC_CALC_STR_LEN\n#endif\n\n#define SECUREC_CALC_WSTR_LEN(str, maxLen, outLen) do { \\\n    const wchar_t *strEnd = (const wchar_t *)(str); \\\n    *(outLen) = 0; \\\n    while (*(outLen) < (maxLen) && *strEnd != L'\\0') { \\\n        *(outLen) = *(outLen) + 1; \\\n        ++strEnd; \\\n    } \\\n} SECUREC_WHILE_ZERO\n\n\n#ifdef SECUREC_FORMAT_OUTPUT_INPUT\n#if defined(SECUREC_COMPATIBLE_WIN_FORMAT) || defined(__ARMCC_VERSION)\ntypedef __int64 SecInt64;\ntypedef unsigned __int64 SecUnsignedInt64;\n#if defined(__ARMCC_VERSION)\ntypedef unsigned int SecUnsignedInt32;\n#else\ntypedef unsigned __int32 SecUnsignedInt32;\n#endif\n#else\ntypedef unsigned int SecUnsignedInt32;\ntypedef long long SecInt64;\ntypedef unsigned long long SecUnsignedInt64;\n#endif\n\n#ifdef SECUREC_FOR_WCHAR\n#if defined(SECUREC_VXWORKS_PLATFORM) && !defined(__WINT_TYPE__)\ntypedef wchar_t wint_t;\n#endif\ntypedef wchar_t SecChar;\ntypedef wchar_t SecUnsignedChar;\ntypedef wint_t SecInt;\ntypedef wint_t 
SecUnsignedInt;\n#else /*  no SECUREC_FOR_WCHAR */\ntypedef char SecChar;\ntypedef unsigned char SecUnsignedChar;\ntypedef int SecInt;\ntypedef unsigned int SecUnsignedInt;\n#endif\n#endif\n\n/* Determine whether the address is 8-byte aligned\n * Some systems do not have uintptr_t type, so  use NULL to clear tool alarm 507\n */\n#define SECUREC_ADDR_ALIGNED_8(addr) (SecIsAddrAligned8((addr), NULL) == 0)\n\n/* If you define the memory allocation function,\n * you need to define the function prototype. You can define this macro as a header file.\n */\n#if defined(SECUREC_MALLOC_PROTOTYPE)\nSECUREC_MALLOC_PROTOTYPE\n#endif\n\n#ifndef SECUREC_MALLOC\n#define SECUREC_MALLOC(x) malloc((size_t)(x))\n#endif\n\n#ifndef SECUREC_FREE\n#define SECUREC_FREE(x)   free((void *)(x))\n#endif\n\n/* struct for performance */\ntypedef struct {\n    unsigned char buf[1]; /* Performance optimization code structure assignment length 1 bytes */\n} SecStrBuf1;\ntypedef struct {\n    unsigned char buf[2]; /* Performance optimization code structure assignment length 2 bytes */\n} SecStrBuf2;\ntypedef struct {\n    unsigned char buf[3]; /* Performance optimization code structure assignment length 3 bytes */\n} SecStrBuf3;\ntypedef struct {\n    unsigned char buf[4]; /* Performance optimization code structure assignment length 4 bytes */\n} SecStrBuf4;\ntypedef struct {\n    unsigned char buf[5]; /* Performance optimization code structure assignment length 5 bytes */\n} SecStrBuf5;\ntypedef struct {\n    unsigned char buf[6]; /* Performance optimization code structure assignment length 6 bytes */\n} SecStrBuf6;\ntypedef struct {\n    unsigned char buf[7]; /* Performance optimization code structure assignment length 7 bytes */\n} SecStrBuf7;\ntypedef struct {\n    unsigned char buf[8]; /* Performance optimization code structure assignment length 8 bytes */\n} SecStrBuf8;\ntypedef struct {\n    unsigned char buf[9]; /* Performance optimization code structure assignment length 9 bytes */\n} 
SecStrBuf9;\ntypedef struct {\n    unsigned char buf[10]; /* Performance optimization code structure assignment length 10 bytes */\n} SecStrBuf10;\ntypedef struct {\n    unsigned char buf[11]; /* Performance optimization code structure assignment length 11 bytes */\n} SecStrBuf11;\ntypedef struct {\n    unsigned char buf[12]; /* Performance optimization code structure assignment length 12 bytes */\n} SecStrBuf12;\ntypedef struct {\n    unsigned char buf[13]; /* Performance optimization code structure assignment length 13 bytes */\n} SecStrBuf13;\ntypedef struct {\n    unsigned char buf[14]; /* Performance optimization code structure assignment length 14 bytes */\n} SecStrBuf14;\ntypedef struct {\n    unsigned char buf[15]; /* Performance optimization code structure assignment length 15 bytes */\n} SecStrBuf15;\ntypedef struct {\n    unsigned char buf[16]; /* Performance optimization code structure assignment length 16 bytes */\n} SecStrBuf16;\ntypedef struct {\n    unsigned char buf[17]; /* Performance optimization code structure assignment length 17 bytes */\n} SecStrBuf17;\ntypedef struct {\n    unsigned char buf[18]; /* Performance optimization code structure assignment length 18 bytes */\n} SecStrBuf18;\ntypedef struct {\n    unsigned char buf[19]; /* Performance optimization code structure assignment length 19 bytes */\n} SecStrBuf19;\ntypedef struct {\n    unsigned char buf[20]; /* Performance optimization code structure assignment length 20 bytes */\n} SecStrBuf20;\ntypedef struct {\n    unsigned char buf[21]; /* Performance optimization code structure assignment length 21 bytes */\n} SecStrBuf21;\ntypedef struct {\n    unsigned char buf[22]; /* Performance optimization code structure assignment length 22 bytes */\n} SecStrBuf22;\ntypedef struct {\n    unsigned char buf[23]; /* Performance optimization code structure assignment length 23 bytes */\n} SecStrBuf23;\ntypedef struct {\n    unsigned char buf[24]; /* Performance optimization code structure 
assignment length 24 bytes */\n} SecStrBuf24;\ntypedef struct {\n    unsigned char buf[25]; /* Performance optimization code structure assignment length 25 bytes */\n} SecStrBuf25;\ntypedef struct {\n    unsigned char buf[26]; /* Performance optimization code structure assignment length 26 bytes */\n} SecStrBuf26;\ntypedef struct {\n    unsigned char buf[27]; /* Performance optimization code structure assignment length 27 bytes */\n} SecStrBuf27;\ntypedef struct {\n    unsigned char buf[28]; /* Performance optimization code structure assignment length 28 bytes */\n} SecStrBuf28;\ntypedef struct {\n    unsigned char buf[29]; /* Performance optimization code structure assignment length 29 bytes */\n} SecStrBuf29;\ntypedef struct {\n    unsigned char buf[30]; /* Performance optimization code structure assignment length 30 bytes */\n} SecStrBuf30;\ntypedef struct {\n    unsigned char buf[31]; /* Performance optimization code structure assignment length 31 bytes */\n} SecStrBuf31;\ntypedef struct {\n    unsigned char buf[32]; /* Performance optimization code structure assignment length 32 bytes */\n} SecStrBuf32;\ntypedef struct {\n    unsigned char buf[33]; /* Performance optimization code structure assignment length 33 bytes */\n} SecStrBuf33;\ntypedef struct {\n    unsigned char buf[34]; /* Performance optimization code structure assignment length 34 bytes */\n} SecStrBuf34;\ntypedef struct {\n    unsigned char buf[35]; /* Performance optimization code structure assignment length 35 bytes */\n} SecStrBuf35;\ntypedef struct {\n    unsigned char buf[36]; /* Performance optimization code structure assignment length 36 bytes */\n} SecStrBuf36;\ntypedef struct {\n    unsigned char buf[37]; /* Performance optimization code structure assignment length 37 bytes */\n} SecStrBuf37;\ntypedef struct {\n    unsigned char buf[38]; /* Performance optimization code structure assignment length 38 bytes */\n} SecStrBuf38;\ntypedef struct {\n    unsigned char buf[39]; /* Performance 
optimization code structure assignment length 39 bytes */\n} SecStrBuf39;\ntypedef struct {\n    unsigned char buf[40]; /* Performance optimization code structure assignment length 40 bytes */\n} SecStrBuf40;\ntypedef struct {\n    unsigned char buf[41]; /* Performance optimization code structure assignment length 41 bytes */\n} SecStrBuf41;\ntypedef struct {\n    unsigned char buf[42]; /* Performance optimization code structure assignment length 42 bytes */\n} SecStrBuf42;\ntypedef struct {\n    unsigned char buf[43]; /* Performance optimization code structure assignment length 43 bytes */\n} SecStrBuf43;\ntypedef struct {\n    unsigned char buf[44]; /* Performance optimization code structure assignment length 44 bytes */\n} SecStrBuf44;\ntypedef struct {\n    unsigned char buf[45]; /* Performance optimization code structure assignment length 45 bytes */\n} SecStrBuf45;\ntypedef struct {\n    unsigned char buf[46]; /* Performance optimization code structure assignment length 46 bytes */\n} SecStrBuf46;\ntypedef struct {\n    unsigned char buf[47]; /* Performance optimization code structure assignment length 47 bytes */\n} SecStrBuf47;\ntypedef struct {\n    unsigned char buf[48]; /* Performance optimization code structure assignment length 48 bytes */\n} SecStrBuf48;\ntypedef struct {\n    unsigned char buf[49]; /* Performance optimization code structure assignment length 49 bytes */\n} SecStrBuf49;\ntypedef struct {\n    unsigned char buf[50]; /* Performance optimization code structure assignment length 50 bytes */\n} SecStrBuf50;\ntypedef struct {\n    unsigned char buf[51]; /* Performance optimization code structure assignment length 51 bytes */\n} SecStrBuf51;\ntypedef struct {\n    unsigned char buf[52]; /* Performance optimization code structure assignment length 52 bytes */\n} SecStrBuf52;\ntypedef struct {\n    unsigned char buf[53]; /* Performance optimization code structure assignment length 53 bytes */\n} SecStrBuf53;\ntypedef struct {\n    unsigned 
char buf[54]; /* Performance optimization code structure assignment length 54 bytes */\n} SecStrBuf54;\ntypedef struct {\n    unsigned char buf[55]; /* Performance optimization code structure assignment length 55 bytes */\n} SecStrBuf55;\ntypedef struct {\n    unsigned char buf[56]; /* Performance optimization code structure assignment length 56 bytes */\n} SecStrBuf56;\ntypedef struct {\n    unsigned char buf[57]; /* Performance optimization code structure assignment length 57 bytes */\n} SecStrBuf57;\ntypedef struct {\n    unsigned char buf[58]; /* Performance optimization code structure assignment length 58 bytes */\n} SecStrBuf58;\ntypedef struct {\n    unsigned char buf[59]; /* Performance optimization code structure assignment length 59 bytes */\n} SecStrBuf59;\ntypedef struct {\n    unsigned char buf[60]; /* Performance optimization code structure assignment length 60 bytes */\n} SecStrBuf60;\ntypedef struct {\n    unsigned char buf[61]; /* Performance optimization code structure assignment length 61 bytes */\n} SecStrBuf61;\ntypedef struct {\n    unsigned char buf[62]; /* Performance optimization code structure assignment length 62 bytes */\n} SecStrBuf62;\ntypedef struct {\n    unsigned char buf[63]; /* Performance optimization code structure assignment length 63 bytes */\n} SecStrBuf63;\ntypedef struct {\n    unsigned char buf[64]; /* Performance optimization code structure assignment length 64 bytes */\n} SecStrBuf64;\n\n\n\n\n/* User can change the error handler by modify the following definition,\n * such as logging the detail error in file.\n */\n#if defined(_DEBUG) || defined(DEBUG)\n#if defined(SECUREC_ERROR_HANDLER_BY_ASSERT)\n#define SECUREC_ERROR_INVALID_PARAMTER(msg) assert(msg \"invalid argument\" == NULL)\n#define SECUREC_ERROR_INVALID_RANGE(msg)    assert(msg \"invalid dest buffer size\" == NULL)\n#define SECUREC_ERROR_BUFFER_OVERLAP(msg)   assert(msg \"buffer overlap\" == NULL)\n#elif defined(SECUREC_ERROR_HANDLER_BY_PRINTF)\n#if 
SECUREC_IN_KERNEL\n#define SECUREC_ERROR_INVALID_PARAMTER(msg) printk(\"%s invalid argument\\n\", msg)\n#define SECUREC_ERROR_INVALID_RANGE(msg)    printk(\"%s invalid dest buffer size\\n\", msg)\n#define SECUREC_ERROR_BUFFER_OVERLAP(msg)   printk(\"%s buffer overlap\\n\", msg)\n#else\n#define SECUREC_ERROR_INVALID_PARAMTER(msg) printf(\"%s invalid argument\\n\", msg)\n#define SECUREC_ERROR_INVALID_RANGE(msg)    printf(\"%s invalid dest buffer size\\n\", msg)\n#define SECUREC_ERROR_BUFFER_OVERLAP(msg)   printf(\"%s buffer overlap\\n\", msg)\n#endif\n#elif defined(SECUREC_ERROR_HANDLER_BY_FILE_LOG)\n#define SECUREC_ERROR_INVALID_PARAMTER(msg) LogSecureCRuntimeError(msg \" EINVAL\\n\")\n#define SECUREC_ERROR_INVALID_RANGE(msg)    LogSecureCRuntimeError(msg \" ERANGE\\n\")\n#define SECUREC_ERROR_BUFFER_OVERLAP(msg)   LogSecureCRuntimeError(msg \" EOVERLAP\\n\")\n#else /* no HANDLER is defined */\n#define SECUREC_ERROR_INVALID_PARAMTER(msg) ((void)0)\n#define SECUREC_ERROR_INVALID_RANGE(msg)    ((void)0)\n#define SECUREC_ERROR_BUFFER_OVERLAP(msg)   ((void)0)\n#endif\n#else /* no DEBUG */\n#define SECUREC_ERROR_INVALID_PARAMTER(msg) ((void)0)\n#define SECUREC_ERROR_INVALID_RANGE(msg)    ((void)0)\n#define SECUREC_ERROR_BUFFER_OVERLAP(msg)   ((void)0)\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/* assembly language memory copy and memory set for X86 or MIPS ... 
*/\n#ifdef SECUREC_USE_ASM\n    extern void *memcpy_opt(void *, const void *, size_t);\n    extern void *memset_opt(void *, int, size_t);\n#endif\n\n#if defined(SECUREC_ERROR_HANDLER_BY_FILE_LOG)\n    extern void LogSecureCRuntimeError(const char *errDetail);\n#endif\n\n#ifdef SECUREC_INLINE_DO_MEMCPY\nstatic void SecDoMemcpy(void *dest, const void *src, size_t count)\n{\n    /*\n     * if SECUREC_USE_ASM macro is enabled, it will call assembly language function to improve performance.\n     */\n#ifdef SECUREC_USE_ASM\n    (void)memcpy_opt(dest, src, count);\n#else\n    /* large enough, let system API do it */\n    (void)memcpy(dest, src, count);\n#endif\n}\n#endif\n\n#ifdef SECUREC_INLINE_DO_MEMSET\nstatic void SecDoMemset(void *dest, int c, size_t count)\n{\n#ifdef SECUREC_USE_ASM\n    (void)memset_opt(dest, c, count);\n#else\n    (void)memset(dest, c, count);\n#endif\n}\n#endif\n\n#ifdef SECUREC_INLINE_STR_LEN\n/* The function compiler will be inlined and not placed in other files */\nstatic size_t SecStrMinLen(const char *str, size_t maxLen)\n{\n    size_t len;\n    SECUREC_CALC_STR_LEN(str, maxLen, &len);\n    return len;\n}\n#endif\n\n#ifdef SECUREC_INLINE_STR_LEN_OPT\n/* The function compiler will be inlined and not placed in other files */\nstatic size_t SecStrMinLenOpt(const char *str, size_t maxLen)\n{\n    size_t len;\n    SECUREC_CALC_STR_LEN_OPT(str, maxLen, &len);\n    return len;\n}\n#endif\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n#endif\n\n"
  },
  {
    "path": "third_party/securec/src/secureinput_a.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_FORMAT_OUTPUT_INPUT 1\n#ifdef SECUREC_FOR_WCHAR\n#undef SECUREC_FOR_WCHAR\n#endif\n\n#include \"secinput.h\"\n\n#include \"input.inl\"\n\n"
  },
  {
    "path": "third_party/securec/src/secureinput_w.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/* if some platforms don't have wchar.h, dont't include it */\n#if !(defined(SECUREC_VXWORKS_PLATFORM))\n/* This header file is placed below secinput.h, which will cause tool alarm,\n * but  If there is no macro above, it will cause vs2010 compiling alarm\n */\n#if defined(_MSC_VER) && (_MSC_VER >= 1400)\n#ifndef __STDC_WANT_SECURE_LIB__\n/* The order of adjustment is to eliminate alarm of Duplicate Block */\n#define __STDC_WANT_SECURE_LIB__ 0\n#endif\n#ifndef _CRTIMP_ALTERNATIVE\n#define _CRTIMP_ALTERNATIVE     /* comment microsoft *_s function */\n#endif\n#endif\n#include <wchar.h>\n#endif\n#define SECUREC_ENABLE_WCHAR_FUNC       0\n#define SECUREC_FORMAT_OUTPUT_INPUT     1\n#ifndef SECUREC_FOR_WCHAR\n#define SECUREC_FOR_WCHAR\n#endif\n\n#include \"secinput.h\"\n\n#ifndef WEOF\n#define WEOF ((wchar_t)(-1))\n#endif\n\n#include \"input.inl\"\n\n"
  },
  {
    "path": "third_party/securec/src/secureprintoutput.h",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef SECUREPRINTOUTPUT_H_E950DA2C_902F_4B15_BECD_948E99090D9C\n#define SECUREPRINTOUTPUT_H_E950DA2C_902F_4B15_BECD_948E99090D9C\n#include \"securecutil.h\"\n\n/* flag definitions */\n/* Using macros instead of enumerations is because some of the enumerated types under the compiler are 16bit. */\n#define SECUREC_FLAG_SIGN           0x00001U\n#define SECUREC_FLAG_SIGN_SPACE     0x00002U\n#define SECUREC_FLAG_LEFT           0x00004U\n#define SECUREC_FLAG_LEADZERO       0x00008U\n#define SECUREC_FLAG_LONG           0x00010U\n#define SECUREC_FLAG_SHORT          0x00020U\n#define SECUREC_FLAG_SIGNED         0x00040U\n#define SECUREC_FLAG_ALTERNATE      0x00080U\n#define SECUREC_FLAG_NEGATIVE       0x00100U\n#define SECUREC_FLAG_FORCE_OCTAL    0x00200U\n#define SECUREC_FLAG_LONG_DOUBLE    0x00400U\n#define SECUREC_FLAG_WIDECHAR       0x00800U\n#define SECUREC_FLAG_LONGLONG       0x01000U\n#define SECUREC_FLAG_CHAR           0x02000U\n#define SECUREC_FLAG_POINTER        0x04000U\n#define SECUREC_FLAG_I64            0x08000U\n#define SECUREC_FLAG_PTRDIFF        0x10000U\n#define SECUREC_FLAG_SIZE           0x20000U\n#ifdef  SECUREC_COMPATIBLE_LINUX_FORMAT\n#define SECUREC_FLAG_INTMAX         0x40000U\n#endif\n\n/* state definitions. 
Identify the status of the current format */\ntypedef enum {\n    STAT_NORMAL,\n    STAT_PERCENT,\n    STAT_FLAG,\n    STAT_WIDTH,\n    STAT_DOT,\n    STAT_PRECIS,\n    STAT_SIZE,\n    STAT_TYPE,\n    STAT_INVALID\n} SecFmtState;\n\n/* Format output buffer pointer and available size */\ntypedef struct {\n    int count;\n    char *cur;\n} SecPrintfStream;\n\n\n#ifndef SECUREC_BUFFER_SIZE\n#ifdef SECUREC_STACK_SIZE_LESS_THAN_1K\n/* SECUREC_BUFFER_SIZE Can not be less than 23 ,\n * the length of the octal representation of 64-bit integers with zero lead\n */\n#define SECUREC_BUFFER_SIZE    256\n#else\n#define SECUREC_BUFFER_SIZE    512\n#endif\n#endif\n#if SECUREC_BUFFER_SIZE < 23\n#error SECUREC_BUFFER_SIZE Can not be less than 23\n#endif\n\n#define SECUREC_MAX_PRECISION  SECUREC_BUFFER_SIZE\n/* max. # bytes in multibyte char  ,see MB_LEN_MAX */\n#define SECUREC_MB_LEN 16\n/* The return value of the internal function, which is returned when truncated */\n#define SECUREC_PRINTF_TRUNCATE (-2)\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n    extern int SecVsnprintfImpl(char *string, size_t count, const char *format, va_list argList);\n#if SECUREC_IN_KERNEL == 0\n    extern int SecVswprintfImpl(wchar_t *string, size_t sizeInWchar, const wchar_t *format, va_list argList);\n#endif\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n\n\n"
  },
  {
    "path": "third_party/securec/src/secureprintoutput_a.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_INLINE_DO_MEMCPY    1\n#define SECUREC_FORMAT_OUTPUT_INPUT 1\n#ifdef SECUREC_FOR_WCHAR\n#undef SECUREC_FOR_WCHAR\n#endif\n\n#include \"secureprintoutput.h\"\n\n#define SECUREC_CHAR(x) x\n#define SECUREC_WRITE_MULTI_CHAR  SecWriteMultiChar\n#define SECUREC_WRITE_STRING      SecWriteString\n\n#ifndef EOF\n#define EOF (-1)\n#endif\n\n/* put a char to output */\n#define SECUREC_PUTC(c, outStream)    ((--(outStream)->count >= 0) ? \\\n    (int)((unsigned int)(unsigned char)(*((outStream)->cur++) = (char)(c)) & 0xff) : EOF)\n/* to clear e835 */\n#define SECUREC_PUTC_ZERO(outStream)    ((--(outStream)->count >= 0) ? 
\\\n    ((*((outStream)->cur++) = (char)('\\0'))) : EOF)\n\nstatic void SecWriteMultiChar(char ch, int num, SecPrintfStream *f, int *pnumwritten);\nstatic void SecWriteString(const char *string, int len, SecPrintfStream *f, int *pnumwritten);\n\n#include \"output.inl\"\n\n/*\n * Multibyte character formatted output implementation\n */\nint SecVsnprintfImpl(char *string, size_t count, const char *format, va_list argList)\n{\n    SecPrintfStream str;\n    int retVal;\n\n    str.count = (int)count; /* this count include \\0 character, Must be greater than zero */\n    str.cur = string;\n\n    retVal = SecOutputS(&str, format, argList);\n    if ((retVal >= 0) && (SECUREC_PUTC_ZERO(&str) != EOF)) {\n        return retVal;\n    } else if (str.count < 0) {\n        /* the buffer was too small; we return truncation */\n        string[count - 1] = '\\0';\n        return SECUREC_PRINTF_TRUNCATE;\n    }\n    string[0] = '\\0'; /* empty the dest strDest */\n    return -1;\n}\n\n/*\n * Sec write multiple characters\n */\nstatic void SecWriteMultiChar(char ch, int num, SecPrintfStream *f, int *pnumwritten)\n{\n    int count = num;\n    while (count-- > 0) {\n        if (SECUREC_PUTC(ch, f) == EOF) {\n            *pnumwritten = -1;\n            break;\n        } else {\n            *pnumwritten = *pnumwritten + 1;\n        }\n    }\n}\n\n/*\n * Sec write string function\n */\nstatic void SecWriteString(const char *string, int len, SecPrintfStream *f, int *pnumwritten)\n{\n    const char *str = string;\n    int count = len;\n    while (count-- > 0) {\n        if (SECUREC_PUTC(*str, f) == EOF) {\n            *pnumwritten = -1;\n            break;\n        } else {\n            *pnumwritten = *pnumwritten + 1;\n            ++str;\n        }\n    }\n}\n\n"
  },
  {
    "path": "third_party/securec/src/secureprintoutput_w.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/* if some platforms don't have wchar.h, dont't include it */\n#if !(defined(SECUREC_VXWORKS_PLATFORM))\n/* This header file is placed below secinput.h, which will cause tool alarm,\n * but if there is no macro above, it will cause compiling alarm\n */\n#if defined(_MSC_VER) && (_MSC_VER >= 1400)\n#ifndef _CRTIMP_ALTERNATIVE\n#define _CRTIMP_ALTERNATIVE     /* comment microsoft *_s function */\n#endif\n#ifndef __STDC_WANT_SECURE_LIB__\n#define __STDC_WANT_SECURE_LIB__ 0\n#endif\n#endif\n#include <wchar.h>\n#endif\n\n#define SECUREC_ENABLE_WCHAR_FUNC   0\n#define SECUREC_INLINE_DO_MEMCPY    1\n#define SECUREC_FORMAT_OUTPUT_INPUT 1\n#ifndef SECUREC_FOR_WCHAR\n#define SECUREC_FOR_WCHAR\n#endif\n\n#include \"secureprintoutput.h\"\n\n#ifndef WEOF\n#define WEOF ((wchar_t)(-1))\n#endif\n\n#define SECUREC_CHAR(x) L ## x\n#define SECUREC_WRITE_MULTI_CHAR SecWriteMultiCharW\n#define SECUREC_WRITE_STRING     SecWriteStringW\n\nstatic void SecWriteCharW(wchar_t ch, SecPrintfStream *f, int *pnumwritten);\nstatic void SecWriteMultiCharW(wchar_t ch, int num, SecPrintfStream *f, int *pnumwritten);\nstatic void SecWriteStringW(const wchar_t *string, int len, SecPrintfStream *f, int *pnumwritten);\nstatic int SecPutWcharStrEndingZero(SecPrintfStream *str, int zeroCount);\n\n\n#include \"output.inl\"\n\n/*\n * Wide character formatted output 
implementation\n */\nint SecVswprintfImpl(wchar_t *string, size_t sizeInWchar, const wchar_t *format, va_list argList)\n{\n    SecPrintfStream str;\n    int retVal; /* If initialization causes  e838 */\n\n    str.cur = (char *)string;\n    /* this count include \\0 character, Must be greater than zero */\n    str.count = (int)(sizeInWchar * sizeof(wchar_t));\n\n    retVal = SecOutputSW(&str, format, argList);\n    if ((retVal >= 0) && SecPutWcharStrEndingZero(&str, (int)sizeof(wchar_t))) {\n        return (retVal);\n    } else if (str.count < 0) {\n        /* the buffer was too small; we return truncation */\n        string[sizeInWchar - 1] = L'\\0';\n        return SECUREC_PRINTF_TRUNCATE;\n    }\n    string[0] = L'\\0';\n    return -1;\n}\n\n/*\n * Output one zero character zero into the SecPrintfStream structure\n */\nstatic int SecPutZeroChar(SecPrintfStream *str)\n{\n    if (str->count > 0) {\n        *(str->cur) = (char)('\\0');\n        str->count = str->count - 1;\n        str->cur = str->cur + 1;\n        return 0;\n    }\n    return -1;\n}\n\n/*\n * Output a wide character zero end into the SecPrintfStream structure\n */\nstatic int SecPutWcharStrEndingZero(SecPrintfStream *str, int zeroCount)\n{\n    int succeed = 0;\n    int i = 0;\n\n    while (i < zeroCount && (SecPutZeroChar(str) == 0)) {\n        ++i;\n    }\n    if (i == zeroCount) {\n        succeed = 1;\n    }\n    return succeed;\n}\n\n\n/*\n * Output a wide character into the SecPrintfStream structure\n */\nstatic wchar_t SecPutCharW(wchar_t ch, SecPrintfStream *f)\n{\n    wchar_t wcRet = 0;\n    if (((f)->count -= (int)sizeof(wchar_t)) >= 0) {\n        *(wchar_t *)(void *)(f->cur) = ch;\n        f->cur += sizeof(wchar_t);\n        wcRet = ch;\n    } else {\n        wcRet = (wchar_t)WEOF;\n    }\n    return wcRet;\n}\n\n/*\n * Output a wide character into the SecPrintfStream structure, returns the number of characters written\n */\nstatic void SecWriteCharW(wchar_t ch, SecPrintfStream *f, int 
*pnumwritten)\n{\n    if (SecPutCharW(ch, f) == (wchar_t)WEOF) {\n        *pnumwritten = -1;\n    } else {\n        *pnumwritten = *pnumwritten + 1;\n    }\n}\n\n/*\n * Output multiple wide character into the SecPrintfStream structure,  returns the number of characters written\n */\nstatic void SecWriteMultiCharW(wchar_t ch, int num, SecPrintfStream *f, int *pnumwritten)\n{\n    int count = num;\n    while (count-- > 0) {\n        SecWriteCharW(ch, f, pnumwritten);\n        if (*pnumwritten == -1) {\n            break;\n        }\n    }\n}\n\n/*\n * Output a wide string into the SecPrintfStream structure,  returns the number of characters written\n */\nstatic void SecWriteStringW(const wchar_t *string, int len, SecPrintfStream *f, int *pnumwritten)\n{\n    const wchar_t *str = string;\n    int count = len;\n    while (count-- > 0) {\n        SecWriteCharW(*str++, f, pnumwritten);\n        if (*pnumwritten == -1) {\n            break;\n        }\n    }\n}\n\n"
  },
  {
    "path": "third_party/securec/src/snprintf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securec.h\"\n\n#if SECUREC_ENABLE_SNPRINTF\n/*\n * <FUNCTION DESCRIPTION>\n *    The snprintf_s function is equivalent to the snprintf function\n *    except for the parameter destMax/count and the explicit runtime-constraints violation\n *    The snprintf_s function formats and stores count or fewer characters in\n *    strDest and appends a terminating null. Each argument (if any) is converted\n *    and output according to the corresponding format specification in format.\n *    The formatting is consistent with the printf family of functions; If copying\n *    occurs between strings that overlap, the behavior is undefined.\n *\n * <INPUT PARAMETERS>\n *    strDest                 Storage location for the output.\n *    destMax                 The size of the storage location for output. Size\n *                                 in bytes for snprintf_s or size in words for snwprintf_s.\n *    count                    Maximum number of character to store.\n *    format                  Format-control string.\n *    ...                        
Optional arguments.\n *\n * <OUTPUT PARAMETERS>\n *    strDest                 is updated\n *\n * <RETURN VALUE>\n *    return  the number of characters written, not including the terminating null\n *    return -1 if an  error occurs.\n *    return -1 if count < destMax and the output string  has been truncated\n *\n * If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n *\n */\nint snprintf_s(char *strDest, size_t destMax, size_t count, const char *format, ...)\n{\n    int ret;                    /* If initialization causes  e838 */\n    va_list argList;\n\n    va_start(argList, format);\n    ret = vsnprintf_s(strDest, destMax, count, format, argList);\n    va_end(argList);\n    (void)argList;              /* to clear e438 last value assigned not used , the compiler will optimize this code */\n\n    return ret;\n}\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(snprintf_s);\n#endif\n#endif\n\n#if SECUREC_SNPRINTF_TRUNCATED\n/*\n * <FUNCTION DESCRIPTION>\n *    The snprintf_truncated_s function is equivalent to the snprintf function\n *    except for the parameter destMax/count and the explicit runtime-constraints violation\n *    The snprintf_truncated_s function formats and stores count or fewer characters in\n *    strDest and appends a terminating null. Each argument (if any) is converted\n *    and output according to the corresponding format specification in format.\n *    The formatting is consistent with the printf family of functions; If copying\n *    occurs between strings that overlap, the behavior is undefined.\n *\n * <INPUT PARAMETERS>\n *    strDest                 Storage location for the output.\n *    destMax                 The size of the storage location for output. Size\n *                                 in bytes for snprintf_truncated_s or size in words for snwprintf_s.\n *    format                  Format-control string.\n *    ...                        
Optional arguments.\n *\n * <OUTPUT PARAMETERS>\n *    strDest                 is updated\n *\n * <RETURN VALUE>\n *    return  the number of characters written, not including the terminating null\n *    return -1 if an  error occurs.\n *    return destMax-1 if output string  has been truncated\n *\n * If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n *\n */\nint snprintf_truncated_s(char *strDest, size_t destMax, const char *format, ...)\n{\n    int ret;                    /* If initialization causes  e838 */\n    va_list argList;\n\n    va_start(argList, format);\n    ret = vsnprintf_truncated_s(strDest, destMax, format, argList);\n    va_end(argList);\n    (void)argList;              /* to clear e438 last value assigned not used , the compiler will optimize this code */\n\n    return ret;\n}\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(snprintf_truncated_s);\n#endif\n\n#endif\n\n\n"
  },
  {
    "path": "third_party/securec/src/sprintf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securec.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The sprintf_s function is equivalent to the sprintf function\n *    except for the parameter destMax and the explicit runtime-constraints violation\n *    The sprintf_s function formats and stores a series of characters and values\n *    in strDest. Each argument (if any) is converted and output according to\n *    the corresponding format specification in format. The format consists of\n *    ordinary characters and has the same form and function as the format argument\n *    for printf. A null character is appended after the last character written.\n *    If copying occurs between strings that overlap, the behavior is undefined.\n *\n * <INPUT PARAMETERS>\n *    strDest                 Storage location for output.\n *    destMax                Maximum number of characters to store.\n *    format                  Format-control string.\n *    ...                        
Optional arguments\n *\n * <OUTPUT PARAMETERS>\n *    strDest                 is updated\n *\n * <RETURN VALUE>\n *    return the number of bytes stored in strDest, not counting the terminating null character.\n *    return -1 if an error occurred.\n *\n * If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nint sprintf_s(char *strDest, size_t destMax, const char *format, ...)\n{\n    int ret;                    /* If initialization causes  e838 */\n    va_list argList;\n\n    va_start(argList, format);\n    ret = vsprintf_s(strDest, destMax, format, argList);\n    va_end(argList);\n    (void)argList;              /* to clear e438 last value assigned not used , the compiler will optimize this code */\n\n    return ret;\n}\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(sprintf_s);\n#endif\n\n\n"
  },
  {
    "path": "third_party/securec/src/sscanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securec.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The sscanf_s function is equivalent to fscanf_s,\n *     except that input is obtained from a string (specified by the argument buffer) rather than from a stream\n *    The sscanf function reads data from buffer into the location given by each\n *    argument. Every argument must be a pointer to a variable with a type that\n *    corresponds to a type specifier in format. The format argument controls the\n *    interpretation of the input fields and has the same form and function as\n *    the format argument for the scanf function.\n *    If copying takes place between strings that overlap, the behavior is undefined.\n *\n * <INPUT PARAMETERS>\n *    buffer                 Stored data.\n *    format                 Format control string, see Format Specifications.\n *    ...                    Optional arguments.\n *\n * <OUTPUT PARAMETERS>\n *    ...                    
The converted value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Each of these functions returns the number of fields successfully converted\n *    and assigned; the return value does not include fields that were read but\n *    not assigned.\n *    A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\nint sscanf_s(const char *buffer, const char *format, ...)\n{\n    int ret;                    /* If initialization causes  e838 */\n    va_list argList;\n\n    va_start(argList, format);\n    ret = vsscanf_s(buffer, format, argList);\n    va_end(argList);\n    (void)argList;              /* to clear e438 last value assigned not used , the compiler will optimize this code */\n\n    return ret;\n}\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(sscanf_s);\n#endif\n\n\n"
  },
  {
    "path": "third_party/securec/src/strcat_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_INLINE_STR_LEN     1\n#define SECUREC_INLINE_STR_LEN_OPT 1\n#define SECUREC_INLINE_DO_MEMCPY   1\n#include \"securecutil.h\"\n\n/*\n * Befor this function, the basic parameter checking has been done\n */\nstatic errno_t SecDoStrcat(char *strDest, size_t destMax, const char *strSrc)\n{\n    size_t destLen = SecStrMinLen(strDest, destMax);\n    /* Only optimize strSrc, do not apply this function to strDest */\n    size_t srcLen = SecStrMinLenOpt(strSrc, destMax - destLen);\n\n    if (SECUREC_CAT_STRING_IS_OVERLAP(strDest, destLen, strSrc, srcLen)) {\n        strDest[0] = '\\0';\n        if (strDest + destLen <= strSrc && destLen == destMax) {\n            SECUREC_ERROR_INVALID_PARAMTER(\"strcat_s\");\n            return EINVAL_AND_RESET;\n        }\n        SECUREC_ERROR_BUFFER_OVERLAP(\"strcat_s\");\n        return EOVERLAP_AND_RESET;\n    }\n    if (srcLen + destLen >= destMax || strDest == strSrc) {\n        strDest[0] = '\\0';\n        if (destLen == destMax) {\n            SECUREC_ERROR_INVALID_PARAMTER(\"strcat_s\");\n            return EINVAL_AND_RESET;\n        }\n        SECUREC_ERROR_INVALID_RANGE(\"strcat_s\");\n        return ERANGE_AND_RESET;\n    }\n    SecDoMemcpy(strDest + destLen, strSrc, srcLen + 1); /* single character length  include \\0 */\n    return EOK;\n}\n\n/*\n * <FUNCTION DESCRIPTION>\n *   
 The strcat_s function appends a copy of the string pointed to by strSrc (including the terminating null character)\n *    to the end of the  string pointed to by strDest.\n *    The initial character of strSrc overwrites the terminating null character of strDest.\n *    strcat_s will return EOVERLAP_AND_RESET if the source and destination strings overlap.\n *\n *    Note that the second parameter is the total size of the buffer, not the\n *    remaining size.\n *\n * <INPUT PARAMETERS>\n *    strDest             Null-terminated destination string buffer.\n *    destMax             Size of the destination string buffer.\n *    strSrc              Null-terminated source string buffer.\n *\n * <OUTPUT PARAMETERS>\n *    strDest             is updated\n *\n * <RETURN VALUE>\n *    EOK                 Success\n *    EINVAL              strDest is  NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN\n *    EINVAL_AND_RESET    (strDest unterminated  and all other parameters are valid)or\n *                         (strDest != NULL and strSrc is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN)\n *    ERANGE              destMax is 0 and destMax > SECUREC_STRING_MAX_LEN\n *    ERANGE_AND_RESET      strDest have not enough space  and all other parameters are valid  and not overlap\n *    EOVERLAP_AND_RESET   dest buffer and source buffer are overlapped and all  parameters are valid\n *\n *    If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nerrno_t strcat_s(char *strDest, size_t destMax, const char *strSrc)\n{\n    if (destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) {\n        SECUREC_ERROR_INVALID_RANGE(\"strcat_s\");\n        return ERANGE;\n    }\n    if (strDest == NULL || strSrc == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"strcat_s\");\n        if (strDest != NULL) {\n            strDest[0] = '\\0';\n            return EINVAL_AND_RESET;\n        }\n        return EINVAL;\n 
   }\n    return SecDoStrcat(strDest, destMax, strSrc);\n}\n\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(strcat_s);\n#endif\n\n"
  },
  {
    "path": "third_party/securec/src/strcpy_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_INLINE_STR_LEN   1\n#define SECUREC_INLINE_DO_MEMCPY 1\n\n#include \"securecutil.h\"\n\n#if SECUREC_IN_KERNEL== 0\n#ifndef SECUREC_STRCOPY_THRESHOLD_SIZE\n#define SECUREC_STRCOPY_THRESHOLD_SIZE   32UL\n#endif\n\n/*\n * Determine whether the address is 8-byte aligned, use static to increase performance\n * return 0 is aligned\n */\nstatic int SecIsAddrAligned8(const void *addr, const void *zeroAddr)\n{\n    return (int)(((size_t)((const char*)addr - (const char*)zeroAddr)) & 7); /* use 7 to check aligned 8 */\n}\n\n/* The purpose of converting to void is to clean up the alarm */\n#define SECUREC_SMALL_STR_COPY do { \\\n    if (SECUREC_ADDR_ALIGNED_8(strDest) && SECUREC_ADDR_ALIGNED_8(strSrc)) { \\\n        /* use struct assignment */ \\\n        switch (srcStrLen) { \\\n            case 1: \\\n                *(SecStrBuf1 *)(void *)strDest = *(const SecStrBuf1 *)(const void *)strSrc; \\\n                break; \\\n            case 2: \\\n                *(SecStrBuf2 *)(void *)strDest = *(const SecStrBuf2 *)(const void *)strSrc; \\\n                break; \\\n            case 3: \\\n                *(SecStrBuf3 *)(void *)strDest = *(const SecStrBuf3 *)(const void *)strSrc; \\\n                break; \\\n            case 4: \\\n                *(SecStrBuf4 *)(void *)strDest = *(const SecStrBuf4 *)(const void *)strSrc; 
\\\n                break; \\\n            case 5: \\\n                *(SecStrBuf5 *)(void *)strDest = *(const SecStrBuf5 *)(const void *)strSrc; \\\n                break; \\\n            case 6: \\\n                *(SecStrBuf6 *)(void *)strDest = *(const SecStrBuf6 *)(const void *)strSrc; \\\n                break; \\\n            case 7: \\\n                *(SecStrBuf7 *)(void *)strDest = *(const SecStrBuf7 *)(const void *)strSrc; \\\n                break; \\\n            case 8: \\\n                *(SecStrBuf8 *)(void *)strDest = *(const SecStrBuf8 *)(const void *)strSrc; \\\n                break; \\\n            case 9: \\\n                *(SecStrBuf9 *)(void *)strDest = *(const SecStrBuf9 *)(const void *)strSrc; \\\n                break; \\\n            case 10: \\\n                *(SecStrBuf10 *)(void *)strDest = *(const SecStrBuf10 *)(const void *)strSrc; \\\n                break; \\\n            case 11: \\\n                *(SecStrBuf11 *)(void *)strDest = *(const SecStrBuf11 *)(const void *)strSrc; \\\n                break; \\\n            case 12: \\\n                *(SecStrBuf12 *)(void *)strDest = *(const SecStrBuf12 *)(const void *)strSrc; \\\n                break; \\\n            case 13: \\\n                *(SecStrBuf13 *)(void *)strDest = *(const SecStrBuf13 *)(const void *)strSrc; \\\n                break; \\\n            case 14: \\\n                *(SecStrBuf14 *)(void *)strDest = *(const SecStrBuf14 *)(const void *)strSrc; \\\n                break; \\\n            case 15: \\\n                *(SecStrBuf15 *)(void *)strDest = *(const SecStrBuf15 *)(const void *)strSrc; \\\n                break; \\\n            case 16: \\\n                *(SecStrBuf16 *)(void *)strDest = *(const SecStrBuf16 *)(const void *)strSrc; \\\n                break; \\\n            case 17: \\\n                *(SecStrBuf17 *)(void *)strDest = *(const SecStrBuf17 *)(const void *)strSrc; \\\n                break; \\\n            case 18: \\\n         
       *(SecStrBuf18 *)(void *)strDest = *(const SecStrBuf18 *)(const void *)strSrc; \\\n                break; \\\n            case 19: \\\n                *(SecStrBuf19 *)(void *)strDest = *(const SecStrBuf19 *)(const void *)strSrc; \\\n                break; \\\n            case 20: \\\n                *(SecStrBuf20 *)(void *)strDest = *(const SecStrBuf20 *)(const void *)strSrc; \\\n                break; \\\n            case 21: \\\n                *(SecStrBuf21 *)(void *)strDest = *(const SecStrBuf21 *)(const void *)strSrc; \\\n                break; \\\n            case 22: \\\n                *(SecStrBuf22 *)(void *)strDest = *(const SecStrBuf22 *)(const void *)strSrc; \\\n                break; \\\n            case 23: \\\n                *(SecStrBuf23 *)(void *)strDest = *(const SecStrBuf23 *)(const void *)strSrc; \\\n                break; \\\n            case 24: \\\n                *(SecStrBuf24 *)(void *)strDest = *(const SecStrBuf24 *)(const void *)strSrc; \\\n                break; \\\n            case 25: \\\n                *(SecStrBuf25 *)(void *)strDest = *(const SecStrBuf25 *)(const void *)strSrc; \\\n                break; \\\n            case 26: \\\n                *(SecStrBuf26 *)(void *)strDest = *(const SecStrBuf26 *)(const void *)strSrc; \\\n                break; \\\n            case 27: \\\n                *(SecStrBuf27 *)(void *)strDest = *(const SecStrBuf27 *)(const void *)strSrc; \\\n                break; \\\n            case 28: \\\n                *(SecStrBuf28 *)(void *)strDest = *(const SecStrBuf28 *)(const void *)strSrc; \\\n                break; \\\n            case 29: \\\n                *(SecStrBuf29 *)(void *)strDest = *(const SecStrBuf29 *)(const void *)strSrc; \\\n                break; \\\n            case 30: \\\n                *(SecStrBuf30 *)(void *)strDest = *(const SecStrBuf30 *)(const void *)strSrc; \\\n                break; \\\n            case 31: \\\n                *(SecStrBuf31 *)(void *)strDest = *(const 
SecStrBuf31 *)(const void *)strSrc; \\\n                break; \\\n            case 32: \\\n                *(SecStrBuf32 *)(void *)strDest = *(const SecStrBuf32 *)(const void *)strSrc; \\\n                break; \\\n            default: \\\n                break; \\\n        } /* END switch */ \\\n    } else { \\\n        char *tmpStrDest = (char *)strDest; \\\n        const char *tmpStrSrc = (const char *)strSrc; \\\n        switch (srcStrLen) { \\\n            case 32: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 31: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 30: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 29: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 28: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 27: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 26: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 25: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 24: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 23: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 22: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 21: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n  
              /* fall-through */ /* FALLTHRU */ \\\n            case 20: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 19: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 18: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 17: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 16: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 15: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 14: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 13: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 12: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 11: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 10: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 9: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 8: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 7: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 6: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n           
     /* fall-through */ /* FALLTHRU */ \\\n            case 5: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 4: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 3: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 2: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            case 1: \\\n                *(tmpStrDest++) = *(tmpStrSrc++); \\\n                /* fall-through */ /* FALLTHRU */ \\\n            default: \\\n                break; \\\n        } \\\n    } \\\n} SECUREC_WHILE_ZERO\n#endif\n\n/*\n * Check Src Range\n */\nstatic errno_t CheckSrcRange(char *strDest, size_t destMax, const char *strSrc)\n{\n    size_t tmpDestMax = destMax;\n    const char *tmpSrc = strSrc;\n    /* use destMax as boundary checker and destMax must be greater than zero */\n    while (*(tmpSrc) != '\\0' && tmpDestMax > 0) {\n        ++tmpSrc;\n        --tmpDestMax;\n    }\n    if (tmpDestMax == 0) {\n        strDest[0] = '\\0';\n        SECUREC_ERROR_INVALID_RANGE(\"strcpy_s\");\n        return ERANGE_AND_RESET;\n    }\n    return EOK;\n}\n\n/*\n * Handling errors\n */\nerrno_t strcpy_error(char *strDest, size_t destMax, const char *strSrc)\n{\n    if (destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) {\n        SECUREC_ERROR_INVALID_RANGE(\"strcpy_s\");\n        return ERANGE;\n    } else if (strDest == NULL || strSrc == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"strcpy_s\");\n        if (strDest != NULL) {\n            strDest[0] = '\\0';\n            return EINVAL_AND_RESET;\n        }\n        return EINVAL;\n    }\n    return CheckSrcRange(strDest, destMax, strSrc);\n}\n\n/*\n * Performance optimization. 
srcStrLen  include '\\0'\n */\nstatic void SecDoStrcpyOpt(char *strDest, const char *strSrc, size_t srcStrLen)\n{\n#if SECUREC_IN_KERNEL\n    SecDoMemcpy(strDest, strSrc, srcStrLen);\n#else\n    if (srcStrLen > SECUREC_STRCOPY_THRESHOLD_SIZE) {\n        SecDoMemcpy(strDest, strSrc, srcStrLen);\n    } else {\n        SECUREC_SMALL_STR_COPY;\n    }\n#endif\n}\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The strcpy_s function copies the string pointed to  strSrc\n *          (including the terminating null character) into the array pointed to by strDest\n *    The destination string must be large enough to hold the source string,\n *    including the terminating null character. strcpy_s will return EOVERLAP_AND_RESET\n *    if the source and destination strings overlap.\n *\n * <INPUT PARAMETERS>\n *    strDest                          Location of destination string buffer\n *    destMax                        Size of the destination string buffer.\n *    strSrc                            Null-terminated source string buffer.\n *\n * <OUTPUT PARAMETERS>\n *    strDest                         is updated.\n *\n * <RETURN VALUE>\n *    EOK                               Success\n *    EINVAL                          strDest is  NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN\n *    EINVAL_AND_RESET       strDest !=  NULL and strSrc is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN\n *    ERANGE                         destMax is 0 and destMax > SECUREC_STRING_MAX_LEN\n *    ERANGE_AND_RESET      strDest have not enough space  and all other parameters are valid  and not overlap\n *    EOVERLAP_AND_RESET   dest buffer and source buffer are overlapped and all  parameters are valid\n *\n *    If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nerrno_t strcpy_s(char *strDest, size_t destMax, const char *strSrc)\n{\n    if ((destMax > 0 && destMax <= SECUREC_STRING_MAX_LEN && strDest != NULL && 
strSrc != NULL && strDest != strSrc)) {\n        size_t srcStrLen = SecStrMinLen(strSrc, destMax) + 1; /* len  include \\0 */\n        if (srcStrLen <= destMax) {\n            /* use mem overlap check include \\0 */\n            if (SECUREC_MEMORY_NO_OVERLAP(strDest, strSrc, srcStrLen)) {\n                /* performance optimization srcStrLen include '\\0' */\n                SecDoStrcpyOpt(strDest, strSrc, srcStrLen);\n                return EOK;\n            } else {\n                strDest[0] = '\\0';\n                SECUREC_ERROR_BUFFER_OVERLAP(\"strcpy_s\");\n                return EOVERLAP_AND_RESET;\n            }\n        }\n    }\n    return strcpy_error(strDest, destMax, strSrc);\n}\n\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(strcpy_s);\n#endif\n\n"
  },
  {
    "path": "third_party/securec/src/strncat_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_INLINE_STR_LEN   1\n#define SECUREC_INLINE_DO_MEMCPY 1\n\n#include \"securecutil.h\"\n\n/*\n * Befor this function, the basic parameter checking has been done\n */\nstatic errno_t SecDoStrncat(char *strDest, size_t destMax, const char *strSrc, size_t count)\n{\n    size_t destLen = SecStrMinLen(strDest, destMax);\n    /* The strSrc is no longer optimized. 
The reason is that when count is small,\n     * the efficiency of strnlen is higher than that of self realization.\n     */\n    size_t srcLen = SecStrMinLen(strSrc, count);\n\n    if (SECUREC_CAT_STRING_IS_OVERLAP(strDest, destLen, strSrc, srcLen)) {\n        strDest[0] = '\\0';\n        if (strDest + destLen <= strSrc && destLen == destMax) {\n            SECUREC_ERROR_INVALID_PARAMTER(\"strncat_s\");\n            return EINVAL_AND_RESET;\n        }\n        SECUREC_ERROR_BUFFER_OVERLAP(\"strncat_s\");\n        return EOVERLAP_AND_RESET;\n    }\n    if (srcLen + destLen >= destMax || strDest == strSrc) {\n        strDest[0] = '\\0';\n        if (destLen == destMax) {\n            SECUREC_ERROR_INVALID_PARAMTER(\"strncat_s\");\n            return EINVAL_AND_RESET;\n        }\n        SECUREC_ERROR_INVALID_RANGE(\"strncat_s\");\n        return ERANGE_AND_RESET;\n    }\n    SecDoMemcpy(strDest + destLen, strSrc, srcLen);    /* no  terminator */\n    *(strDest + destLen + srcLen) = '\\0';\n    return EOK;\n}\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The strncat_s function appends not more than n successive  characters\n *    (not including the terminating null  character)\n *     from the array pointed to by strSrc to the end of the  string pointed to by strDest\n *    The strncat_s function try to append the first D characters of strSrc to\n *    the end of strDest, where D is the lesser of count and the length of strSrc.\n *    If appending those D characters will fit within strDest (whose size is given\n *    as destMax) and still leave room for a null terminator, then those characters\n *    are appended, starting at the original terminating null of strDest, and a\n *    new terminating null is appended; otherwise, strDest[0] is set to the null\n *    character.\n *\n * <INPUT PARAMETERS>\n *    strDest            Null-terminated destination string.\n *    destMax            Size of the destination buffer.\n *    strSrc             Null-terminated source string.\n 
*    count              Number of character to append, or truncate.\n *\n * <OUTPUT PARAMETERS>\n *    strDest            is updated\n *\n * <RETURN VALUE>\n *    EOK                Success\n *    EINVAL             strDest is  NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN\n *    EINVAL_AND_RESET      (strDest unterminated  and all other parameters are valid)or\n *                        (strDest !=  NULL and strSrc is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN)\n *    ERANGE                         destMax is 0 and destMax > SECUREC_STRING_MAX_LEN\n *    ERANGE_AND_RESET      strDest have not enough space  and all other parameters are valid  and not overlap\n *    EOVERLAP_AND_RESET   dest buffer and source buffer are overlapped and all  parameters are valid\n *\n *    If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nerrno_t strncat_s(char *strDest, size_t destMax, const char *strSrc, size_t count)\n{\n    if (destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) {\n        SECUREC_ERROR_INVALID_RANGE(\"strncat_s\");\n        return ERANGE;\n    }\n\n    if (strDest == NULL || strSrc == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"strncat_s\");\n        if (strDest != NULL) {\n            strDest[0] = '\\0';\n            return EINVAL_AND_RESET;\n        }\n        return EINVAL;\n    }\n    if (count > SECUREC_STRING_MAX_LEN) {\n#ifdef  SECUREC_COMPATIBLE_WIN_FORMAT\n        if (count == (size_t)(-1)) {\n            /* Windows internal functions may pass in -1 when calling this function */\n            return SecDoStrncat(strDest, destMax, strSrc, destMax);\n        }\n#endif\n        strDest[0] = '\\0';\n        SECUREC_ERROR_INVALID_RANGE(\"strncat_s\");\n        return ERANGE_AND_RESET;\n    }\n    return SecDoStrncat(strDest, destMax, strSrc, count);\n}\n\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(strncat_s);\n#endif\n\n"
  },
  {
    "path": "third_party/securec/src/strncpy_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_INLINE_STR_LEN 1\n#define SECUREC_INLINE_DO_MEMCPY 1\n\n#include \"securecutil.h\"\n\n#if defined(SECUREC_COMPATIBLE_WIN_FORMAT)\n#define SECUREC_STRNCPY_PARAM_OK(strDest, destMax, strSrc, count) \\\n    (((destMax) > 0 && (destMax) <= SECUREC_STRING_MAX_LEN && (strDest) != NULL && (strSrc) != NULL && \\\n    ((count) <= SECUREC_STRING_MAX_LEN || (count) == ((size_t)(-1))) && (count) > 0))\n#else\n#define SECUREC_STRNCPY_PARAM_OK(strDest, destMax, strSrc, count) \\\n    (((destMax) > 0 && (destMax) <= SECUREC_STRING_MAX_LEN && (strDest) != NULL && (strSrc) != NULL && \\\n    (count) <= SECUREC_STRING_MAX_LEN && (count) > 0))\n#endif\n\n/*\n * Check Src Count Range\n */\nstatic errno_t CheckSrcCountRange(char *strDest, size_t destMax, const char *strSrc, size_t count)\n{\n    size_t tmpDestMax = destMax;\n    size_t tmpCount = count;\n    const char *endPos = strSrc;\n\n    /* use destMax and  count as boundary checker and destMax must be greater than zero */\n    while (*(endPos) != '\\0' && tmpDestMax > 0 && tmpCount > 0) {\n        ++endPos;\n        --tmpCount;\n        --tmpDestMax;\n    }\n    if (tmpDestMax == 0) {\n        strDest[0] = '\\0';\n        SECUREC_ERROR_INVALID_RANGE(\"strncpy_s\");\n        return ERANGE_AND_RESET;\n    }\n    return EOK;\n}\n\n/*\n * Handling errors, when dest euqal src return 
EOK\n */\nerrno_t strncpy_error(char *strDest, size_t destMax, const char *strSrc, size_t count)\n{\n    if (destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) {\n        SECUREC_ERROR_INVALID_RANGE(\"strncpy_s\");\n        return ERANGE;\n    } else if (strDest == NULL || strSrc == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"strncpy_s\");\n        if (strDest != NULL) {\n            strDest[0] = '\\0';\n            return EINVAL_AND_RESET;\n        }\n        return EINVAL;\n    } else if (count > SECUREC_STRING_MAX_LEN) {\n        strDest[0] = '\\0';      /* clear dest string */\n        SECUREC_ERROR_INVALID_RANGE(\"strncpy_s\");\n        return ERANGE_AND_RESET;\n    } else if (count == 0) {\n        strDest[0] = '\\0';\n        return EOK;\n    }\n\n    return CheckSrcCountRange(strDest, destMax, strSrc, count);\n}\n\n/*\n * <FUNCTION DESCRIPTION>\n *   The strncpy_s function copies not more than n successive characters (not including the terminating null character)\n *                     from the array pointed to by strSrc to the array pointed to by strDest.\n *\n * <INPUT PARAMETERS>\n *    strDest                          Destination string.\n *    destMax                          The size of the destination string, in characters.\n *    strSrc                            Source string.\n *    count                              Number of characters to be copied.\n *\n * <OUTPUT PARAMETERS>\n *    strDest                           is updated\n *\n * <RETURN VALUE>\n *    EOK                               Success\n *    EINVAL                          strDest is  NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN\n *    EINVAL_AND_RESET       strDest !=  NULL and strSrc is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN\n *    ERANGE                         destMax is 0 and destMax > SECUREC_STRING_MAX_LEN\n *    ERANGE_AND_RESET      strDest have not enough space  and all other parameters are valid  and not overlap\n *    
EOVERLAP_AND_RESET   dest buffer and source buffer are overlapped and all  parameters are valid\n *\n *    If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nerrno_t strncpy_s(char *strDest, size_t destMax, const char *strSrc, size_t count)\n{\n    if (SECUREC_STRNCPY_PARAM_OK(strDest, destMax, strSrc, count)) {\n        size_t minCpLen;        /* use it to store the maxi length limit */\n        if (count < destMax) {\n            minCpLen = SecStrMinLen(strSrc, count); /* no ending terminator */\n        } else {\n            size_t tmpCount = destMax;\n#ifdef  SECUREC_COMPATIBLE_WIN_FORMAT\n            if (count == ((size_t)(-1))) {\n                tmpCount = destMax - 1;\n            }\n#endif\n            minCpLen = SecStrMinLen(strSrc, tmpCount);\n            if (minCpLen == destMax) {\n                strDest[0] = '\\0';\n                SECUREC_ERROR_INVALID_RANGE(\"strncpy_s\");\n                return ERANGE_AND_RESET;\n            }\n        }\n        if (SECUREC_STRING_NO_OVERLAP(strDest, strSrc, minCpLen) || strDest == strSrc) {\n            /* Not overlap */\n            SecDoMemcpy(strDest, strSrc, minCpLen);    /* copy string without terminator */\n            strDest[minCpLen] = '\\0';\n            return EOK;\n        } else {\n            strDest[0] = '\\0';\n            SECUREC_ERROR_BUFFER_OVERLAP(\"strncpy_s\");\n            return EOVERLAP_AND_RESET;\n        }\n    }\n    return strncpy_error(strDest, destMax, strSrc, count);\n}\n\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(strncpy_s);\n#endif\n\n"
  },
  {
    "path": "third_party/securec/src/strtok_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securec.h\"\n\n/*\n * Find beginning of token (skip over leading delimiters).Note that\n * there is no token if this loop sets string to point to the terminal null.\n */\nstatic char *SecFindBegin(char *strToken, const char *strDelimit)\n{\n    char *token = strToken;\n    while (*token != '\\0') {\n        const char *ctl = strDelimit;\n        while (*ctl != '\\0' && *ctl != *token) {\n            ++ctl;\n        }\n        if (*ctl == '\\0') { /* don't find any delimiter in string header, break the loop */\n            break;\n        }\n        ++token;\n    }\n    return token;\n}\n\n/*\n * Find rest of token\n */\nstatic char *SecFindRest(char *strToken, const char *strDelimit)\n{\n    /* Find the rest of the token. 
If it is not the end of the string,\n     * put a null there.\n     */\n    char *token = strToken;\n    while (*token != '\\0') {\n        const char *ctl = strDelimit;\n        while (*ctl != '\\0' && *ctl != *token) {\n            ++ctl;\n        }\n        if (*ctl != '\\0') {        /* find a delimiter */\n            *token++ = '\\0';       /* set string termintor */\n            break;\n        }\n        ++token;\n    }\n    return token;\n}\n\n/*\n * Find the final position pointer\n */\nstatic char *SecUpdateToken(char *strToken, const char *strDelimit, char **context)\n{\n    /* point to updated position */\n    char *token = SecFindRest(strToken, strDelimit);\n    /* record string position for next search in the context */\n    *context = token;\n    /* Determine if a token has been found. */\n    if (token == strToken) {\n        return NULL;\n    }\n    return strToken;\n}\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The strtok_s function parses a string into a sequence of strToken,\n *    replace all characters in strToken string that match to strDelimit set with 0.\n *    On the first call to strtok_s the string to be parsed should be specified in strToken.\n *    In each subsequent call that should parse the same string, strToken should be NULL\n * <INPUT PARAMETERS>\n *    strToken            String containing token or tokens.\n *    strDelimit          Set of delimiter characters.\n *    context             Used to store position information between calls\n *                             to strtok_s\n * <OUTPUT PARAMETERS>\n *   context               is updated\n * <RETURN VALUE>\n *   On the first call returns the address of the first non \\0 character, otherwise NULL is returned.\n *   In subsequent calls, the strtoken is set to NULL, and the context set is the same as the previous call,\n *   return NULL if the *context string length is equal 0, otherwise return *context.\n */\nchar *strtok_s(char *strToken, const char *strDelimit, char 
**context)\n{\n    char *orgToken = strToken;\n    /* validate delimiter and string context */\n    if (context == NULL || strDelimit == NULL) {\n        return NULL;\n    }\n    /* valid input string and string pointer from where to search */\n    if (orgToken == NULL && (*context) == NULL) {\n        return NULL;\n    }\n    /* If string is null, continue searching from previous string position stored in context */\n    if (orgToken == NULL) {\n        orgToken = *context;\n    }\n    orgToken = SecFindBegin(orgToken, strDelimit);\n    return SecUpdateToken(orgToken, strDelimit, context);\n}\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(strtok_s);\n#endif\n\n"
  },
  {
    "path": "third_party/securec/src/swprintf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securec.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *   The  swprintf_s  function  is  the  wide-character  equivalent  of the sprintf_s function\n *\n * <INPUT PARAMETERS>\n *    strDest                   Storage location for the output.\n *    destMax                  Maximum number of characters to store.\n *    format                    Format-control string.\n *    ...                        Optional arguments\n *\n * <OUTPUT PARAMETERS>\n *    strDest                    is updated\n *\n * <RETURN VALUE>\n *    return  the number of wide characters stored in strDest, not  counting the terminating null wide character.\n *    return -1  if an error occurred.\n *\n * If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nint swprintf_s(wchar_t *strDest, size_t destMax, const wchar_t *format, ...)\n{\n    int ret;                    /* If initialization causes  e838 */\n    va_list argList;\n\n    va_start(argList, format);\n    ret = vswprintf_s(strDest, destMax, format, argList);\n    va_end(argList);\n    (void)argList;              /* to clear e438 last value assigned not used , the compiler will optimize this code */\n\n    return ret;\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/swscanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securec.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The  swscanf_s  function  is  the  wide-character  equivalent  of the sscanf_s function\n *    The swscanf_s function reads data from buffer into the location given by\n *    each argument. Every argument must be a pointer to a variable with a type\n *    that corresponds to a type specifier in format. The format argument controls\n *    the interpretation of the input fields and has the same form and function\n *    as the format argument for the scanf function. If copying takes place between\n *    strings that overlap, the behavior is undefined.\n *\n * <INPUT PARAMETERS>\n *    buffer                 Stored data.\n *    format                 Format control string, see Format Specifications.\n *    ...                    Optional arguments.\n *\n * <OUTPUT PARAMETERS>\n *    ...                    
the converted value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Each of these functions returns the number of fields successfully converted\n *    and assigned; The return value does not include fields that were read but not\n *    assigned.\n *    A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\nint swscanf_s(const wchar_t *buffer, const wchar_t *format, ...)\n{\n    int ret;                    /* If initialization causes  e838 */\n    va_list argList;\n\n    va_start(argList, format);\n    ret = vswscanf_s(buffer, format, argList);\n    va_end(argList);\n    (void)argList;              /* to clear e438 last value assigned not used , the compiler will optimize this code */\n\n    return ret;\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/vfscanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"secinput.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The vfscanf_s function is equivalent to fscanf_s, with the variable argument list replaced by argList\n *    The vfscanf_s function reads data from the current position of stream into\n *    the locations given by argument (if any). Each argument must be a pointer\n *    to a variable of a type that corresponds to a type specifier in format.\n *    format controls the interpretation of the input fields and has the same\n *    form and function as the format argument for scanf.\n *\n * <INPUT PARAMETERS>\n *    stream               Pointer to FILE structure.\n *    format               Format control string, see Format Specifications.\n *    argList              pointer to list of arguments\n *\n * <OUTPUT PARAMETERS>\n *    argList              the converted value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Each of these functions returns the number of fields successfully converted\n *    and assigned; the return value does not include fields that were read but\n *    not assigned. 
A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\nint vfscanf_s(FILE *stream, const char *format, va_list argList)\n{\n    int retVal;                 /* If initialization causes  e838 */\n    SecFileStream fStr;\n\n    if ((stream == NULL) || (format == NULL)) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vfscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n    if (stream == stdin) {\n        return vscanf_s(format, argList);\n    }\n\n    SECUREC_LOCK_FILE(stream);\n    SECUREC_INIT_SEC_FILE_STREAM(fStr, SECUREC_FILE_STREAM_FLAG, stream, SECUREC_UNINITIALIZED_FILE_POS, NULL, 0);\n    retVal = SecInputS(&fStr, format, argList);\n    SECUREC_UNLOCK_FILE(stream);\n    if (retVal < 0) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vfscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n\n    return retVal;\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/vfwscanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"secinput.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The  vfwscanf_s  function  is  the  wide-character  equivalent  of the vfscanf_s function\n *    The vfwscanf_s function reads data from the current position of stream into\n *    the locations given by argument (if any). Each argument must be a pointer\n *    to a variable of a type that corresponds to a type specifier in format.\n *    format controls the interpretation of the input fields and has the same form\n *    and function as the format argument for scanf.\n *\n * <INPUT PARAMETERS>\n *    stream               Pointer to FILE structure.\n *    format               Format control string, see Format Specifications.\n *    argList              pointer to list of arguments\n *\n * <OUTPUT PARAMETERS>\n *    argList              the converted value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Each of these functions returns the number of fields successfully converted\n *    and assigned; the return value does not include fields that were read but\n *    not assigned. 
A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\nint vfwscanf_s(FILE *stream, const wchar_t *format, va_list argList)\n{\n    int retVal; /* If initialization causes  e838 */\n    SecFileStream fStr;\n\n    if ((stream == NULL) || (format == NULL)) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vfwscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n    if (stream == stdin) {\n        return vwscanf_s(format, argList);\n    }\n\n    SECUREC_LOCK_FILE(stream);\n    SECUREC_INIT_SEC_FILE_STREAM(fStr, SECUREC_FILE_STREAM_FLAG, stream, SECUREC_UNINITIALIZED_FILE_POS, NULL, 0);\n    retVal = SecInputSW(&fStr, format, argList);\n    SECUREC_UNLOCK_FILE(stream);\n    if (retVal < 0) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vfwscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n    return retVal;\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/vscanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"secinput.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *     The vscanf_s function is equivalent to scanf_s, with the variable argument list replaced by argList,\n *     The vscanf_s function reads data from the standard input stream stdin and\n *    writes the data into the location that's given by argument. Each argument\n *    must be a pointer to a variable of a type that corresponds to a type specifier\n *    in format. 
If copying occurs between strings that overlap, the behavior is\n *    undefined.\n *\n * <INPUT PARAMETERS>\n *    format                Format control string.\n *    argList               pointer to list of arguments\n *\n * <OUTPUT PARAMETERS>\n *    argList               the converted value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Returns the number of fields successfully converted and assigned;\n *    the return value does not include fields that were read but not assigned.\n *    A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\nint vscanf_s(const char *format, va_list argList)\n{\n    int retVal;                 /* If initialization causes  e838 */\n    SecFileStream fStr;\n    SECUREC_INIT_SEC_FILE_STREAM(fStr, SECUREC_FROM_STDIN_FLAG, stdin, 0, NULL, 0);\n    /*\n     * \"va_list\" has different definition on different platform, so we can't use argList == NULL\n     * to determine it's invalid. If you has fixed platform, you can check some fields to validate it,\n     * such as \"argList == NULL\" or argList.xxx != NULL or *(size_t *)&argList != 0.\n     */\n    if (format == NULL || fStr.pf == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n\n    SECUREC_LOCK_STDIN(0, fStr.pf);\n\n    retVal = SecInputS(&fStr, format, argList);\n\n    SECUREC_UNLOCK_STDIN(0, fStr.pf);\n    if (retVal < 0) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n    return retVal;\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/vsnprintf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"secureprintoutput.h\"\n\n#if SECUREC_ENABLE_VSNPRINTF\n/*\n * <FUNCTION DESCRIPTION>\n *    The vsnprintf_s function is equivalent to the vsnprintf function\n *     except for the parameter destMax/count and the explicit runtime-constraints violation\n *    The vsnprintf_s function takes a pointer to an argument list, then formats\n *    and writes up to count characters of the given data to the memory pointed\n *    to by strDest and appends a terminating null.\n *\n * <INPUT PARAMETERS>\n *    strDest                  Storage location for the output.\n *    destMax                The size of the strDest for output.\n *    count                    Maximum number of character to write(not including\n *                                the terminating NULL)\n *    format                   Format-control string.\n *    argList                     pointer to list of arguments.\n *\n * <OUTPUT PARAMETERS>\n *    strDest                is updated\n *\n * <RETURN VALUE>\n *    return  the number of characters written, not including the terminating null\n *    return -1 if an  error occurs.\n *    return -1 if count < destMax and the output string  has been truncated\n *\n * If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nint vsnprintf_s(char *strDest, size_t 
destMax, size_t count, const char *format, va_list argList)\n{\n    int retVal;\n\n    if (format == NULL || strDest == NULL || destMax == 0 || destMax > SECUREC_STRING_MAX_LEN ||\n        (count > (SECUREC_STRING_MAX_LEN - 1) && count != (size_t)(-1))) {\n        if (strDest != NULL && destMax > 0 && destMax <= SECUREC_STRING_MAX_LEN) {\n            strDest[0] = '\\0';\n        }\n        SECUREC_ERROR_INVALID_PARAMTER(\"vsnprintf_s\");\n        return -1;\n    }\n\n    if (destMax > count) {\n        retVal = SecVsnprintfImpl(strDest, count + 1, format, argList);\n        if (retVal == SECUREC_PRINTF_TRUNCATE) {  /* lsd add to keep dest buffer not destroyed 2014.2.18 */\n            /* the string has been truncated, return  -1 */\n            return -1;          /* to skip error handler,  return strlen(strDest) or -1 */\n        }\n    } else {\n        retVal = SecVsnprintfImpl(strDest, destMax, format, argList);\n#ifdef SECUREC_COMPATIBLE_WIN_FORMAT\n        if (retVal == SECUREC_PRINTF_TRUNCATE && count == (size_t)(-1)) {\n            return -1;\n        }\n#endif\n    }\n\n    if (retVal < 0) {\n        strDest[0] = '\\0';      /* empty the dest strDest */\n\n        if (retVal == SECUREC_PRINTF_TRUNCATE) {\n            /* Buffer too small */\n            SECUREC_ERROR_INVALID_RANGE(\"vsnprintf_s\");\n        }\n\n        SECUREC_ERROR_INVALID_PARAMTER(\"vsnprintf_s\");\n        return -1;\n    }\n\n    return retVal;\n}\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(vsnprintf_s);\n#endif\n#endif\n\n#if SECUREC_SNPRINTF_TRUNCATED\n/*\n * <FUNCTION DESCRIPTION>\n *    The vsnprintf_truncated_s function is equivalent to the vsnprintf function\n *     except for the parameter destMax/count and the explicit runtime-constraints violation\n *    The vsnprintf_truncated_s function takes a pointer to an argument list, then formats\n *    and writes up to count characters of the given data to the memory pointed\n *    to by strDest and appends a terminating null.\n *\n * 
<INPUT PARAMETERS>\n *    strDest                  Storage location for the output.\n *    destMax                The size of the strDest for output.\n *                                the terminating NULL)\n *    format                   Format-control string.\n *    argList                     pointer to list of arguments.\n *\n * <OUTPUT PARAMETERS>\n *    strDest                is updated\n *\n * <RETURN VALUE>\n *    return  the number of characters written, not including the terminating null\n *    return -1 if an  error occurs.\n *    return destMax-1 if output string  has been truncated\n *\n * If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nint vsnprintf_truncated_s(char *strDest, size_t destMax, const char *format, va_list argList)\n{\n    int retVal;\n\n    if (format == NULL || strDest == NULL || destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) {\n        if (strDest != NULL && destMax > 0 && destMax <= SECUREC_STRING_MAX_LEN) {\n            strDest[0] = '\\0';\n        }\n        SECUREC_ERROR_INVALID_PARAMTER(\"vsnprintf_truncated_s\");\n        return -1;\n    }\n\n    retVal = SecVsnprintfImpl(strDest, destMax, format, argList);\n\n    if (retVal < 0) {\n        if (retVal == SECUREC_PRINTF_TRUNCATE) {\n            return (int)(destMax - 1);  /* to skip error handler,  return strlen(strDest) */\n        }\n        strDest[0] = '\\0';      /* empty the dest strDest */\n        SECUREC_ERROR_INVALID_PARAMTER(\"vsnprintf_truncated_s\");\n        return -1;\n    }\n\n    return retVal;\n}\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(vsnprintf_truncated_s);\n#endif\n#endif\n\n\n"
  },
  {
    "path": "third_party/securec/src/vsprintf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"secureprintoutput.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The vsprintf_s function is equivalent to the vsprintf function\n *    except for the parameter destMax and the explicit runtime-constraints violation\n *    The vsprintf_s function takes a pointer to an argument list, and then formats\n *    and writes the given data to the memory pointed to by strDest.\n *    The function differ from the non-secure versions only in that the secure\n *    versions support positional parameters.\n *\n * <INPUT PARAMETERS>\n *    strDest                Storage location for the output.\n *    destMax                Size of strDest\n *    format                 Format specification.\n *    argList                   pointer to list of arguments\n *\n * <OUTPUT PARAMETERS>\n *    strDest                is updated\n *\n * <RETURN VALUE>\n *    return  the number of characters written, not including the terminating null character,\n *    return -1  if an  error occurs.\n *\n * If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nint vsprintf_s(char *strDest, size_t destMax, const char *format, va_list argList)\n{\n    int retVal;               /* If initialization causes  e838 */\n\n    if (format == NULL || strDest == NULL || destMax == 0 || destMax > 
SECUREC_STRING_MAX_LEN) {\n        if (strDest != NULL && destMax > 0 && destMax <= SECUREC_STRING_MAX_LEN) {\n            strDest[0] = '\\0';\n        }\n        SECUREC_ERROR_INVALID_PARAMTER(\"vsprintf_s\");\n        return -1;\n    }\n\n    retVal = SecVsnprintfImpl(strDest, destMax, format, argList);\n\n    if (retVal < 0) {\n        strDest[0] = '\\0';\n        if (retVal == SECUREC_PRINTF_TRUNCATE) {\n            /* Buffer is too small */\n            SECUREC_ERROR_INVALID_RANGE(\"vsprintf_s\");\n        }\n        SECUREC_ERROR_INVALID_PARAMTER(\"vsprintf_s\");\n        return -1;\n    }\n\n    return retVal;\n}\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(vsprintf_s);\n#endif\n\n\n"
  },
  {
    "path": "third_party/securec/src/vsscanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"secinput.h\"\n#if defined(SECUREC_VXWORKS_PLATFORM) && (!defined(SECUREC_SYSAPI4VXWORKS) && !defined(SECUREC_CTYPE_MACRO_ADAPT))\n#include <ctype.h>\n#endif\n\n/*\n * <NAME>\n *    vsscanf_s\n *\n *\n * <FUNCTION DESCRIPTION>\n *    The vsscanf_s function is equivalent to sscanf_s, with the variable argument list replaced by argList\n *    The vsscanf_s function reads data from buffer into the location given by\n *    each argument. Every argument must be a pointer to a variable with a type\n *    that corresponds to a type specifier in format. The format argument controls\n *    the interpretation of the input fields and has the same form and function\n *    as the format argument for the scanf function.\n *    If copying takes place between strings that overlap, the behavior is undefined.\n *\n * <INPUT PARAMETERS>\n *    buffer                Stored data\n *    format                Format control string, see Format Specifications.\n *    argList               pointer to list of arguments\n *\n * <OUTPUT PARAMETERS>\n *    argList               the converted value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Each of these functions returns the number of fields successfully converted\n *    and assigned; the return value does not include fields that were read but\n *    not assigned. 
A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\nint vsscanf_s(const char *buffer, const char *format, va_list argList)\n{\n    size_t count;               /* If initialization causes  e838 */\n    int retVal;\n    SecFileStream fStr;\n\n    /* validation section */\n    if (buffer == NULL || format == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vsscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n    count = strlen(buffer);\n    if (count == 0 || count > SECUREC_STRING_MAX_LEN) {\n        SecClearDestBuf(buffer, format, argList);\n        SECUREC_ERROR_INVALID_PARAMTER(\"vsscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n#ifdef SECUREC_VXWORKS_PLATFORM\n    /*\n     * in vxworks platform when buffer is white string, will set first %s argument tu zero.like following useage:\n     * \"   \\v\\f\\t\\r\\n\", \"%s\", str, strSize\n     * do not check all character, just first and last character then consider it is white string\n     */\n    if (isspace((int)buffer[0]) && isspace((int)buffer[count - 1])) {\n        SecClearDestBuf(buffer, format, argList);\n    }\n#endif\n    SECUREC_INIT_SEC_FILE_STREAM(fStr, SECUREC_MEM_STR_FLAG, NULL, 0, buffer, (int)count);\n    retVal = SecInputS(&fStr, format, argList);\n    if (retVal < 0) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vsscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n    return retVal;\n}\n#if SECUREC_IN_KERNEL\nEXPORT_SYMBOL(vsscanf_s);\n#endif\n\n"
  },
  {
    "path": "third_party/securec/src/vswprintf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"secureprintoutput.h\"\n\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The  vswprintf_s  function  is  the  wide-character  equivalent  of the vsprintf_s function\n *\n * <INPUT PARAMETERS>\n *    strDest                  Storage location for the output.\n *    destMax                Size of strDest\n *    format                  Format specification.\n *    argList                   pointer to list of arguments\n *\n * <OUTPUT PARAMETERS>\n *    strDest                 is updated\n *\n * <RETURN VALUE>\n *    return  the number of wide characters stored in strDest, not  counting the terminating null wide character.\n *    return -1  if an error occurred.\n *\n * If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nint vswprintf_s(wchar_t *strDest, size_t destMax, const wchar_t *format, va_list argList)\n{\n    int retVal;               /* If initialization causes  e838 */\n\n    if (format == NULL || strDest == NULL || destMax == 0 || destMax > (SECUREC_WCHAR_STRING_MAX_LEN)) {\n        if (strDest != NULL && destMax > 0) {\n            strDest[0] = '\\0';\n        }\n        SECUREC_ERROR_INVALID_PARAMTER(\"vswprintf_s\");\n        return -1;\n    }\n\n    retVal = SecVswprintfImpl(strDest, destMax, format, argList);\n\n    if (retVal < 0) {\n        strDest[0] = 
'\\0';\n        if (retVal == SECUREC_PRINTF_TRUNCATE) {\n            /* Buffer too small */\n            SECUREC_ERROR_INVALID_RANGE(\"vswprintf_s\");\n        }\n        SECUREC_ERROR_INVALID_PARAMTER(\"vswprintf_s\");\n        return -1;\n    }\n\n    return retVal;\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/vswscanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"secinput.h\"\n\nstatic size_t SecWcslen(const wchar_t *s)\n{\n    const wchar_t *end = s;\n    while (*end != L'\\0') {\n        ++end;\n    }\n    return ((size_t)((end - s)));\n}\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The  vswscanf_s  function  is  the  wide-character  equivalent  of the vsscanf_s function\n *    The vsscanf_s function reads data from buffer into the location given by\n *    each argument. Every argument must be a pointer to a variable with a type\n *    that corresponds to a type specifier in format.\n *    The format argument controls the interpretation of the input fields and\n *    has the same form and function as the format argument for the scanf function.\n *    If copying takes place between strings that overlap, the behavior is undefined.\n *\n * <INPUT PARAMETERS>\n *    buffer                Stored data\n *    format                Format control string, see Format Specifications.\n *    argList               pointer to list of arguments\n *\n * <OUTPUT PARAMETERS>\n *    argList               the converted value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Each of these functions returns the number of fields successfully converted\n *    and assigned; the return value does not include fields that were read but\n *    not assigned. 
A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\nint vswscanf_s(const wchar_t *buffer, const wchar_t *format, va_list argList)\n{\n    size_t count; /* If initialization causes  e838 */\n    SecFileStream fStr;\n    int retVal;\n\n    /* validation section */\n    if (buffer == NULL || format == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vswscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n    count = SecWcslen(buffer);\n    if (count == 0 || count > SECUREC_WCHAR_STRING_MAX_LEN) {\n        SecClearDestBufW(buffer, format, argList);\n        SECUREC_ERROR_INVALID_PARAMTER(\"vswscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n    SECUREC_INIT_SEC_FILE_STREAM(fStr, SECUREC_MEM_STR_FLAG, NULL, 0,\\\n                                 (const char *)buffer, (int)count * ((int)sizeof(wchar_t)));\n    retVal = SecInputSW(&fStr, format, argList);\n    if (retVal < 0) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vswscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n    return retVal;\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/vwscanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"secinput.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The  vwscanf_s  function  is  the  wide-character  equivalent  of the vscanf_s function\n *    The vwscanf_s function is the wide-character version of vscanf_s. The\n *    function reads data from the standard input stream stdin and writes the\n *    data into the location that's given by argument. Each argument  must be a\n *    pointer to a variable of a type that corresponds to a type specifier in\n *    format. 
If copying occurs between strings that overlap, the behavior is\n *    undefined.\n *\n * <INPUT PARAMETERS>\n *    format                 Format control string.\n *    argList                pointer to list of arguments\n *\n * <OUTPUT PARAMETERS>\n *    argList                the converted value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Returns the number of fields successfully converted and assigned;\n *    the return value does not include fields that were read but not assigned.\n *    A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\nint vwscanf_s(const wchar_t *format, va_list argList)\n{\n    int retVal;                 /* If initialization causes  e838 */\n    SecFileStream fStr;\n\n    SECUREC_INIT_SEC_FILE_STREAM(fStr, SECUREC_FROM_STDIN_FLAG, stdin, 0, NULL, 0);\n    if (format == NULL || fStr.pf == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vwscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n\n    SECUREC_LOCK_STDIN(0, fStr.pf);\n\n    retVal = SecInputSW(&fStr, format, argList);\n\n    SECUREC_UNLOCK_STDIN(0, fStr.pf);\n\n    if (retVal < 0) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"vwscanf_s\");\n        return SECUREC_SCANF_EINVAL;\n    }\n\n    return retVal;\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/wcscat_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_INLINE_DO_MEMCPY 1\n\n#include \"securecutil.h\"\n\n/*\n * Befor this function, the basic parameter checking has been done\n */\nstatic errno_t SecDoWcscat(wchar_t *strDest, size_t destMax, const wchar_t *strSrc)\n{\n    size_t destLen;\n    size_t srcLen;\n    size_t maxCount; /* Store the maximum available count */\n\n    /* To calculate the length of a wide character, the parameter must be a wide character */\n    SECUREC_CALC_WSTR_LEN(strDest, destMax, &destLen);\n    maxCount = destMax - destLen;\n    SECUREC_CALC_WSTR_LEN(strSrc, maxCount, &srcLen);\n\n    if (SECUREC_CAT_STRING_IS_OVERLAP(strDest, destLen, strSrc, srcLen)) {\n        strDest[0] = L'\\0';\n        if (strDest + destLen <= strSrc && destLen == destMax) {\n            SECUREC_ERROR_INVALID_PARAMTER(\"wcscat_s\");\n            return EINVAL_AND_RESET;\n        }\n        SECUREC_ERROR_BUFFER_OVERLAP(\"wcscat_s\");\n        return EOVERLAP_AND_RESET;\n    }\n    if (srcLen + destLen >= destMax || strDest == strSrc) {\n        strDest[0] = L'\\0';\n        if (destLen == destMax) {\n            SECUREC_ERROR_INVALID_PARAMTER(\"wcscat_s\");\n            return EINVAL_AND_RESET;\n        }\n        SECUREC_ERROR_INVALID_RANGE(\"wcscat_s\");\n        return ERANGE_AND_RESET;\n    }\n    SecDoMemcpy(strDest + destLen, strSrc, (srcLen + 1) * 
sizeof(wchar_t)); /* single character length  include \\0 */\n    return EOK;\n}\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The wcscat_s function appends a copy of the wide string pointed to by strSrc\n*      (including the terminating null wide character)\n *     to the end of the wide string pointed to by strDest.\n *    The arguments and return value of wcscat_s are wide-character strings.\n *\n *    The wcscat_s function appends strSrc to strDest and terminates the resulting\n *    string with a null character. The initial character of strSrc overwrites the\n *    terminating null character of strDest. wcscat_s will return EOVERLAP_AND_RESET if the\n *    source and destination strings overlap.\n *\n *    Note that the second parameter is the total size of the buffer, not the\n *    remaining size.\n *\n * <INPUT PARAMETERS>\n *    strDest              Null-terminated destination string buffer.\n *    destMax              Size of the destination string buffer.\n *    strSrc               Null-terminated source string buffer.\n *\n * <OUTPUT PARAMETERS>\n *    strDest               is updated\n *\n * <RETURN VALUE>\n *    EOK                   Success\n *    EINVAL                strDest is  NULL and destMax != 0 and destMax <= SECUREC_WCHAR_STRING_MAX_LEN\n *    EINVAL_AND_RESET      (strDest unterminated and all other parameters are valid) or\n *                          (strDest != NULL and strSrc is NULLL and destMax != 0\n *                           and destMax <= SECUREC_WCHAR_STRING_MAX_LEN)\n *    ERANGE                destMax > SECUREC_WCHAR_STRING_MAX_LEN or destMax is 0\n *    ERANGE_AND_RESET      strDest have not enough space  and all other parameters are valid  and not overlap\n *    EOVERLAP_AND_RESET     dest buffer and source buffer are overlapped and all  parameters are valid\n *\n *    If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nerrno_t wcscat_s(wchar_t *strDest, size_t 
destMax, const wchar_t *strSrc)\n{\n    if (destMax == 0 || destMax > SECUREC_WCHAR_STRING_MAX_LEN) {\n        SECUREC_ERROR_INVALID_RANGE(\"wcscat_s\");\n        return ERANGE;\n    }\n\n    if (strDest == NULL || strSrc == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"wcscat_s\");\n        if (strDest != NULL) {\n            strDest[0] = L'\\0';\n            return EINVAL_AND_RESET;\n        }\n        return EINVAL;\n    }\n\n    return SecDoWcscat(strDest, destMax, strSrc);\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/wcscpy_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_INLINE_DO_MEMCPY 1\n\n#include \"securecutil.h\"\n\nstatic errno_t SecDoWcscpy(wchar_t *strDest, size_t destMax, const wchar_t *strSrc)\n{\n    size_t srcStrLen;\n\n    SECUREC_CALC_WSTR_LEN(strSrc, destMax, &srcStrLen);\n    if (srcStrLen == destMax) {\n        strDest[0] = '\\0';\n        SECUREC_ERROR_INVALID_RANGE(\"wcscpy_s\");\n        return ERANGE_AND_RESET;\n    }\n    if (strDest == strSrc) {\n        return EOK;\n    }\n\n    if (SECUREC_STRING_NO_OVERLAP(strDest, strSrc, srcStrLen)) {\n        /* performance optimization srcStrLen include '\\0' */\n        SecDoMemcpy(strDest, strSrc, (srcStrLen + 1) * sizeof(wchar_t)); /* single character length  include \\0 */\n        return EOK;\n    } else {\n        strDest[0] = L'\\0';\n        SECUREC_ERROR_BUFFER_OVERLAP(\"wcscpy_s\");\n        return EOVERLAP_AND_RESET;\n    }\n}\n\n/*\n * <FUNCTION DESCRIPTION>\n *   The wcscpy_s function copies the wide string pointed to by strSrc\n *   (including theterminating null wide character) into the array pointed to by strDest\n\n * <INPUT PARAMETERS>\n *    strDest               Destination string buffer\n *    destMax               Size of the destination string buffer.\n *    strSrc                Null-terminated source string buffer.\n *\n * <OUTPUT PARAMETERS>\n *    strDest               is updated.\n *\n * 
<RETURN VALUE>\n *    EOK                   Success\n *    EINVAL                strDest is  NULL and destMax != 0 and destMax <= SECUREC_WCHAR_STRING_MAX_LEN\n *    EINVAL_AND_RESET      strDest != NULL and strSrc is NULLL and destMax != 0\n *                          and destMax <= SECUREC_WCHAR_STRING_MAX_LEN\n *    ERANGE                destMax > SECUREC_WCHAR_STRING_MAX_LEN or destMax is 0\n *    ERANGE_AND_RESET      destMax <= length of strSrc and strDest != strSrc\n *                          and strDest != NULL and strSrc != NULL and destMax != 0\n *                          and destMax <= SECUREC_WCHAR_STRING_MAX_LEN and not overlap\n *    EOVERLAP_AND_RESET    dest buffer and source buffer are overlapped and destMax != 0\n *                          and destMax <= SECUREC_WCHAR_STRING_MAX_LEN\n *                          and strDest != NULL and strSrc !=NULL and strDest != strSrc\n *\n *    If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nerrno_t wcscpy_s(wchar_t *strDest, size_t destMax, const wchar_t *strSrc)\n{\n    if (destMax == 0 || destMax > SECUREC_WCHAR_STRING_MAX_LEN) {\n        SECUREC_ERROR_INVALID_RANGE(\"wcscpy_s\");\n        return ERANGE;\n    }\n    if (strDest == NULL || strSrc == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"wcscpy_s\");\n        if (strDest != NULL) {\n            strDest[0] = L'\\0';\n            return EINVAL_AND_RESET;\n        }\n        return EINVAL;\n    }\n    return SecDoWcscpy(strDest, destMax, strSrc);\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/wcsncat_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_INLINE_DO_MEMCPY 1\n\n#include \"securecutil.h\"\n\n/*\n * Befor this function, the basic parameter checking has been done\n */\nstatic errno_t SecDoWcsncat(wchar_t *strDest, size_t destMax, const wchar_t *strSrc, size_t count)\n{\n    size_t destLen;\n    size_t srcLen;\n\n    /* To calculate the length of a wide character, the parameter must be a wide character */\n    SECUREC_CALC_WSTR_LEN(strDest, destMax, &destLen);\n    SECUREC_CALC_WSTR_LEN(strSrc, count, &srcLen);\n\n    if (SECUREC_CAT_STRING_IS_OVERLAP(strDest, destLen, strSrc, srcLen)) {\n        strDest[0] = L'\\0';\n        if (strDest + destLen <= strSrc && destLen == destMax) {\n            SECUREC_ERROR_INVALID_PARAMTER(\"wcsncat_s\");\n            return EINVAL_AND_RESET;\n        }\n        SECUREC_ERROR_BUFFER_OVERLAP(\"wcsncat_s\");\n        return EOVERLAP_AND_RESET;\n    }\n    if (srcLen + destLen >= destMax || strDest == strSrc) {\n        strDest[0] = L'\\0';\n        if (destLen == destMax) {\n            SECUREC_ERROR_INVALID_PARAMTER(\"wcsncat_s\");\n            return EINVAL_AND_RESET;\n        }\n        SECUREC_ERROR_INVALID_RANGE(\"wcsncat_s\");\n        return ERANGE_AND_RESET;\n    }\n    SecDoMemcpy(strDest + destLen, strSrc, srcLen * sizeof(wchar_t)); /* no  terminator */\n    *(strDest + destLen + srcLen) = L'\\0';\n    return 
EOK;\n}\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The wcsncat_s function appends not more than n successive wide characters\n *     (not including the terminating null wide character)\n *     from the array pointed to by strSrc to the end of the wide string pointed to by strDest.\n *\n *    The wcsncat_s function try to append the first D characters of strSrc to\n *    the end of strDest, where D is the lesser of count and the length of strSrc.\n *    If appending those D characters will fit within strDest (whose size is\n *    given as destMax) and still leave room for a null terminator, then those\n *    characters are appended, starting at the original terminating null of\n *    strDest, and a new terminating null is appended; otherwise, strDest[0] is\n *    set to the null character.\n *\n * <INPUT PARAMETERS>\n *    strDest               Null-terminated destination string.\n *    destMax               Size of the destination buffer.\n *    strSrc                Null-terminated source string.\n *    count                 Number of character to append, or truncate.\n *\n * <OUTPUT PARAMETERS>\n *    strDest               is updated\n *\n * <RETURN VALUE>\n *    EOK                   Success\n *    EINVAL                strDest is  NULL and destMax != 0 and destMax <= SECUREC_WCHAR_STRING_MAX_LEN\n *    EINVAL_AND_RESET      (strDest unterminated and all other parameters are valid) or\n *                    (strDest != NULL and strSrc is NULLL and destMax != 0 and destMax <= SECUREC_WCHAR_STRING_MAX_LEN)\n *    ERANGE                destMax > SECUREC_WCHAR_STRING_MAX_LEN or destMax is 0\n *    ERANGE_AND_RESET      strDest have not enough space  and all other parameters are valid  and not overlap\n *    EOVERLAP_AND_RESET     dest buffer and source buffer are overlapped and all  parameters are valid\n *\n *    If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nerrno_t wcsncat_s(wchar_t *strDest, 
size_t destMax, const wchar_t *strSrc, size_t count)\n{\n    if (destMax == 0 || destMax > SECUREC_WCHAR_STRING_MAX_LEN) {\n        SECUREC_ERROR_INVALID_RANGE(\"wcsncat_s\");\n        return ERANGE;\n    }\n    if (strDest == NULL || strSrc == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"wcsncat_s\");\n        if (strDest != NULL) {\n            strDest[0] = L'\\0';\n            return EINVAL_AND_RESET;\n        }\n        return EINVAL;\n    }\n    if (count > SECUREC_WCHAR_STRING_MAX_LEN) {\n#ifdef  SECUREC_COMPATIBLE_WIN_FORMAT\n        if (count == ((size_t)-1)) {\n            /* Windows internal functions may pass in -1 when calling this function */\n            return SecDoWcsncat(strDest, destMax, strSrc, destMax);\n        }\n#endif\n        strDest[0] = L'\\0';\n        SECUREC_ERROR_INVALID_RANGE(\"wcsncat_s\");\n        return ERANGE_AND_RESET;\n    }\n    return SecDoWcsncat(strDest, destMax, strSrc, count);\n}\n\n\n"
  },
  {
    "path": "third_party/securec/src/wcsncpy_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define SECUREC_INLINE_DO_MEMCPY 1\n\n#include \"securecutil.h\"\n\nstatic errno_t SecDoWcsncpy(wchar_t *strDest, size_t destMax, const wchar_t *strSrc, size_t count)\n{\n    size_t srcStrLen;\n    if (count < destMax) {\n        SECUREC_CALC_WSTR_LEN(strSrc, count, &srcStrLen);\n    } else {\n        SECUREC_CALC_WSTR_LEN(strSrc, destMax, &srcStrLen);\n    }\n    if (srcStrLen == destMax) {\n        strDest[0] = '\\0';\n        SECUREC_ERROR_INVALID_RANGE(\"wcsncpy_s\");\n        return ERANGE_AND_RESET;\n    }\n    if (strDest == strSrc) {\n        return EOK;\n    }\n    if (SECUREC_STRING_NO_OVERLAP(strDest, strSrc, srcStrLen)) {\n        /* performance optimization srcStrLen not include '\\0' */\n        SecDoMemcpy(strDest, strSrc, srcStrLen * sizeof(wchar_t));\n        *(strDest + srcStrLen) = L'\\0';\n        return EOK;\n    } else {\n        strDest[0] = L'\\0';\n        SECUREC_ERROR_BUFFER_OVERLAP(\"wcsncpy_s\");\n        return EOVERLAP_AND_RESET;\n    }\n}\n\n/*\n * <FUNCTION DESCRIPTION>\n *    The wcsncpy_s function copies not more than n successive wide characters\n *     (not including the terminating null wide character)\n *     from the array pointed to by strSrc to the array pointed to by strDest\n *\n * <INPUT PARAMETERS>\n *    strDest             Destination string.\n *    destMax             The size of the 
destination string, in characters.\n *    strSrc              Source string.\n *    count                Number of characters to be copied.\n *\n * <OUTPUT PARAMETERS>\n *    strDest              is updated\n *\n * <RETURN VALUE>\n *    EOK                  Success\n *    EINVAL               strDest is  NULL and destMax != 0 and destMax <= SECUREC_WCHAR_STRING_MAX_LEN\n *    EINVAL_AND_RESET     strDest != NULL and strSrc is NULLL and destMax != 0\n *                         and destMax <= SECUREC_WCHAR_STRING_MAX_LEN\n *    ERANGE               destMax > SECUREC_WCHAR_STRING_MAX_LEN or destMax is 0\n *    ERANGE_AND_RESET     count > SECUREC_WCHAR_STRING_MAX_LEN or\n *                         (destMax <= length of strSrc and destMax <= count and strDest != strSrc\n *                          and strDest != NULL and strSrc != NULL and destMax != 0 and\n *                          destMax <= SECUREC_WCHAR_STRING_MAX_LEN and not overlap)\n *    EOVERLAP_AND_RESET     dest buffer and source buffer are overlapped and  all  parameters are valid\n *\n *\n *    If there is a runtime-constraint violation, strDest[0] will be set to the '\\0' when strDest and destMax valid\n */\nerrno_t wcsncpy_s(wchar_t *strDest, size_t destMax, const wchar_t *strSrc, size_t count)\n{\n    if (destMax == 0 || destMax > SECUREC_WCHAR_STRING_MAX_LEN) {\n        SECUREC_ERROR_INVALID_RANGE(\"wcsncpy_s\");\n        return ERANGE;\n    }\n    if (strDest == NULL || strSrc == NULL) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"wcsncpy_s\");\n        if (strDest != NULL) {\n            strDest[0] = '\\0';\n            return EINVAL_AND_RESET;\n        }\n        return EINVAL;\n    }\n    if (count > SECUREC_WCHAR_STRING_MAX_LEN) {\n#ifdef SECUREC_COMPATIBLE_WIN_FORMAT\n        if (count == (size_t)(-1)) {\n            return SecDoWcsncpy(strDest, destMax, strSrc, destMax - 1);\n        }\n#endif\n        strDest[0] = '\\0';      /* clear dest string */\n        
SECUREC_ERROR_INVALID_RANGE(\"wcsncpy_s\");\n        return ERANGE_AND_RESET;\n    }\n\n    if (count == 0) {\n        strDest[0] = '\\0';\n        return EOK;\n    }\n\n    return SecDoWcsncpy(strDest, destMax, strSrc, count);\n}\n\n"
  },
  {
    "path": "third_party/securec/src/wcstok_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securec.h\"\n\n/*\n * FindBegin Wide character postion  function\n */\nstatic wchar_t *SecFindBeginW(wchar_t *strToken, const wchar_t *strDelimit)\n{\n    /* Find beginning of token (skip over leading delimiters). Note that\n     * there is no token if this loop sets string to point to the terminal null.\n     */\n    wchar_t *token = strToken;\n    while (*token != L'\\0') {\n        const wchar_t *ctl = strDelimit;\n        while (*ctl != L'\\0' && *ctl != *token) {\n            ++ctl;\n        }\n        if (*ctl == L'\\0') {\n            break;\n        }\n        ++token;\n    }\n    return token;\n}\n\n/*\n * FindBegin rest Wide character postion  function\n */\nstatic wchar_t *SecFindRestW(wchar_t *strToken, const wchar_t *strDelimit)\n{\n    /* Find the end of the token. 
If it is not the end of the string,\n     * put a null there.\n     */\n    wchar_t *token = strToken;\n    while (*token != L'\\0') {\n        const wchar_t *ctl = strDelimit;\n        while (*ctl != L'\\0' && *ctl != *token) {\n            ++ctl;\n        }\n        if (*ctl != L'\\0') {\n            *token++ = L'\\0';\n            break;\n        }\n        ++token;\n    }\n    return token;\n}\n\n/*\n * Update Token wide character  function\n */\nstatic wchar_t *SecUpdateTokenW(wchar_t *strToken, const wchar_t *strDelimit, wchar_t **context)\n{\n    /* point to updated position */\n    wchar_t *token = SecFindRestW(strToken, strDelimit);\n    /* Update the context */\n    *context = token;\n    /* Determine if a token has been found. */\n    if (token == strToken) {\n        return NULL;\n    }\n    return strToken;\n}\n\n/*\n * <NAME>\n *    wcstok_s\n *\n *\n * <FUNCTION DESCRIPTION>\n *   The  wcstok_s  function  is  the  wide-character  equivalent  of the strtok_s function\n *\n * <INPUT PARAMETERS>\n *    strToken               String containing token or tokens.\n *    strDelimit             Set of delimiter characters.\n *    context                Used to store position information between calls to\n *                               wcstok_s.\n *\n * <OUTPUT PARAMETERS>\n *    context               is updated\n * <RETURN VALUE>\n *   The  wcstok_s  function  is  the  wide-character  equivalent  of the strtok_s function\n */\nwchar_t *wcstok_s(wchar_t *strToken, const wchar_t *strDelimit, wchar_t **context)\n{\n    wchar_t *orgToken = strToken;\n    /* validation section */\n    if (context == NULL || strDelimit == NULL) {\n        return NULL;\n    }\n    if (orgToken == NULL && (*context) == NULL) {\n        return NULL;\n    }\n    /* If string==NULL, continue with previous string */\n    if (orgToken == NULL) {\n        orgToken = *context;\n    }\n    orgToken = SecFindBeginW(orgToken, strDelimit);\n    return SecUpdateTokenW(orgToken, strDelimit, 
context);\n}\n\n"
  },
  {
    "path": "third_party/securec/src/wmemcpy_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securecutil.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *   The wmemcpy_s function copies n successive wide characters\n *    from the object pointed to by src into the object pointed to by dest.\n *\n * <INPUT PARAMETERS>\n *    dest                   Destination buffer.\n *    destMax                Size of the destination buffer.\n *    src                     Buffer to copy from.\n *    count                  Number of characters to copy.\n *\n * <OUTPUT PARAMETERS>\n *    dest buffer            is updated.\n *\n * <RETURN VALUE>\n *    EOK                    Success\n *    EINVAL                 dest is  NULL  and destMax != 0 and count <= destMax\n *                           and destMax <= SECUREC_WCHAR_MEM_MAX_LEN\n *    EINVAL_AND_RESET       dest != NULL and src is NULL and destMax != 0\n *                           and destMax <= SECUREC_WCHAR_MEM_MAX_LEN and count <= destMax\n *    ERANGE                 destMax > SECUREC_WCHAR_MEM_MAX_LEN or destMax is 0 or\n *                           (count > destMax and dest is  NULL and destMax != 0\n *                            and destMax <= SECUREC_WCHAR_MEM_MAX_LEN)\n *    ERANGE_AND_RESET       count > destMax and dest  !=  NULL and destMax != 0\n *                           and destMax <= SECUREC_WCHAR_MEM_MAX_LEN\n *    EOVERLAP_AND_RESET     dest buffer and 
source buffer are overlapped and\n *                           count <= destMax and destMax != 0 and destMax <= SECUREC_WCHAR_MEM_MAX_LEN\n *                           and dest  !=  NULL  and src != NULL and dest != src\n *\n *    if an error occurred, dest will be filled with 0 when dest and destMax valid.\n *    If the source and destination overlap, the behavior of wmemcpy_s is undefined.\n *    Use wmemmove_s to handle overlapping regions.\n */\nerrno_t wmemcpy_s(wchar_t *dest, size_t destMax, const wchar_t *src, size_t count)\n{\n    if (destMax == 0 || destMax > SECUREC_WCHAR_MEM_MAX_LEN) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"wmemcpy_s\");\n        return ERANGE;\n    }\n    if (count > destMax) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"wmemcpy_s\");\n        if (dest != NULL) {\n            (void)memset(dest, 0, destMax * sizeof(wchar_t));\n            return ERANGE_AND_RESET;\n        }\n        return ERANGE;\n    }\n    return memcpy_s(dest, destMax * sizeof(wchar_t), src, count * sizeof(wchar_t));\n}\n\n"
  },
  {
    "path": "third_party/securec/src/wmemmove_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securecutil.h\"\n\n/*\n * <FUNCTION DESCRIPTION>\n *   The wmemmove_s function copies n successive wide characters from the object pointed\n *   to by src into the object pointed to by dest.\n *\n * <INPUT PARAMETERS>\n *    dest                     Destination buffer.\n *    destMax                  Size of the destination buffer.\n *    src                      Source object.\n *    count                    Number of bytes or character to copy.\n *\n * <OUTPUT PARAMETERS>\n *    dest                     is updated.\n *\n * <RETURN VALUE>\n *    EOK                      Success\n *    EINVAL                   dest is  NULL and destMax != 0 and count <= destMax\n *                             and destMax <= SECUREC_WCHAR_MEM_MAX_LEN\n *    EINVAL_AND_RESET         dest != NULL and src is NULLL and destMax != 0\n *                             and destMax <= SECUREC_WCHAR_MEM_MAX_LEN and count <= destMax\n *    ERANGE                   destMax > SECUREC_WCHAR_MEM_MAX_LEN or destMax is 0 or\n *                             (count > destMax and dest is  NULL and destMax != 0\n *                             and destMax <= SECUREC_WCHAR_MEM_MAX_LEN)\n *    ERANGE_AND_RESET        count > destMax and dest  !=  NULL and destMax != 0\n *                             and destMax <= SECUREC_WCHAR_MEM_MAX_LEN\n *\n *\n *     If an 
error occurred, dest will be filled with 0 when dest and destMax valid.\n *     If some regions of the source area and the destination overlap, wmemmove_s\n *     ensures that the original source bytes in the overlapping region are copied\n *     before being overwritten\n */\nerrno_t wmemmove_s(wchar_t *dest, size_t destMax, const wchar_t *src, size_t count)\n{\n    if (destMax == 0 || destMax > SECUREC_WCHAR_MEM_MAX_LEN) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"wmemmove_s\");\n        return ERANGE;\n    }\n    if (count > destMax) {\n        SECUREC_ERROR_INVALID_PARAMTER(\"wmemmove_s\");\n        if (dest != NULL) {\n            (void)memset(dest, 0, destMax * sizeof(wchar_t));\n            return ERANGE_AND_RESET;\n        }\n        return ERANGE;\n    }\n    return memmove_s(dest, destMax * sizeof(wchar_t), src, count * sizeof(wchar_t));\n}\n\n"
  },
  {
    "path": "third_party/securec/src/wscanf_s.c",
    "content": "/**\n * Copyright 2020 Huawei Technologies Co., Ltd\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"securec.h\"\n\n/*\n * <NAME>\n * <FUNCTION DESCRIPTION>\n *    The  wscanf_s  function  is  the  wide-character  equivalent  of the scanf_s function\n *    The wscanf_s function reads data from the standard input stream stdin and\n *    writes the data into the location that's given by argument. Each argument\n *    must be a pointer to a variable of a type that corresponds to a type specifier\n *    in format. If copying occurs between strings that overlap, the behavior is\n *    undefined.\n *\n * <INPUT PARAMETERS>\n *    format                  Format control string.\n *    ...                         Optional arguments.\n *\n * <OUTPUT PARAMETERS>\n *    ...                     
the converted value stored in user assigned address\n *\n * <RETURN VALUE>\n *    Returns the number of fields successfully converted and assigned;\n *    the return value does not include fields that were read but not assigned.\n *    A return value of 0 indicates that no fields were assigned.\n *    return -1 if an error occurs.\n */\n\nint wscanf_s(const wchar_t *format, ...)\n{\n    int ret;                    /* If initialization causes  e838 */\n    va_list argList;\n\n    va_start(argList, format);\n    ret = vwscanf_s(format, argList);\n    va_end(argList);\n    (void)argList;              /* to clear e438 last value assigned not used , the compiler will optimize this code */\n\n    return ret;\n}\n\n"
  }
]