[
  {
    "path": ".coveragerc",
    "content": "[run]\nbranch = true\nsource = delfin\nomit = delfin/tests/*\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE.md",
    "content": "<!-- This form is for bug reports and feature requests! -->\n\n**Is this a BUG REPORT or FEATURE REQUEST?**:\n\n> Uncomment only one, leave it on its own line: \n>\n> /kind bug\n> /kind feature\n\n\n**What happened**:\n\n**What you expected to happen**:\n\n**How to reproduce it (as minimally and precisely as possible)**:\n\n\n**Anything else we need to know?**:\n\n**Environment**:\n- Delfin(release/branch) version:\n- OS (e.g. from /etc/os-release):\n- Kernel (e.g. `uname -a`):\n- Install tools:\n- Others:\n"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE.md",
    "content": "<!-- Thanks for sending a pull request! -->\n\n**What this PR does / why we need it**:\n\n**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #\n\n**Special notes for your reviewer**:\n\n**Release note**:\n<!--  Steps to write your release note:\n1. Use the release-note-* labels to set the release note state (if you have access)\n2. Enter your extended release note in the below block; leaving it blank means using the PR title as the release note. If no release note is required, just write `NONE`.\n-->\n```release-note\n```\n"
  },
  {
    "path": ".github/workflows/delfin_ci.yml",
    "content": "name: Delfin CI\non: [push, pull_request, workflow_dispatch]\n\njobs:\n  build:\n    runs-on: ${{ matrix.platform }}\n    strategy:\n      max-parallel: 6\n      matrix:\n        platform: [ubuntu-20.04]\n        python-version: [ 3.8 ]\n\n    steps:\n      - uses: actions/checkout@v2\n      - name: Install Python version ${{ matrix.python-version }}\n        uses: actions/setup-python@v1\n        with:\n          python-version: ${{ matrix.python-version }}\n      - name: Install dependencies\n        run: |\n          python -m pip install --upgrade pip\n          pip install -r requirements.txt\n          pip install -r test-requirements.txt\n          pip install tox codecov\n      - name: Unit test with tox\n        run: tox\n      - name: Test coverage with  codecov\n        run: codecov\n"
  },
  {
    "path": ".github/workflows/delfin_e2e_test.yml",
    "content": "name: Delfin E2E Test\non: [push, pull_request, workflow_dispatch]\n\njobs:\n  test:\n    runs-on: ${{ matrix.platform }}\n    strategy:\n      max-parallel: 6\n      matrix:\n        platform: [ubuntu-20.04]\n        python-version: [ 3.8 ]\n\n    steps:\n      - name: Checkout delfin code\n        uses: actions/checkout@v2\n      - name: Install Python version ${{ matrix.python-version }}\n        uses: actions/setup-python@v1\n        with:\n          python-version: ${{ matrix.python-version }}\n      - name: Install dependencies\n        run: |\n          python -m pip install --upgrade pip\n          pip install -r requirements.txt\n          pip install -r test-requirements.txt\n          pip install tox codecov\n      - name: E2E Testing - Add Test Driver to Delfin\n        run: |\n          str=\"\\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ 'test_vendor test_model = delfin.tests.e2e.testdriver:TestDriver',\"\n          sed -i \"/FakeStorageDriver',/ a $str\" ./setup.py\n        shell: bash\n      - name: E2E Testing - Install RabbitMQ\n        uses: getong/rabbitmq-action@v1.2\n        with:\n          rabbitmq version: '3.8.2-management-alpine'\n          host port: 5672\n          rabbitmq user: 'guest'\n          rabbitmq password: 'guest'\n      - name: E2E Testing - Install Redis\n        uses: supercharge/redis-github-action@1.2.0\n        with:\n          redis-version: 6\n      - name: E2E Testing - Build and Deploy Delfin with Test driver\n        run: |\n          sudo mkdir -p /var/lib/delfin\n          sudo chmod 0777 /var/lib/delfin\n          sudo mkdir -p /etc/delfin\n          sudo chmod 0777 /etc/delfin\n          python3 setup.py install\n          cp ./etc/delfin/api-paste.ini /etc/delfin/\n          python3 ./script/create_db.py --config-file ./etc/delfin/delfin.conf\n          sleep 1\n          python3 ./delfin/cmd/task.py --config-file ./etc/delfin/delfin.conf > /tmp/task.log 2>&1 &\n          python3 ./delfin/cmd/alert.py --config-file ./etc/delfin/delfin.conf > /tmp/alert.log 2>&1 &\n          python3 ./delfin/cmd/api.py --config-file ./etc/delfin/delfin.conf > /tmp/api.log 2>&1 &\n        shell: bash\n      - name: E2E Testing - Run RobotFramework\n        run: |\n          sleep 3\n          pip install robotframework\n          pip install robotframework-requests\n          pip install robotframework-jsonlibrary\n          DELFIN_DIR=`pwd`\n          TOP_DIR=\"${DELFIN_DIR}/delfin/tests/e2e\"\n          ORIG_PATH='\"storage.json\"'\n          FILE_PATH=\"${TOP_DIR}/testdriver/storage.json\"\n          sed -i \"s|${ORIG_PATH}|\\\"${FILE_PATH}\\\"|g\" $TOP_DIR/test.json\n          sleep 1\n          robot delfin/tests/e2e\n        shell: bash\n"
  },
  {
    "path": ".gitignore",
    "content": "# IDE config file\n.idea\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n.python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n"
  },
  {
    "path": "CHANGELOG/CHANGELOG-v1.0.0.md",
    "content": "# Changelog\n\n## [v1.0.0](https://github.com/sodafoundation/delfin/tree/v1.0.0) (2020-09-29)\n\n[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.8.0...v1.0.0)\n\n**Merged pull requests:**\n\n- Fixing config path to sync with ansible installer [\\#346](https://github.com/sodafoundation/delfin/pull/346) ([PravinRanjan10](https://github.com/PravinRanjan10))\n- Syncing dev branch to master [\\#345](https://github.com/sodafoundation/delfin/pull/345) ([PravinRanjan10](https://github.com/PravinRanjan10))\n- Standalone Installer script for delfin [\\#342](https://github.com/sodafoundation/delfin/pull/342) ([PravinRanjan10](https://github.com/PravinRanjan10))\n- Sync master to performance collection  dev branch [\\#339](https://github.com/sodafoundation/delfin/pull/339) ([NajmudheenCT](https://github.com/NajmudheenCT))\n- Updating Fake driver to sync with vmax model [\\#338](https://github.com/sodafoundation/delfin/pull/338) ([PravinRanjan10](https://github.com/PravinRanjan10))\n\n## [v0.8.0](https://github.com/sodafoundation/delfin/tree/v0.8.0) (2020-09-28)\n\n[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.6.0...v0.8.0)\n\n**Merged pull requests:**\n\n-  Modifying some exception in VMAX and schema validation of ssh acces info [\\#343](https://github.com/sodafoundation/delfin/pull/343) ([NajmudheenCT](https://github.com/NajmudheenCT))\n\n## [v0.6.0](https://github.com/sodafoundation/delfin/tree/v0.6.0) (2020-09-21)\n\n[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.6.1...v0.6.0)\n\n**Merged pull requests:**\n\n- Code improvements [\\#335](https://github.com/sodafoundation/delfin/pull/335) ([sushanthakumar](https://github.com/sushanthakumar))\n- Fix static code check tools function depth defect [\\#331](https://github.com/sodafoundation/delfin/pull/331) ([joseph-v](https://github.com/joseph-v))\n- Fix static code check tool defects [\\#330](https://github.com/sodafoundation/delfin/pull/330) ([joseph-v](https://github.com/joseph-v))\n- Fix VMAX establish rest session [\\#324](https://github.com/sodafoundation/delfin/pull/324) ([joseph-v](https://github.com/joseph-v))\n- Fix volume name in VMAX driver [\\#323](https://github.com/sodafoundation/delfin/pull/323) ([joseph-v](https://github.com/joseph-v))\n- Remove plain text password caching in drivers [\\#322](https://github.com/sodafoundation/delfin/pull/322) ([joseph-v](https://github.com/joseph-v))\n- Correct input argument of StorageBackendException in oceanstor [\\#317](https://github.com/sodafoundation/delfin/pull/317) ([joseph-v](https://github.com/joseph-v))\n\n## [v0.6.1](https://github.com/sodafoundation/delfin/tree/v0.6.1) (2020-09-21)\n\n[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.4.0...v0.6.1)\n\n**Fixed bugs:**\n\n- sync\\_status is always 'synced' in the reponse of a registration [\\#241](https://github.com/sodafoundation/delfin/issues/241)\n- \\[task manager\\] Sync call stuck when rabbit-mq server is not running [\\#129](https://github.com/sodafoundation/delfin/issues/129)\n\n**Closed issues:**\n\n- Dell EMC VMAX volume name is different from Unisphere dashboard [\\#332](https://github.com/sodafoundation/delfin/issues/332)\n- Encrypt password before caching in drivers [\\#329](https://github.com/sodafoundation/delfin/issues/329)\n\n**Merged pull requests:**\n\n- Performance metric-config-update API for delfin [\\#333](https://github.com/sodafoundation/delfin/pull/333) ([PravinRanjan10](https://github.com/PravinRanjan10))\n-  VMAX 
driver Performance collection: Initial framework and  array level for metrics collection    [\\#326](https://github.com/sodafoundation/delfin/pull/326) ([NajmudheenCT](https://github.com/NajmudheenCT))\n- Performance-collection framework for delfin [\\#325](https://github.com/sodafoundation/delfin/pull/325) ([PravinRanjan10](https://github.com/PravinRanjan10))\n- Query para driver changes for list alert api [\\#319](https://github.com/sodafoundation/delfin/pull/319) ([sushanthakumar](https://github.com/sushanthakumar))\n- Exception handling for delete snmp trap config [\\#318](https://github.com/sodafoundation/delfin/pull/318) ([sushanthakumar](https://github.com/sushanthakumar))\n- Alert sync api changes [\\#316](https://github.com/sodafoundation/delfin/pull/316) ([sushanthakumar](https://github.com/sushanthakumar))\n- Fix static code check defects [\\#315](https://github.com/sodafoundation/delfin/pull/315) ([joseph-v](https://github.com/joseph-v))\n- Fix warnings from static analyze tool [\\#314](https://github.com/sodafoundation/delfin/pull/314) ([joseph-v](https://github.com/joseph-v))\n- Query para update for list alert api [\\#312](https://github.com/sodafoundation/delfin/pull/312) ([sushanthakumar](https://github.com/sushanthakumar))\n\n## [v0.4.0](https://github.com/sodafoundation/delfin/tree/v0.4.0) (2020-08-28)\n\n[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.2.0...v0.4.0)\n\n**Implemented enhancements:**\n\n- Need a mechanism to support SSL certificate verify for HTTPS request from driver to storage device. [\\#227](https://github.com/sodafoundation/delfin/issues/227)\n\n**Fixed bugs:**\n\n- \\[Driver\\] Create extended exceptions for StorageBackendException [\\#184](https://github.com/sodafoundation/delfin/issues/184)\n- \\[Alert Manager\\] Irrelevant fields are shown as null in GET alert source  [\\#172](https://github.com/sodafoundation/delfin/issues/172)\n- \\[VMAX driver\\] Firmware version is missing [\\#147](https://github.com/sodafoundation/delfin/issues/147)\n- VMAX volume details for volumes without Storage Group [\\#74](https://github.com/sodafoundation/delfin/issues/74)\n- In api.py: Change function name discover\\_storage to update\\_storage\\_driver [\\#69](https://github.com/sodafoundation/delfin/issues/69)\n\n**Closed issues:**\n\n- \\[Alert Manager\\] Clear alert implementation for EMC Vmax [\\#261](https://github.com/sodafoundation/delfin/issues/261)\n- \\[Alert Manager\\] Clear alert implementation for Huawei Oceanstor [\\#260](https://github.com/sodafoundation/delfin/issues/260)\n- \\[Alert Manager\\] Clear alert analysis for storage backends  [\\#259](https://github.com/sodafoundation/delfin/issues/259)\n- \\[Alert Manager\\] Get alert analysis for storage backends [\\#258](https://github.com/sodafoundation/delfin/issues/258)\n- \\[Alert Manager\\] Alert specification check for other storage backends [\\#249](https://github.com/sodafoundation/delfin/issues/249)\n- \\[Alert Manager\\]Improve readability for alert model fields [\\#239](https://github.com/sodafoundation/delfin/issues/239)\n- Update Project ReadMe [\\#231](https://github.com/sodafoundation/delfin/issues/231)\n- Exporting alert model to export manager [\\#126](https://github.com/sodafoundation/delfin/issues/126)\n- \\[Alert manager\\] Load all custom mibs from configured path [\\#114](https://github.com/sodafoundation/delfin/issues/114)\n- \\[Alert Manager\\] Clear alert at backend [\\#99](https://github.com/sodafoundation/delfin/issues/99)\n- \\[task manager\\] Push 
resource data to Exporter [\\#93](https://github.com/sodafoundation/delfin/issues/93)\n- Handle the optimization issues in pool update [\\#55](https://github.com/sodafoundation/delfin/issues/55)\n- Handle multi node use cases in Driver Manager [\\#50](https://github.com/sodafoundation/delfin/issues/50)\n- Not correct behaviour of log info message [\\#46](https://github.com/sodafoundation/delfin/issues/46)\n\n**Merged pull requests:**\n\n- Clear alert fix in hpe 3par driver [\\#309](https://github.com/sodafoundation/delfin/pull/309) ([sushanthakumar](https://github.com/sushanthakumar))\n- Alert source configuration range changes [\\#308](https://github.com/sodafoundation/delfin/pull/308) ([sushanthakumar](https://github.com/sushanthakumar))\n- Hpe3par: update SSL certificate verification method [\\#307](https://github.com/sodafoundation/delfin/pull/307) ([jiangyutan](https://github.com/jiangyutan))\n- Send clear event when snmp validation succeed [\\#305](https://github.com/sodafoundation/delfin/pull/305) ([wisererik](https://github.com/wisererik))\n- update next release version in setup.py [\\#304](https://github.com/sodafoundation/delfin/pull/304) ([NajmudheenCT](https://github.com/NajmudheenCT))\n- Adding Configurable VMAX expiration time [\\#303](https://github.com/sodafoundation/delfin/pull/303) ([PravinRanjan10](https://github.com/PravinRanjan10))\n- Hpe3par:separate the common parts of rest and SSH interfaces [\\#302](https://github.com/sodafoundation/delfin/pull/302) ([jiangyutan](https://github.com/jiangyutan))\n- Updated delfin changes [\\#299](https://github.com/sodafoundation/delfin/pull/299) ([sushanthakumar](https://github.com/sushanthakumar))\n- List and clear alert changes for unisphere alerts [\\#298](https://github.com/sodafoundation/delfin/pull/298) ([sushanthakumar](https://github.com/sushanthakumar))\n- Optimizing vmax driver exception related code. 
[\\#297](https://github.com/sodafoundation/delfin/pull/297) ([PravinRanjan10](https://github.com/PravinRanjan10))\n-  Fetching Default SRP for volumes which are not associated with storage group [\\#296](https://github.com/sodafoundation/delfin/pull/296) ([NajmudheenCT](https://github.com/NajmudheenCT))\n- Hpe3par:modify traps;modify checkhealth's components [\\#295](https://github.com/sodafoundation/delfin/pull/295) ([jiangyutan](https://github.com/jiangyutan))\n- Add Secure backend driver and dynamic certificate reload [\\#290](https://github.com/sodafoundation/delfin/pull/290) ([joseph-v](https://github.com/joseph-v))\n- Remove debug infomation and Fix some grammar problems [\\#289](https://github.com/sodafoundation/delfin/pull/289) ([jiangyutan](https://github.com/jiangyutan))\n- Oceanstor driver return fix for clear alert [\\#283](https://github.com/sodafoundation/delfin/pull/283) ([sushanthakumar](https://github.com/sushanthakumar))\n- Handle invalid input while getting array details for VMAX driver [\\#282](https://github.com/sodafoundation/delfin/pull/282) ([joseph-v](https://github.com/joseph-v))\n-  Adding name and firmware version for VMAX [\\#277](https://github.com/sodafoundation/delfin/pull/277) ([NajmudheenCT](https://github.com/NajmudheenCT))\n- Fix oceanstor driver issue [\\#276](https://github.com/sodafoundation/delfin/pull/276) ([wisererik](https://github.com/wisererik))\n- hpe-3par driver support [\\#274](https://github.com/sodafoundation/delfin/pull/274) ([jiangyutan](https://github.com/jiangyutan))\n\n## [v0.2.0](https://github.com/sodafoundation/delfin/tree/v0.2.0) (2020-08-11)\n\n[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.1.0...v0.2.0)\n\n**Implemented enhancements:**\n\n- Remove example code because it will not be used [\\#86](https://github.com/sodafoundation/delfin/issues/86)\n\n**Closed issues:**\n\n- Need to support SSH connection between delfin and devices. 
[\\#245](https://github.com/sodafoundation/delfin/issues/245)\n- \\[Alert Manager\\] Alert model filling for Huawei OceanStor [\\#195](https://github.com/sodafoundation/delfin/issues/195)\n\n**Merged pull requests:**\n\n- Update event type for alert model [\\#273](https://github.com/sodafoundation/delfin/pull/273) ([wisererik](https://github.com/wisererik))\n- Custom mib path enhancement [\\#271](https://github.com/sodafoundation/delfin/pull/271) ([sushanthakumar](https://github.com/sushanthakumar))\n- Alert source update with snmp validation [\\#270](https://github.com/sodafoundation/delfin/pull/270) ([sushanthakumar](https://github.com/sushanthakumar))\n- Update VMax driver to remove PyU4V lib [\\#268](https://github.com/sodafoundation/delfin/pull/268) ([joseph-v](https://github.com/joseph-v))\n- Adding raw\\_capacity and subscribed capacity in  VMAX driver [\\#267](https://github.com/sodafoundation/delfin/pull/267) ([NajmudheenCT](https://github.com/NajmudheenCT))\n- Add configuration for exporter framework [\\#266](https://github.com/sodafoundation/delfin/pull/266) ([wisererik](https://github.com/wisererik))\n- Clear alert support [\\#265](https://github.com/sodafoundation/delfin/pull/265) ([sushanthakumar](https://github.com/sushanthakumar))\n- Alert model refine changes [\\#264](https://github.com/sodafoundation/delfin/pull/264) ([sushanthakumar](https://github.com/sushanthakumar))\n- Add raw capacity to database model [\\#263](https://github.com/sodafoundation/delfin/pull/263) ([ThisIsClark](https://github.com/ThisIsClark))\n- Modify the constant type of sync status [\\#255](https://github.com/sodafoundation/delfin/pull/255) ([ThisIsClark](https://github.com/ThisIsClark))\n-  swagger correction in mutiple APIS [\\#253](https://github.com/sodafoundation/delfin/pull/253) ([NajmudheenCT](https://github.com/NajmudheenCT))\n- Update access\\_info model to support both REST and SSH. [\\#246](https://github.com/sodafoundation/delfin/pull/246) ([sfzeng](https://github.com/sfzeng))\n\n\n\n\\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*\n"
  },
  {
    "path": "CHANGELOG/CHANGELOG-v1.1.0.md",
    "content": "# Changelog\n\n## [Unreleased](https://github.com/sodafoundation/delfin/tree/HEAD)\n\n[Full Changelog](https://github.com/sodafoundation/delfin/compare/v1.1.0-rc2...HEAD)\n\n**Closed issues:**\n\n- Module file is missing in IBM driver folder [\\#455](https://github.com/sodafoundation/delfin/issues/455)\n- Open API Spec \\(Swagger file\\) do not display disk api correctly [\\#453](https://github.com/sodafoundation/delfin/issues/453)\n\n**Merged pull requests:**\n\n- Add python module file for ibm drivers [\\#456](https://github.com/sodafoundation/delfin/pull/456) ([joseph-v](https://github.com/joseph-v))\n- Fix api spec for format error in disk api [\\#454](https://github.com/sodafoundation/delfin/pull/454) ([joseph-v](https://github.com/joseph-v))\n\n## [v1.1.0-rc2](https://github.com/sodafoundation/delfin/tree/v1.1.0-rc2) (2021-01-05)\n\n[Full Changelog](https://github.com/sodafoundation/delfin/compare/v1.1.0-rc1...v1.1.0-rc2)\n\n**Merged pull requests:**\n\n- Performance collection unregistration during storage deletion [\\#451](https://github.com/sodafoundation/delfin/pull/451) ([sushanthakumar](https://github.com/sushanthakumar))\n- Add EMC Unity driver [\\#442](https://github.com/sodafoundation/delfin/pull/442) ([sushanthakumar](https://github.com/sushanthakumar))\n- delfin 0.8.0 to master [\\#439](https://github.com/sodafoundation/delfin/pull/439) ([PravinRanjan10](https://github.com/PravinRanjan10))\n- Seperation of task service class [\\#435](https://github.com/sodafoundation/delfin/pull/435) ([sushanthakumar](https://github.com/sushanthakumar))\n\n## [v1.1.0-rc1](https://github.com/sodafoundation/delfin/tree/v1.1.0-rc1) (2020-12-24)\n\n[Full Changelog](https://github.com/sodafoundation/delfin/compare/v1.0.0...v1.1.0-rc1)\n\n**Closed issues:**\n\n- Add python-dev instruction for delfin installation [\\#418](https://github.com/sodafoundation/delfin/issues/418)\n- Provide additional labels, storage name & storage serial number, in metrics collection data sent to Prometheus exporter [\\#417](https://github.com/sodafoundation/delfin/issues/417)\n- API update \\(swagger\\) for resource model improvement  [\\#405](https://github.com/sodafoundation/delfin/issues/405)\n- Resource management improvement implementation [\\#404](https://github.com/sodafoundation/delfin/issues/404)\n- oslo\\_service.wsgi.ConfigNotFound: Could not find config at api-paste.ini [\\#390](https://github.com/sodafoundation/delfin/issues/390)\n- Code improvements based on 0.8.0 version [\\#356](https://github.com/sodafoundation/delfin/issues/356)\n- Delfin installation doesn't work  [\\#340](https://github.com/sodafoundation/delfin/issues/340)\n\n**Merged pull requests:**\n\n-  Adding Prometheus alert manager exporter [\\#437](https://github.com/sodafoundation/delfin/pull/437) ([NajmudheenCT](https://github.com/NajmudheenCT))\n- Update installer for the changed exporter path [\\#436](https://github.com/sodafoundation/delfin/pull/436) ([joseph-v](https://github.com/joseph-v))\n- Delfin exporter configurations and modified  exporter selction mechanism [\\#433](https://github.com/sodafoundation/delfin/pull/433) ([NajmudheenCT](https://github.com/NajmudheenCT))\n- Update oceanstor for Controller, Port and Disk resource support [\\#426](https://github.com/sodafoundation/delfin/pull/426) ([joseph-v](https://github.com/joseph-v))\n-  Adding more labels to array leval metrics [\\#422](https://github.com/sodafoundation/delfin/pull/422) ([NajmudheenCT](https://github.com/NajmudheenCT))\n- Add python3-dev 
package instruction [\\#419](https://github.com/sodafoundation/delfin/pull/419) ([Anmolbansal1](https://github.com/Anmolbansal1))\n- Update Delfin APIs for new resources controller, port & disk [\\#415](https://github.com/sodafoundation/delfin/pull/415) ([joseph-v](https://github.com/joseph-v))\n- Add Port resource to Delfin [\\#408](https://github.com/sodafoundation/delfin/pull/408) ([joseph-v](https://github.com/joseph-v))\n- Add Disk resource to Delfin [\\#407](https://github.com/sodafoundation/delfin/pull/407) ([joseph-v](https://github.com/joseph-v))\n- Delfin resource support enhancement for 'controller' [\\#403](https://github.com/sodafoundation/delfin/pull/403) ([joseph-v](https://github.com/joseph-v))\n\n\n\n\\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*\n"
  },
  {
    "path": "Dockerfile",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nFROM ubuntu:18.04\n\nMAINTAINER soda team\n\nRUN apt-get update -y && \\\n    apt-get install -y python3-pip && \\\n    apt-get install -y sqlite3 && \\\n    apt-get install -y libffi-dev && \\\n    pip3 install --upgrade pip\n\nADD . /delfin\n\nWORKDIR /delfin\n\nRUN pip3 install -r requirements.txt && \\\n    python3 setup.py install\n\nENTRYPOINT [\"/delfin/script/start.sh\"]\n\n"
  },
  {
    "path": "LICENSE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n"
  },
  {
    "path": "README.md",
    "content": "# delfin : SODA Infrastructure Manager Project\n[![Build Status](https://travis-ci.com/sodafoundation/delfin.svg?branch=master)](https://travis-ci.com/sodafoundation/delfin)\n[![codecov.io](https://codecov.io/github/sodafoundation/delfin/coverage.svg?branch=master)](https://codecov.io/github/sodafoundation/delfin?branch=master)\n[![Releases](https://img.shields.io/github/release/sodafoundation/delfin/all.svg?style=flat-square)](https://github.com/sodafoundation/delfin/releases)\n[![LICENSE](https://img.shields.io/github/license/sodafoundation/delfin.svg?style=flat-square)](https://github.com/sodafoundation/delfin/blob/master/LICENSE)\n\n<img src=\"https://sodafoundation.io/wp-content/uploads/2020/01/SODA_logo_outline_color_800x800.png\" width=\"200\" height=\"200\">\n\n## Introduction\n\ndelfin (Dolphin in spanish!) , the SODA Infrastructure Manager project is an an open source project to provide unified, intelligent and scalable resource management, alert and performance monitoring. It will cover the resource management of all the storage backends & other infrastructures under SODA deployment. It will also provide the alert management and metric data(performance/health) for monitoring and further analysis. It will provide a scalable framework where more and more backends as well as client exporters can be added. This will enable to add more storage and infrastructure backends and also support different management clients for monitoring and health prediction.\n\nIt provides unified APIs to access, export and connect with clients as well as a set of interfaces for various driver addition.\n\nThis is one of the SODA Core Projects and is maintained by SODA Foundation directly.\n\n## Documentation\n\n[https://docs.sodafoundation.io](https://docs.sodafoundation.io/)\n\n## Quick Start - To Use/Experience\n\n[https://docs.sodafoundation.io/guides/user-guides/delfin](https://docs.sodafoundation.io/guides/user-guides/delfin/)\n\n## Quick Start - To Develop\n\n[https://docs.sodafoundation.io/guides/developer-guides/delfin](https://docs.sodafoundation.io/guides/developer-guides/delfin/)\n\n## Demo videos - To get to know the capabilities better\n\n[https://www.youtube.com/watch?v=WtlxF7SHID4](https://www.youtube.com/watch?v=WtlxF7SHID4)\n\n\n## Latest Releases\n\n[https://github.com/sodafoundation/delfin/releases](https://github.com/sodafoundation/delfin/releases)\n\n## Support and Issues\n\n[https://github.com/sodafoundation/delfin/issues](https://github.com/sodafoundation/delfin/issues)\n\n## Project Community\n\n[https://sodafoundation.io/slack/](https://sodafoundation.io/slack/)\n\n## How to contribute to this project?\n\nJoin [https://sodafoundation.io/slack/](https://sodafoundation.io/slack/) and share your interest in the ‘general’ channel\n\nCheckout [https://github.com/sodafoundation/delfin/issues](https://github.com/sodafoundation/delfin/issues) labelled with ‘good first issue’ or ‘help needed’ or ‘help wanted’ or ‘StartMyContribution’ or ‘SMC’\n\n## Project Roadmap\n\nWe want to build a unified intelligent and scalable infrastructure management framework for resource management (config, add, remove, update), alert management and performance metrics management.\n  \n[https://docs.sodafoundation.io](https://docs.sodafoundation.io/)\n\n## Join SODA Foundation\n\nWebsite : [https://sodafoundation.io](https://sodafoundation.io/)\n\nSlack  : [https://sodafoundation.io/slack/](https://sodafoundation.io/slack/)\n\nTwitter  : 
[@sodafoundation](https://twitter.com/sodafoundation)\n\nMailinglist  : [https://lists.sodafoundation.io](https://lists.sodafoundation.io/)\n"
  },
  {
    "path": "codecov.yml",
    "content": "comment:\n  layout: \"header, diff, tree\"\n\ncoverage:\n  range: \"70...100\"\n  precision: 2\n  round: down\n  status:\n    project:\n      default:\n        target: 70%\n    patch: off\n"
  },
  {
    "path": "delfin/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/alert_manager/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/alert_manager/alert_processor.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\nimport threading\n\nfrom oslo_log import log\n\nfrom delfin import context\nfrom delfin import coordination\nfrom delfin import db\nfrom delfin import exception\nfrom delfin.common import alert_util\nfrom delfin.drivers import api as driver_manager\nfrom delfin.exporter import base_exporter\nfrom delfin.task_manager import rpcapi\n\nLOG = log.getLogger(__name__)\n\n\nclass AlertProcessor(object):\n    \"\"\"Alert model translation and export functions\"\"\"\n\n    def __init__(self):\n        self.driver_manager = driver_manager.API()\n        self.exporter_manager = base_exporter.AlertExporterManager()\n        self.task_rpcapi = rpcapi.TaskAPI()\n\n    def process_alert_info(self, alert):\n        \"\"\"Fills alert model using driver manager interface.\"\"\"\n        ctxt = context.get_admin_context()\n        storage = db.storage_get(ctxt, alert['storage_id'])\n        alert_model = {}\n\n        try:\n            alert_model = self.driver_manager.parse_alert(ctxt,\n                                                          alert['storage_id'],\n                                                          alert)\n            # Fill storage specific info\n            if alert_model:\n                storage = self.get_storage_from_parsed_alert(\n                    ctxt, storage, alert_model)\n                alert_util.fill_storage_attributes(alert_model, storage)\n        except exception.IncompleteTrapInformation as e:\n            LOG.warning(e)\n            threading.Thread(target=self.sync_storage_alert,\n                             args=(ctxt, alert['storage_id'])).start()\n        except exception.AlertSourceNotFound:\n            LOG.info(\"Could not identify alert source from parsed alert. 
\"\n                     \"Skipping the dispatch of alert\")\n            return\n        except Exception as e:\n            LOG.error(e)\n            raise exception.InvalidResults(\n                \"Failed to fill the alert model from driver.\")\n\n        # Export to base exporter which handles dispatch for all exporters\n        if alert_model:\n            LOG.info(\"Dispatching one SNMP Trap to {} with sn {}\".format(\n                alert_model['storage_id'], alert_model['serial_number']))\n            self.exporter_manager.dispatch(ctxt, [alert_model])\n\n    def get_storage_from_parsed_alert(self, ctxt, storage, alert_model):\n        # If parse_alert sets 'serial_number' or 'storage_name' in the\n        # alert_model, we need to get corresponding storage details\n        # from the db and fill that in alert_model\n        storage_sn = alert_model.get('serial_number')\n        storage_name = alert_model.get('storage_name')\n        filters = {\n            \"vendor\": storage['vendor'],\n            \"model\": storage['model'],\n        }\n        try:\n            if storage_sn and storage_sn != storage['serial_number']:\n                filters['serial_number'] = storage_sn\n            elif storage_name and storage_name != storage['name']:\n                filters['name'] = storage_name\n            else:\n                return storage\n\n            storage_list = db.storage_get_all(ctxt, filters=filters)\n            if not storage_list:\n                msg = \"Failed to get destination storage for SNMP Trap. \" \\\n                      \"Storage with serial number {} or storage name {} \" \\\n                      \"not found in DB\".format(storage_sn, storage_name)\n                raise exception.AlertSourceNotFound(msg)\n            db.alert_source_get(ctxt, storage_list[0]['id'])\n            storage = storage_list[0]\n        except exception.AlertSourceNotFound:\n            LOG.info(\"Storage with serial number {} or name {} \"\n                     \"is not registered for receiving \"\n                     \"SNMP Trap\".format(storage_sn, storage_name))\n            raise\n\n        return storage\n\n    @coordination.synchronized('sync-trap-{storage_id}', blocking=False)\n    def sync_storage_alert(self, context, storage_id):\n        time.sleep(10)\n        self.task_rpcapi.sync_storage_alerts(context, storage_id, None)\n"
  },
  {
    "path": "delfin/alert_manager/constants.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# SNMP dispatcher job id (static identifier)\nSNMP_DISPATCHER_JOB_ID = 1\n\n# Valid SNMP versions.\nSNMP_V1_INT = 1\nSNMP_V2_INT = 2\nSNMP_V3_INT = 3\nVALID_SNMP_VERSIONS = {\"snmpv1\": SNMP_V1_INT, \"snmpv2c\": SNMP_V2_INT,\n                       \"snmpv3\": SNMP_V3_INT}\n\n# Default limitation for batch query.\nDEFAULT_LIMIT = 1000\n"
  },
  {
    "path": "delfin/alert_manager/rpcapi.py",
    "content": "# Copyright 2012, Red Hat, Inc.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"\nClient side of the alert manager RPC API.\n\"\"\"\n\nimport oslo_messaging as messaging\nfrom oslo_config import cfg\n\nfrom delfin import rpc\n\nCONF = cfg.CONF\n\n\nclass AlertAPI(object):\n    \"\"\"Client side of the alert manager rpc API.\n\n    API version history:\n        1.0 - Initial version.\n    \"\"\"\n\n    RPC_API_VERSION = '1.0'\n\n    def __init__(self):\n        super(AlertAPI, self).__init__()\n        target = messaging.Target(topic=CONF.delfin_alert_topic,\n                                  version=self.RPC_API_VERSION)\n        self.client = rpc.get_client(target, version_cap=self.RPC_API_VERSION)\n\n    def sync_snmp_config(self, ctxt, snmp_config_to_del, snmp_config_to_add):\n        call_context = self.client.prepare(version='1.0', fanout=True)\n        return call_context.cast(ctxt,\n                                 'sync_snmp_config',\n                                 snmp_config_to_del=snmp_config_to_del,\n                                 snmp_config_to_add=snmp_config_to_add)\n\n    def check_snmp_config(self, ctxt, snmp_config):\n        call_context = self.client.prepare(version='1.0')\n        return call_context.cast(ctxt,\n                                 'check_snmp_config',\n                                 snmp_config=snmp_config)\n"
  },
  {
    "path": "delfin/alert_manager/snmp_validator.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport binascii\nimport copy\n\nimport six\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_utils import encodeutils\nfrom pyasn1.type.univ import OctetString\nfrom pysnmp.entity.rfc3413.oneliner import cmdgen\n\nfrom delfin import cryptor\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import utils\nfrom delfin.common import constants\nfrom delfin.exporter import base_exporter\n\nCONF = cfg.CONF\n\nLOG = log.getLogger(__name__)\n\n\nclass SNMPValidator(object):\n    def __init__(self):\n        self.exporter = base_exporter.AlertExporterManager()\n        self.snmp_error_flag = {}\n\n    def validate(self, ctxt, alert_source):\n        engine_id = alert_source.get('engine_id')\n        try:\n            hosts = alert_source['host'].split(',')\n            temp_alert_source = copy.deepcopy(alert_source)\n            # Sets a value to raise a SNMPConnectionFailed when multiple\n            # alarm sources fail to be verified\n            connection_times = 0\n            for host in hosts:\n                temp_alert_source['host'] = host\n                try:\n                    connection_times += 1\n                    alert_source = \\\n                        self.validate_connectivity(ctxt, temp_alert_source)\n                    break\n                except Exception as e:\n                    if connection_times == len(hosts):\n                        raise e\n            # If protocol is snmpv3, the snmp_validator will update\n            # engine id if engine id is empty. Therefore, engine id\n            # should be saved in database.\n            if not engine_id and alert_source.get('engine_id'):\n                alert_source_dict = {\n                    'engine_id': alert_source.get('engine_id')}\n                db.alert_source_update(ctxt,\n                                       alert_source.get('storage_id'),\n                                       alert_source_dict)\n            self._handle_validation_result(ctxt,\n                                           alert_source.get('storage_id'),\n                                           constants.Category.RECOVERY)\n        except exception.SNMPConnectionFailed:\n            self._handle_validation_result(ctxt,\n                                           alert_source.get('storage_id'))\n        except Exception as e:\n            msg = six.text_type(e)\n            LOG.error(\"Failed to check snmp config. 
Reason: %s\", msg)\n\n    @staticmethod\n    def validate_connectivity(ctxt, alert_source):\n        # Fill optional parameters with default values if not set in input\n        if not alert_source.get('port'):\n            alert_source['port'] = constants.DEFAULT_SNMP_CONNECT_PORT\n\n        if not alert_source.get('context_name'):\n            alert_source['context_name'] = None\n\n        if not alert_source.get('retry_num'):\n            alert_source['retry_num'] = constants.DEFAULT_SNMP_RETRY_NUM\n\n        if not alert_source.get('expiration'):\n            alert_source['expiration'] = constants.DEFAULT_SNMP_EXPIRATION_TIME\n\n        if CONF.snmp_validation_enabled is False:\n            return alert_source\n\n        storage_id = alert_source.get('storage_id')\n        access_info = db.access_info_get(ctxt, storage_id)\n        access_info = dict(access_info)\n        if access_info.get('model') not in constants.SNMP_SUPPORTED_MODELS:\n            return alert_source\n\n        cmd_gen = cmdgen.CommandGenerator()\n\n        version = alert_source.get('version')\n\n        # Connect to alert source through snmp get to check the configuration\n        try:\n            target = cmdgen.UdpTransportTarget((alert_source['host'],\n                                                alert_source['port']),\n                                               timeout=alert_source[\n                                                   'expiration'],\n                                               retries=alert_source[\n                                                   'retry_num'])\n            target.setLocalAddress((CONF.my_ip, 0))\n            if version.lower() == 'snmpv3':\n                # Register engine observer to get engineId,\n                # Code reference from: http://snmplabs.com/pysnmp/\n                observer_context = {}\n                cmd_gen.snmpEngine.observer.registerObserver(\n                    lambda e, p, v, c: c.update(\n                        securityEngineId=v['securityEngineId']),\n                    'rfc3412.prepareDataElements:internal',\n                    cbCtx=observer_context\n                )\n                auth_key = None\n                if alert_source['auth_key']:\n                    auth_key = encodeutils.to_utf8(\n                        cryptor.decode(alert_source['auth_key']))\n                privacy_key = None\n                if alert_source['privacy_key']:\n                    privacy_key = encodeutils.to_utf8(\n                        cryptor.decode(alert_source['privacy_key']))\n                auth_protocol = None\n                privacy_protocol = None\n                if alert_source['auth_protocol']:\n                    auth_protocol = constants.AUTH_PROTOCOL_MAP.get(\n                        alert_source['auth_protocol'].lower())\n                if alert_source['privacy_protocol']:\n                    privacy_protocol = constants.PRIVACY_PROTOCOL_MAP.get(\n                        alert_source['privacy_protocol'].lower())\n\n                engine_id = alert_source.get('engine_id')\n                if engine_id:\n                    engine_id = OctetString.fromHexString(engine_id)\n                error_indication, __, __, __ = cmd_gen.getCmd(\n                    cmdgen.UsmUserData(alert_source['username'],\n                                       authKey=auth_key,\n                                       privKey=privacy_key,\n                                       authProtocol=auth_protocol,\n                                       
privProtocol=privacy_protocol,\n                                       securityEngineId=engine_id),\n                    target,\n                    constants.SNMP_QUERY_OID,\n                )\n\n                if 'securityEngineId' in observer_context:\n                    engine_id = observer_context.get('securityEngineId')\n                    alert_source['engine_id'] = binascii.hexlify(\n                        engine_id.asOctets()).decode()\n            else:\n                community_string = encodeutils.to_utf8(\n                    cryptor.decode(alert_source['community_string']))\n                error_indication, __, __, __ = cmd_gen.getCmd(\n                    cmdgen.CommunityData(\n                        community_string,\n                        contextName=alert_source['context_name']),\n                    target,\n                    constants.SNMP_QUERY_OID,\n                )\n\n            cmd_gen.snmpEngine.transportDispatcher.closeDispatcher()\n\n            if not error_indication:\n                return alert_source\n\n            # Prepare exception with error_indication\n            msg = six.text_type(error_indication)\n        except Exception as e:\n            msg = six.text_type(e)\n\n        # Since validation occur error, raise exception\n        LOG.error(\"Configuration validation failed with alert source for \"\n                  \"reason: %s.\" % msg)\n        raise exception.SNMPConnectionFailed(msg)\n\n    def _handle_validation_result(self, ctxt, storage_id,\n                                  category=constants.Category.FAULT):\n        try:\n            storage = db.storage_get(ctxt, storage_id)\n            serial_number = storage.get('serial_number')\n            if category == constants.Category.FAULT:\n                self.snmp_error_flag[serial_number] = True\n                self._dispatch_snmp_validation_alert(ctxt, storage, category)\n            elif self.snmp_error_flag.get(serial_number, True):\n                self.snmp_error_flag[serial_number] = False\n                self._dispatch_snmp_validation_alert(ctxt, storage, category)\n        except Exception as e:\n            msg = six.text_type(e)\n            LOG.error(\"Exception occurred when handling validation \"\n                      \"error: %s .\" % msg)\n\n    def _dispatch_snmp_validation_alert(self, ctxt, storage, category):\n\n        alert = {\n            'storage_id': storage['id'],\n            'storage_name': storage['name'],\n            'vendor': storage['vendor'],\n            'model': storage['model'],\n            'serial_number': storage['serial_number'],\n            'alert_id': constants.SNMP_CONNECTION_FAILED_ALERT_ID,\n            'sequence_number': 0,\n            'alert_name': 'SNMP connect failed',\n            'category': category,\n            'severity': constants.Severity.MAJOR,\n            'type': constants.EventType.COMMUNICATIONS_ALARM,\n            'location': 'NetworkEntity=%s' % storage['name'],\n            'description': \"SNMP connection to the storage failed. \"\n                           \"SNMP traps from storage will not be received.\",\n            'recovery_advice': \"1. The network connection is abnormal. \"\n                               \"2. SNMP authentication parameters \"\n                               \"are invalid.\",\n            'occur_time': utils.utcnow_ms(),\n        }\n        self.exporter.dispatch(ctxt, alert)\n"
  },
  {
    "path": "delfin/alert_manager/trap_receiver.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport six\nfrom oslo_log import log\nfrom oslo_service import periodic_task\nfrom oslo_utils import encodeutils\nfrom pysnmp.carrier.asyncore.dgram import udp\nfrom pysnmp.entity import engine, config\nfrom pysnmp.entity.rfc3413 import ntfrcv\nfrom pysnmp.proto.api import v2c\nfrom pysnmp.smi import builder, view\nfrom retrying import retry\n\nfrom delfin import context, cryptor\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import manager\nfrom delfin.alert_manager import alert_processor\nfrom delfin.alert_manager import constants\nfrom delfin.alert_manager import rpcapi\nfrom delfin.alert_manager import snmp_validator\nfrom delfin.common import constants as common_constants\nfrom delfin.db import api as db_api\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\nclass TrapReceiver(manager.Manager):\n    \"\"\"Trap listening and processing functions\"\"\"\n\n    RPC_API_VERSION = '1.0'\n\n    def __init__(self, service_name=None, *args, **kwargs):\n        self.mib_view_controller = kwargs.get('mib_view_controller')\n        self.snmp_engine = kwargs.get('snmp_engine')\n        self.trap_receiver_address = kwargs.get('trap_receiver_address')\n        self.trap_receiver_port = kwargs.get('trap_receiver_port')\n        self.alert_processor = alert_processor.AlertProcessor()\n        self.snmp_validator = snmp_validator.SNMPValidator()\n        self.alert_rpc_api = rpcapi.AlertAPI()\n        super(TrapReceiver, self).__init__(host=kwargs.get('host'))\n\n    def sync_snmp_config(self, ctxt, snmp_config_to_del=None,\n                         snmp_config_to_add=None):\n        if snmp_config_to_del:\n            self._delete_snmp_config(ctxt, snmp_config_to_del)\n\n        if snmp_config_to_add:\n            self.snmp_validator.validate(ctxt, snmp_config_to_add)\n            self._add_snmp_config(ctxt, snmp_config_to_add)\n\n    def _add_snmp_config(self, ctxt, new_config):\n        storage_id = new_config.get(\"storage_id\")\n        LOG.info(\"Start to add snmp trap config for storage: %s\",\n                 storage_id)\n        try:\n            version_int = self._get_snmp_version_int(ctxt,\n                                                     new_config.get(\"version\"))\n            if version_int == constants.SNMP_V2_INT or \\\n                    version_int == constants.SNMP_V1_INT:\n                community_string = cryptor.decode(\n                    new_config.get(\"community_string\"))\n                community_string = encodeutils.to_utf8(community_string)\n                community_index = self._get_community_index(storage_id)\n                config.addV1System(self.snmp_engine, community_index,\n                                   community_string,\n                                   contextName=community_string)\n            else:\n                username = new_config.get(\"username\")\n                engine_id = 
new_config.get(\"engine_id\")\n                if engine_id:\n                    engine_id = v2c.OctetString(hexValue=engine_id)\n\n                auth_key = new_config.get(\"auth_key\")\n                auth_protocol = new_config.get(\"auth_protocol\")\n                privacy_key = new_config.get(\"privacy_key\")\n                privacy_protocol = new_config.get(\"privacy_protocol\")\n                if auth_key:\n                    auth_key = encodeutils.to_utf8(cryptor.decode(auth_key))\n                if privacy_key:\n                    privacy_key = encodeutils.to_utf8(\n                        cryptor.decode(privacy_key))\n                config.addV3User(\n                    self.snmp_engine,\n                    userName=username,\n                    authKey=auth_key,\n                    privKey=privacy_key,\n                    authProtocol=self._get_usm_auth_protocol(ctxt,\n                                                             auth_protocol),\n                    privProtocol=self._get_usm_priv_protocol(ctxt,\n                                                             privacy_protocol),\n                    securityEngineId=engine_id)\n            LOG.info(\"Added snmp trap config for storage: %s successfully.\",\n                     storage_id)\n        except Exception as e:\n            msg = six.text_type(e)\n            LOG.error(\"Failed to add snmp trap config for storage: %s. \"\n                      \"Reason: %s\", storage_id, msg)\n            raise e\n\n    def _delete_snmp_config(self, ctxt, snmp_config):\n        LOG.info(\"Start to remove snmp trap config.\")\n        version_int = self._get_snmp_version_int(ctxt,\n                                                 snmp_config.get(\"version\"))\n        if version_int == constants.SNMP_V3_INT:\n            username = snmp_config.get('username')\n            engine_id = snmp_config.get('engine_id')\n            if engine_id:\n                engine_id = v2c.OctetString(hexValue=engine_id)\n            try:\n                config.delV3User(self.snmp_engine, userName=username,\n                                 securityEngineId=engine_id)\n            except Exception as e:\n                msg = six.text_type(e)\n                LOG.warning(\"Snmp trap configuration to be \"\n                            \"deleted could not be found. 
Reason: %s\", msg)\n        else:\n            storage_id = snmp_config.get('storage_id')\n            community_index = self._get_community_index(storage_id)\n            config.delV1System(self.snmp_engine, community_index)\n\n    def _get_community_index(self, storage_id):\n        return storage_id.replace('-', '')\n\n    def _get_snmp_version_int(self, ctxt, version):\n        _version = version.lower()\n        version_int = constants.VALID_SNMP_VERSIONS.get(_version)\n        if version_int is None:\n            msg = \"Invalid snmp version %s.\" % version\n            raise exception.InvalidSNMPConfig(msg)\n\n        return version_int\n\n    def _get_usm_auth_protocol(self, ctxt, auth_protocol):\n        if auth_protocol:\n            usm_auth_protocol = common_constants.AUTH_PROTOCOL_MAP \\\n                .get(auth_protocol.lower())\n            if usm_auth_protocol:\n                return usm_auth_protocol\n            else:\n                msg = \"Invalid auth_protocol %s.\" % auth_protocol\n                raise exception.InvalidSNMPConfig(msg)\n        else:\n            return config.usmNoAuthProtocol\n\n    def _get_usm_priv_protocol(self, ctxt, privacy_protocol):\n        if privacy_protocol:\n            usm_priv_protocol = common_constants.PRIVACY_PROTOCOL_MAP.get(\n                privacy_protocol.lower())\n            if usm_priv_protocol:\n                return usm_priv_protocol\n            else:\n                msg = \"Invalid privacy_protocol %s.\" % privacy_protocol\n                raise exception.InvalidSNMPConfig(msg)\n\n        return config.usmNoPrivProtocol\n\n    def _mib_builder(self):\n        \"\"\"Loads given set of mib files from given path.\"\"\"\n        mib_builder = builder.MibBuilder()\n        self.mib_view_controller = view.MibViewController(mib_builder)\n\n    def _add_transport(self):\n        \"\"\"Configures the transport parameters for the snmp engine.\"\"\"\n        try:\n            config.addTransport(\n                self.snmp_engine,\n                udp.domainName,\n                udp.UdpTransport().openServerMode(\n                    (self.trap_receiver_address, int(self.trap_receiver_port)))\n            )\n        except Exception as e:\n            LOG.error('Failed to add transport, error is %s'\n                      % six.text_type(e))\n            raise exception.DelfinException(message=six.text_type(e))\n\n    @staticmethod\n    def _get_alert_source_by_host(source_ip):\n        \"\"\"Gets alert source for given source ip address.\"\"\"\n        filters = {'host~': source_ip}\n        ctxt = context.RequestContext()\n\n        # Using the known filter and db exceptions are handled by api\n        alert_sources = db.alert_source_get_all(ctxt, filters=filters)\n        if not alert_sources:\n            raise exception.AlertSourceNotFoundWithHost(source_ip)\n\n        # This is to make sure unique host is configured each alert source\n        unique_alert_source = None\n        if len(alert_sources) > 1:\n            # Clear invalid alert_source\n            for alert_source in alert_sources:\n                try:\n                    db.storage_get(ctxt, alert_source['storage_id'])\n                except exception.StorageNotFound:\n                    LOG.warning('Found redundancy alert source for storage %s'\n                                % alert_source['storage_id'])\n                    try:\n                        db.alert_source_delete(\n                            ctxt, alert_source['storage_id'])\n          
          except Exception as e:\n                        LOG.warning('Deleting the invalid alert source failed, '\n                                    'reason is %s' % six.text_type(e))\n                else:\n                    unique_alert_source = alert_source\n        else:\n            unique_alert_source = alert_sources[0]\n\n        if unique_alert_source is None:\n            msg = (_(\"Failed to get unique alert source with host %s.\")\n                   % source_ip)\n            raise exception.InvalidResults(msg)\n\n        return unique_alert_source\n\n    def _cb_fun(self, state_reference, context_engine_id, context_name,\n                var_binds, cb_ctx):\n        \"\"\"Callback function to process the incoming trap.\"\"\"\n        exec_context = self.snmp_engine.observer.getExecutionContext(\n            'rfc3412.receiveMessage:request')\n        LOG.debug(\"Got notification from: %s\" %\n                  \"#\".join([str(x) for x in exec_context['transportAddress']]))\n        alert = {}\n\n        try:\n            # transportAddress contains both ip and port, extract ip address\n            source_ip = exec_context['transportAddress'][0]\n            alert_source = self._get_alert_source_by_host(source_ip)\n\n            # For non-v3 versions, the community string is used to map the\n            # trap. The pysnmp library filters out traps whose community\n            # strings are not configured. But if a community name x is\n            # configured for storage1 and a trap carrying x arrives from\n            # storage2, the library will still accept it. So for non-v3\n            # versions we must verify that the community name is configured\n            # in the alert source db for the storage sending the trap.\n            # context_name contains the incoming community string value\n            if exec_context['securityModel'] != constants.SNMP_V3_INT \\\n                    and cryptor.decode(alert_source['community_string']) \\\n                    != str(context_name):\n                msg = (_(\"Community string does not match alert source %s, \"\n                         \"dropping it.\") % source_ip)\n                raise exception.InvalidResults(msg)\n\n            for oid, val in var_binds:\n                # Fill in the raw oid and value\n                oid_str = str(oid)\n                alert[oid_str] = str(val)\n\n            # Fill additional info into the alert\n            alert['transport_address'] = source_ip\n            alert['storage_id'] = alert_source['storage_id']\n            filters = {'mgmt_ip': source_ip,\n                       'storage_id': alert_source['storage_id']}\n            ctxt = context.RequestContext()\n            controllers = db.controller_get_all(ctxt, filters=filters)\n            if controllers:\n                alert['controller_name'] = controllers[0].get('name')\n\n            # Hand over to the alert processor for model translation and\n            # export\n            self.alert_processor.process_alert_info(alert)\n        except exception.DelfinException as e:\n            # Log and end the trap processing error flow\n            err_msg = _(\"Failed to process alert report (%s).\") % e.msg\n            LOG.exception(err_msg)\n        except Exception as e:\n            err_msg = six.text_type(e)\n            LOG.exception(err_msg)\n\n    def _load_snmp_config(self):\n        \"\"\"Load snmp config from the database when the service starts.\"\"\"\n        ctxt = context.get_admin_context()\n        marker = None\n        finished = 
False\n        limit = constants.DEFAULT_LIMIT\n        while not finished:\n            alert_sources = db_api.alert_source_get_all(ctxt, marker=marker,\n                                                        limit=limit)\n            for alert_source in alert_sources:\n                snmp_config = dict()\n                snmp_config.update(alert_source)\n                self._add_snmp_config(ctxt, snmp_config)\n                marker = alert_source['storage_id']\n            if len(alert_sources) < limit:\n                finished = True\n\n    @retry(stop_max_attempt_number=180, wait_random_min=4000,\n           wait_random_max=6000)\n    def start(self):\n        \"\"\"Starts the snmp trap receiver with necessary prerequisites.\"\"\"\n        snmp_engine = engine.SnmpEngine()\n        self.snmp_engine = snmp_engine\n\n        try:\n            # Load all the mibs and do snmp config\n            self._mib_builder()\n\n            self._load_snmp_config()\n\n            # Register callback for notification receiver\n            ntfrcv.NotificationReceiver(snmp_engine, self._cb_fun)\n\n            # Add transport info (ip, port) and start the listener\n            self._add_transport()\n\n            snmp_engine.transportDispatcher.jobStarted(\n                constants.SNMP_DISPATCHER_JOB_ID)\n        except Exception as e:\n            LOG.error(e)\n            raise ValueError(\"Failed to set up the trap listener.\")\n\n        try:\n            LOG.info(\"Starting trap receiver.\")\n            snmp_engine.transportDispatcher.runDispatcher()\n        except Exception:\n            snmp_engine.transportDispatcher.closeDispatcher()\n            raise ValueError(\"Failed to start trap listener.\")\n\n    def stop(self):\n        \"\"\"Brings down the snmp trap receiver.\"\"\"\n        # Go ahead with shutdown, ignoring any errors that happen during\n        # the process, since it is shutting down anyway\n        if self.snmp_engine:\n            self.snmp_engine.transportDispatcher.closeDispatcher()\n        LOG.info(\"Trap receiver stopped.\")\n\n    @periodic_task.periodic_task(spacing=1800, run_immediately=True)\n    def heart_beat_task_spawn(self, ctxt):\n        \"\"\"Periodic task to spawn the snmp heart beat check.\"\"\"\n        LOG.info(\"Spawning the snmp heart beat check task.\")\n        alert_source_list = db.alert_source_get_all(ctxt)\n        for alert_source in alert_source_list:\n            self.alert_rpc_api.check_snmp_config(ctxt, alert_source)\n\n    def check_snmp_config(self, ctxt, snmp_config):\n        LOG.info(\"Received snmp config checking request for \"\n                 \"storage: %s\", snmp_config['storage_id'])\n        self.snmp_validator.validate(ctxt, snmp_config)\n"
  },
  {
    "path": "delfin/api/__init__.py",
    "content": "# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport paste.urlmap\n\n\ndef root_app_factory(loader, global_conf, **local_conf):\n    return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)\n"
  },
  {
    "path": "delfin/api/api_utils.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 OpenStack Foundation\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport six\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_utils import strutils\n\nfrom delfin.common import constants\nfrom delfin import exception\nfrom delfin.i18n import _\n\napi_common_opts = [\n    cfg.IntOpt('api_max_limit',\n               default=1000,\n               help='The maximum number of items that a collection '\n                    'resource returns in a single response'),\n\n]\n\nCONF = cfg.CONF\nCONF.register_opts(api_common_opts)\n\nLOG = log.getLogger(__name__)\n\n\ndef remove_invalid_options(context, search_options, allowed_search_options):\n    \"\"\"Remove search options that are not valid for API/context.\"\"\"\n    unknown_options = [opt for opt in search_options\n                       if opt not in allowed_search_options]\n    bad_options = \", \".join(unknown_options)\n    LOG.debug(\"Removing options '%(bad_options)s' from query\",\n              {\"bad_options\": bad_options})\n    for opt in unknown_options:\n        del search_options[opt]\n\n\ndef validate_integer(value, name, min_value=None, max_value=None):\n    \"\"\"Make sure that value is a valid integer, potentially within range.\n\n    :param value: the value of the integer\n    :param name: the name of the integer\n    :param min_value: the min_value of the integer\n    :param max_value: the max_value of the integer\n    :returns: integer\n    \"\"\"\n    try:\n        value = strutils.validate_integer(value, name, min_value, max_value)\n        return value\n    except ValueError as e:\n        raise exception.InvalidInput(six.text_type(e))\n\n\ndef get_pagination_params(params, max_limit=None):\n    \"\"\"Return marker, limit, offset tuple from request.\n\n    :param params: `wsgi.Request`'s GET dictionary, possibly containing\n                   'marker',  'limit', and 'offset' variables. 'marker' is the\n                   id of the last element the client has seen, 'limit' is the\n                   maximum number of items to return and 'offset' is the number\n                   of items to skip from the marker or from the first element.\n                   If 'limit' is not specified, or > max_limit, we default to\n                   max_limit. Negative values for either offset or limit will\n                   cause delfin.InvalidInput() exceptions to be raised. 
If no\n                   offset is present we'll default to 0 and if no marker is\n                   present we'll default to None.\n    :param max_limit: Max value the 'limit' return value can take\n    :returns: Tuple (marker, limit, offset)\n    \"\"\"\n    max_limit = max_limit or CONF.api_max_limit\n    limit = _get_limit_param(params, max_limit)\n    marker = _get_marker_param(params)\n    offset = _get_offset_param(params)\n    return marker, limit, offset\n\n\ndef _get_limit_param(params, max_limit=None):\n    \"\"\"Extract integer limit from request's dictionary or fail.\n\n    Defaults to max_limit if 'limit' is not present, and caps the value\n    at max_limit if the provided 'limit' is greater than max_limit.\n    \"\"\"\n    max_limit = max_limit or CONF.api_max_limit\n    try:\n        limit = int(params.pop('limit', max_limit))\n    except ValueError:\n        msg = _('limit param must be an integer')\n        raise exception.InvalidInput(msg)\n    if limit < 0:\n        msg = _('limit param must not be negative')\n        raise exception.InvalidInput(msg)\n    limit = min(limit, max_limit)\n    return limit\n\n\ndef _get_marker_param(params):\n    \"\"\"Extract marker id from request's dictionary (defaults to None).\"\"\"\n    return params.pop('marker', None)\n\n\ndef _get_offset_param(params):\n    \"\"\"Extract offset from request's dictionary (defaults to 0) or fail.\"\"\"\n    offset = params.pop('offset', 0)\n    return validate_integer(offset,\n                            'offset',\n                            0,\n                            constants.DB_MAX_INT)\n\n\ndef get_sort_params(params, default_key='created_at', default_dir='desc'):\n    \"\"\"Retrieves sort keys/directions parameters.\n\n    Processes the parameters to create a list of sort keys and sort directions\n    that correspond to either the 'sort' parameter or the 'sort_key' and\n    'sort_dir' parameter values. The value of the 'sort' parameter is a comma-\n    separated list of sort keys; each key is optionally appended with\n    ':<sort_direction>'.\n\n    The sort parameters are removed from the request parameters by this\n    function.\n\n    :param params: query parameters in the request\n    :param default_key: default sort key value, added to the list if no\n                        sort keys are supplied\n    :param default_dir: default sort dir value, added to the list if the\n                        corresponding key does not have a direction\n                        specified\n    :returns: list of sort keys, list of sort dirs\n\n    \"\"\"\n\n    sort_keys = []\n    sort_dirs = []\n    if 'sort' in params:\n        for sort in params.pop('sort').strip().split(','):\n            sort_key, _sep, sort_dir = sort.partition(':')\n            if not sort_dir:\n                sort_dir = default_dir\n            sort_keys.append(sort_key.strip())\n            sort_dirs.append(sort_dir.strip())\n    else:\n        sort_key = params.pop('sort_key', default_key)\n        sort_dir = params.pop('sort_dir', default_dir)\n        sort_keys.append(sort_key.strip())\n        sort_dirs.append(sort_dir.strip())\n    return sort_keys, sort_dirs\n"
  },
  {
    "path": "delfin/api/common/__init__.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2013 OpenStack, LLC.\n#\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"\nWSGI middleware for OpenStack API controllers.\n\"\"\"\n\nfrom oslo_log import log\nfrom oslo_service import wsgi as base_wsgi\nimport routes\n\nfrom delfin.api.common import wsgi\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\nclass APIMapper(routes.Mapper):\n    def routematch(self, url=None, environ=None):\n        if url == \"\":\n            result = self._match(\"\", environ)\n            return result[0], result[1]\n        return routes.Mapper.routematch(self, url, environ)\n\n    def connect(self, *args, **kwargs):\n        # NOTE(inhye): Default the format part of a route to only accept json\n        #             and xml so it doesn't eat all characters after a '.'\n        #             in the url.\n        kwargs.setdefault('requirements', {})\n        if not kwargs['requirements'].get('format'):\n            kwargs['requirements']['format'] = 'json|xml'\n        return routes.Mapper.connect(self, *args, **kwargs)\n\n\nclass ProjectMapper(APIMapper):\n    def resource(self, member_name, collection_name, **kwargs):\n        if 'parent_resource' not in kwargs:\n            kwargs['path_prefix'] = '/'\n        else:\n            parent_resource = kwargs['parent_resource']\n            p_collection = parent_resource['collection_name']\n            p_member = parent_resource['member_name']\n            kwargs['path_prefix'] = '/%s/:%s_id' % (p_collection, p_member)\n        routes.Mapper.resource(self,\n                               member_name,\n                               collection_name,\n                               **kwargs)\n\n\nclass APIRouter(base_wsgi.Router):\n    \"\"\"Routes requests on the API to the appropriate controller and method.\"\"\"\n    ExtensionManager = None  # override in subclasses\n\n    @classmethod\n    def factory(cls, global_config, **local_config):\n        \"\"\"Simple paste factory, :class:`delfin.wsgi.Router` doesn't have.\"\"\"\n        return cls()\n\n    def __init__(self, ext_mgr=None):\n        if ext_mgr is None:\n            if self.ExtensionManager:\n                # pylint: disable=not-callable\n                ext_mgr = self.ExtensionManager()\n            else:\n                raise Exception(_(\"Must specify an ExtensionManager class\"))\n\n        mapper = ProjectMapper()\n        self.resources = {}\n        self._setup_routes(mapper)\n        self._setup_ext_routes(mapper, ext_mgr)\n        self._setup_extensions(ext_mgr)\n        super(APIRouter, self).__init__(mapper)\n\n    def _setup_ext_routes(self, mapper, ext_mgr):\n        for resource in ext_mgr.get_resources():\n            LOG.debug('Extended resource: %s',\n                      resource.collection)\n\n            wsgi_resource = wsgi.Resource(resource.controller)\n            self.resources[resource.collection] = wsgi_resource\n            
kargs = dict(\n                controller=wsgi_resource,\n                collection=resource.collection_actions,\n                member=resource.member_actions)\n\n            if resource.parent:\n                kargs['parent_resource'] = resource.parent\n\n            mapper.resource(resource.collection, resource.collection, **kargs)\n\n            if resource.custom_routes_fn:\n                resource.custom_routes_fn(mapper, wsgi_resource)\n\n    def _setup_extensions(self, ext_mgr):\n        for extension in ext_mgr.get_controller_extensions():\n            ext_name = extension.extension.name\n            collection = extension.collection\n            controller = extension.controller\n\n            if collection not in self.resources:\n                LOG.warning('Extension %(ext_name)s: Cannot extend '\n                            'resource %(collection)s: No such resource',\n                            {'ext_name': ext_name, 'collection': collection})\n                continue\n\n            LOG.debug('Extension %(ext_name)s extending resource: '\n                      '%(collection)s',\n                      {'ext_name': ext_name, 'collection': collection})\n\n            resource = self.resources[collection]\n            resource.register_actions(controller)\n            resource.register_extensions(controller)\n\n    def _setup_routes(self, mapper):\n        raise NotImplementedError\n"
  },
  {
    "path": "delfin/api/common/wsgi.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2011 OpenStack LLC.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport inspect\n\nfrom oslo_log import log\nfrom oslo_serialization import jsonutils\nimport six\nimport webob\nimport webob.exc\n\nfrom delfin import exception\nfrom delfin.i18n import _\nfrom delfin.wsgi import common as wsgi\n\nLOG = log.getLogger(__name__)\n\nSUPPORTED_CONTENT_TYPES = (\n    'application/json',\n)\n\n_MEDIA_TYPE_MAP = {\n    'application/json': 'json',\n}\n\n\nclass Request(webob.Request):\n    \"\"\"Add some OpenStack API-specific logic to the base webob.Request.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(Request, self).__init__(*args, **kwargs)\n        self._resource_cache = {}\n\n    def cache_resource(self, resource_to_cache, id_attribute='id', name=None):\n        \"\"\"Cache the given resource.\n\n        Allow API methods to cache objects, such as results from a DB query,\n        to be used by API extensions within the same API request.\n\n        The resource_to_cache can be a list or an individual resource,\n        but ultimately resources are cached individually using the given\n        id_attribute.\n\n        Different resources types might need to be cached during the same\n        request, they can be cached using the name parameter. 
For example:\n\n            Controller 1:\n                request.cache_resource(db_volumes, name='volumes')\n                request.cache_resource(db_volume_types, name='types')\n            Controller 2:\n                db_volumes = request.cached_resource('volumes')\n                db_type_1 = request.cached_resource_by_id('1', 'types')\n\n        If no name is given, a default name will be used for the resource.\n\n        An instance of this class only lives for the lifetime of a\n        single API request, so there's no need to implement full\n        cache management.\n        \"\"\"\n        if not isinstance(resource_to_cache, list):\n            resource_to_cache = [resource_to_cache]\n        if not name:\n            name = self.path\n        cached_resources = self._resource_cache.setdefault(name, {})\n        for resource in resource_to_cache:\n            cached_resources[resource[id_attribute]] = resource\n\n    def cached_resource(self, name=None):\n        \"\"\"Get the resources cached under the given resource name.\n\n        Allow an API extension to get previously stored objects within\n        the same API request.\n\n        Note that the object data will be slightly stale.\n\n        :returns: a dict of id_attribute to the resource from the cached\n                  resources, an empty map if an empty collection was cached,\n                  or None if nothing has been cached yet under this name\n        \"\"\"\n        if not name:\n            name = self.path\n        if name not in self._resource_cache:\n            # Nothing has been cached for this key yet\n            return None\n        return self._resource_cache[name]\n\n    def cached_resource_by_id(self, resource_id, name=None):\n        \"\"\"Get a resource by ID cached under the given resource name.\n\n        Allow an API extension to get a previously stored object\n        within the same API request. 
This is basically a convenience method\n        to look up by ID on the dictionary of all cached resources.\n\n        Note that the object data will be slightly stale.\n\n        :returns: the cached resource or None if the item is not in the cache\n        \"\"\"\n        resources = self.cached_resource(name)\n        if not resources:\n            # Nothing has been cached for this key yet\n            return None\n        return resources.get(resource_id)\n\n    def cache_db_items(self, key, items, item_key='id'):\n        \"\"\"Cache db items.\n\n        Allow API methods to store objects from a DB query to be\n        used by API extensions within the same API request.\n        An instance of this class only lives for the lifetime of a\n        single API request, so there's no need to implement full\n        cache management.\n        \"\"\"\n        self.cache_resource(items, item_key, key)\n\n    def get_db_items(self, key):\n        \"\"\"Get db items by key.\n\n        Allow an API extension to get previously stored objects within\n        the same API request.\n        Note that the object data will be slightly stale.\n        \"\"\"\n        return self.cached_resource(key)\n\n    def get_db_item(self, key, item_key):\n        \"\"\"Get db item by key and item key.\n\n        Allow an API extension to get a previously stored object\n        within the same API request.\n        Note that the object data will be slightly stale.\n        \"\"\"\n        return self.get_db_items(key).get(item_key)\n\n    def cache_db_share_types(self, share_types):\n        self.cache_db_items('share_types', share_types, 'id')\n\n    def cache_db_share_type(self, share_type):\n        self.cache_db_items('share_types', [share_type], 'id')\n\n    def get_db_share_types(self):\n        return self.get_db_items('share_types')\n\n    def get_db_share_type(self, share_type_id):\n        return self.get_db_item('share_types', share_type_id)\n\n    def best_match_content_type(self):\n        \"\"\"Determine the requested response content-type.\"\"\"\n        if 'delfin.best_content_type' not in self.environ:\n            # Calculate the best MIME type\n            content_type = None\n\n            # Check URL path suffix\n            parts = self.path.rsplit('.', 1)\n            if len(parts) > 1:\n                possible_type = 'application/' + parts[1]\n                if possible_type in SUPPORTED_CONTENT_TYPES:\n                    content_type = possible_type\n\n            if not content_type:\n                content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)\n\n            self.environ['delfin.best_content_type'] = (content_type or\n                                                        'application/json')\n\n        return self.environ['delfin.best_content_type']\n\n    def get_content_type(self):\n        \"\"\"Determine content type of the request body.\n\n        Does not do any body introspection, only checks header.\n        \"\"\"\n        if \"Content-Type\" not in self.headers:\n            return None\n\n        allowed_types = SUPPORTED_CONTENT_TYPES\n        content_type = self.content_type\n\n        if content_type not in allowed_types:\n            raise exception.InvalidContentType(content_type)\n\n        return content_type\n\n\nclass ActionDispatcher(object):\n    \"\"\"Maps method name to local methods through action name.\"\"\"\n\n    def dispatch(self, *args, **kwargs):\n        \"\"\"Find and call local method.\"\"\"\n        action = kwargs.pop('action', 
'default')\n        action_method = getattr(self, six.text_type(action), self.default)\n        return action_method(*args, **kwargs)\n\n    def default(self, data):\n        raise NotImplementedError()\n\n\nclass TextDeserializer(ActionDispatcher):\n    \"\"\"Default request body deserialization.\"\"\"\n\n    def deserialize(self, datastring, action='default'):\n        return self.dispatch(datastring, action=action)\n\n    def default(self, datastring):\n        return {}\n\n\nclass JSONDeserializer(TextDeserializer):\n\n    def _from_json(self, datastring):\n        try:\n            return jsonutils.loads(datastring)\n        except ValueError:\n            msg = _(\"cannot understand JSON\")\n            raise exception.MalformedRequestBody(msg)\n\n    def default(self, datastring):\n        return {'body': self._from_json(datastring)}\n\n\nclass DictSerializer(ActionDispatcher):\n    \"\"\"Default response body serialization.\"\"\"\n\n    def serialize(self, data, action='default'):\n        return self.dispatch(data, action=action)\n\n    def default(self, data):\n        return \"\"\n\n\nclass JSONDictSerializer(DictSerializer):\n    \"\"\"Default JSON response body serialization.\"\"\"\n\n    def default(self, data):\n        return six.b(jsonutils.dumps(data))\n\n\ndef serializers(**serializers):\n    \"\"\"Attaches serializers to a method.\n\n    This decorator associates a dictionary of serializers with a\n    method.  Note that the function attributes are directly\n    manipulated; the method is not wrapped.\n    \"\"\"\n\n    def decorator(func):\n        if not hasattr(func, 'wsgi_serializers'):\n            func.wsgi_serializers = {}\n        func.wsgi_serializers.update(serializers)\n        return func\n\n    return decorator\n\n\ndef deserializers(**deserializers):\n    \"\"\"Attaches deserializers to a method.\n\n    This decorator associates a dictionary of deserializers with a\n    method.  Note that the function attributes are directly\n    manipulated; the method is not wrapped.\n    \"\"\"\n\n    def decorator(func):\n        if not hasattr(func, 'wsgi_deserializers'):\n            func.wsgi_deserializers = {}\n        func.wsgi_deserializers.update(deserializers)\n        return func\n\n    return decorator\n\n\ndef response(code):\n    \"\"\"Attaches response code to a method.\n\n    This decorator associates a response code with a method.  Note\n    that the function attributes are directly manipulated; the method\n    is not wrapped.\n    \"\"\"\n\n    def decorator(func):\n        func.wsgi_code = code\n        return func\n\n    return decorator\n\n\nclass ResponseObject(object):\n    \"\"\"Bundles a response object with appropriate serializers.\n\n    Object that app methods may return in order to bind alternate\n    serializers with a response object to be serialized.  Its use is\n    optional.\n    \"\"\"\n\n    def __init__(self, obj, code=None, headers=None, **serializers):\n        \"\"\"Binds serializers with an object.\n\n        Takes keyword arguments akin to the @serializers() decorator\n        for specifying serializers.  
Serializers specified will be\n        given preference over default serializers or method-specific\n        serializers on return.\n        \"\"\"\n\n        self.obj = obj\n        self.serializers = serializers\n        self._default_code = 200\n        self._code = code\n        self._headers = headers or {}\n        self.serializer = None\n        self.media_type = None\n\n    def __getitem__(self, key):\n        \"\"\"Retrieves a header with the given name.\"\"\"\n\n        return self._headers[key.lower()]\n\n    def __setitem__(self, key, value):\n        \"\"\"Sets a header with the given name to the given value.\"\"\"\n\n        self._headers[key.lower()] = value\n\n    def __delitem__(self, key):\n        \"\"\"Deletes the header with the given name.\"\"\"\n\n        del self._headers[key.lower()]\n\n    def _bind_method_serializers(self, meth_serializers):\n        \"\"\"Binds method serializers with the response object.\n\n        Binds the method serializers with the response object.\n        Serializers specified to the constructor will take precedence\n        over serializers specified to this method.\n\n        :param meth_serializers: A dictionary with keys mapping to\n                                 response types and values containing\n                                 serializer objects.\n        \"\"\"\n\n        # We can't use update because that would be the wrong\n        # precedence\n        for mtype, serializer in meth_serializers.items():\n            self.serializers.setdefault(mtype, serializer)\n\n    def get_serializer(self, content_type, default_serializers=None):\n        \"\"\"Returns the serializer for the wrapped object.\n\n        Returns the serializer for the wrapped object subject to the\n        indicated content type.  If no serializer matching the content\n        type is attached, an appropriate serializer drawn from the\n        default serializers will be used.  If no appropriate\n        serializer is available, raises InvalidContentType.\n        \"\"\"\n\n        default_serializers = default_serializers or {}\n\n        try:\n            mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)\n            if mtype in self.serializers:\n                return mtype, self.serializers[mtype]\n            else:\n                return mtype, default_serializers[mtype]\n        except (KeyError, TypeError):\n            raise exception.InvalidContentType(content_type)\n\n    def preserialize(self, content_type, default_serializers=None):\n        \"\"\"Prepares the serializer that will be used to serialize.\n\n        Determines the serializer that will be used and prepares an\n        instance of it for later call.  This allows the serializer to\n        be accessed by extensions for, e.g., template extension.\n        \"\"\"\n\n        mtype, serializer = self.get_serializer(content_type,\n                                                default_serializers)\n        self.media_type = mtype\n        self.serializer = serializer()\n\n    def attach(self, **kwargs):\n        \"\"\"Attach slave templates to serializers.\"\"\"\n\n        if self.media_type in kwargs:\n            self.serializer.attach(kwargs[self.media_type])\n\n    def serialize(self, request, content_type, default_serializers=None):\n        \"\"\"Serializes the wrapped object.\n\n        Utility method for serializing the wrapped object.  
Returns a\n        webob.Response object.\n        \"\"\"\n\n        if self.serializer:\n            serializer = self.serializer\n        else:\n            _mtype, _serializer = self.get_serializer(content_type,\n                                                      default_serializers)\n            serializer = _serializer()\n\n        response = webob.Response()\n        response.status_int = self.code\n        for hdr, value in self._headers.items():\n            response.headers[hdr] = six.text_type(value)\n        response.headers['Content-Type'] = six.text_type(content_type)\n        if self.obj is not None:\n            response.body = serializer.serialize(self.obj)\n\n        return response\n\n    @property\n    def code(self):\n        \"\"\"Retrieve the response status.\"\"\"\n\n        return self._code or self._default_code\n\n    @property\n    def headers(self):\n        \"\"\"Retrieve the headers.\"\"\"\n\n        return self._headers.copy()\n\n\ndef action_peek_json(body):\n    \"\"\"Determine action to invoke.\"\"\"\n\n    try:\n        decoded = jsonutils.loads(body)\n    except ValueError:\n        msg = _(\"cannot understand JSON\")\n        raise exception.MalformedRequestBody(msg)\n\n    # Make sure there's exactly one key...\n    if len(decoded) != 1:\n        msg = _(\"too many body keys\")\n        raise exception.MalformedRequestBody(msg)\n\n    # Return the action and the decoded body...\n    return list(decoded.keys())[0]\n\n\nclass ResourceExceptionHandler(object):\n    \"\"\"Context manager to handle Resource exceptions.\n\n    Used when processing exceptions generated by API implementation\n    methods (or their extensions).  Converts most exceptions to Fault\n    exceptions, with the appropriate logging.\n    \"\"\"\n\n    def __enter__(self):\n        return None\n\n    def __exit__(self, ex_type, ex_value, ex_traceback):\n        if not ex_value:\n            return True\n\n        if isinstance(ex_value, exception.DelfinException):\n            raise Fault(exception.ConvertedException(ex_value))\n        elif isinstance(ex_value, TypeError):\n            exc_info = (ex_type, ex_value, ex_traceback)\n            LOG.error('Exception handling resource: %s',\n                      ex_value, exc_info=exc_info)\n            exc = exception.BadRequest()\n            raise Fault(exception.ConvertedException(exc))\n        elif isinstance(ex_value, Fault):\n            LOG.info(\"Fault thrown: %s\", ex_value)\n            raise ex_value\n        elif isinstance(ex_value, webob.exc.HTTPException):\n            LOG.info(\"HTTP exception thrown: %s\", ex_value)\n            raise Fault(ex_value)\n\n        # We didn't handle the exception\n        return False\n\n\nclass Resource(wsgi.Application):\n    \"\"\"WSGI app that handles (de)serialization and controller dispatch.\n\n    WSGI app that reads routing information supplied by RoutesMiddleware\n    and calls the requested action method upon its controller.  All\n    controller action methods must accept a 'req' argument, which is the\n    incoming wsgi.Request. 
If the operation is a PUT or POST, the controller\n    method must also accept a 'body' argument (the deserialized request body).\n    They may raise a webob.exc exception or return a dict, which will be\n    serialized according to the requested content type.\n\n    Exceptions derived from webob.exc.HTTPException will be automatically\n    wrapped in Fault() to provide API friendly error responses.\n    \"\"\"\n    support_api_request_version = True\n\n    def __init__(self, controller, action_peek=None, **deserializers):\n        \"\"\"Init method of Resource.\n\n        :param controller: object that implements methods created by routes\n                           lib\n        :param action_peek: dictionary of routines for peeking into an action\n                            request body to determine the desired action\n        \"\"\"\n\n        self.controller = controller\n\n        default_deserializers = dict(json=JSONDeserializer)\n        default_deserializers.update(deserializers)\n\n        self.default_deserializers = default_deserializers\n        self.default_serializers = dict(json=JSONDictSerializer)\n\n        self.action_peek = dict(json=action_peek_json)\n        self.action_peek.update(action_peek or {})\n\n        # Copy over the actions dictionary\n        self.wsgi_actions = {}\n        if controller:\n            self.register_actions(controller)\n\n        # Save a mapping of extensions\n        self.wsgi_extensions = {}\n        self.wsgi_action_extensions = {}\n\n    def register_actions(self, controller):\n        \"\"\"Registers controller actions with this resource.\"\"\"\n\n        actions = getattr(controller, 'wsgi_actions', {})\n        for key, method_name in actions.items():\n            self.wsgi_actions[key] = getattr(controller, method_name)\n\n    def register_extensions(self, controller):\n        \"\"\"Registers controller extensions with this resource.\"\"\"\n\n        extensions = getattr(controller, 'wsgi_extensions', [])\n        for method_name, action_name in extensions:\n            # Look up the extending method\n            extension = getattr(controller, method_name)\n\n            if action_name:\n                # Extending an action...\n                if action_name not in self.wsgi_action_extensions:\n                    self.wsgi_action_extensions[action_name] = []\n                self.wsgi_action_extensions[action_name].append(extension)\n            else:\n                # Extending a regular method\n                if method_name not in self.wsgi_extensions:\n                    self.wsgi_extensions[method_name] = []\n                self.wsgi_extensions[method_name].append(extension)\n\n    def get_action_args(self, request_environment):\n        \"\"\"Parse dictionary created by routes library.\"\"\"\n\n        # NOTE(Vek): Check for get_action_args() override in the\n        # controller\n        if hasattr(self.controller, 'get_action_args'):\n            return self.controller.get_action_args(request_environment)\n\n        try:\n            args = request_environment['wsgiorg.routing_args'][1].copy()\n        except (KeyError, IndexError, AttributeError):\n            return {}\n\n        try:\n            del args['controller']\n        except KeyError:\n            pass\n\n        try:\n            del args['format']\n        except KeyError:\n            pass\n\n        return args\n\n    def get_body(self, request):\n        try:\n            content_type = request.get_content_type()\n        except exception.InvalidContentType:\n            
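# An unrecognized Content-Type is treated like a missing body:\n            # the caller gets (None, '') and proceeds without contents.\n            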
LOG.debug(\"Unrecognized Content-Type provided in request\")\n            return None, ''\n\n        if not content_type:\n            LOG.debug(\"No Content-Type provided in request\")\n            return None, ''\n\n        if len(request.body) <= 0:\n            LOG.debug(\"Empty body provided in request\")\n            return None, ''\n\n        return content_type, request.body\n\n    def deserialize(self, meth, content_type, body):\n        meth_deserializers = getattr(meth, 'wsgi_deserializers', {})\n        try:\n            mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)\n            if mtype in meth_deserializers:\n                deserializer = meth_deserializers[mtype]\n            else:\n                deserializer = self.default_deserializers[mtype]\n        except (KeyError, TypeError):\n            raise exception.InvalidContentType(content_type)\n\n        return deserializer().deserialize(body)\n\n    def pre_process_extensions(self, extensions, request, action_args):\n        # List of callables for post-processing extensions\n        post = []\n\n        for ext in extensions:\n            if inspect.isgeneratorfunction(ext):\n                response = None\n\n                # If it's a generator function, the part before the\n                # yield is the preprocessing stage\n                try:\n                    with ResourceExceptionHandler():\n                        gen = ext(req=request, **action_args)\n                        response = next(gen)\n                except Fault as ex:\n                    response = ex\n\n                # We had a response...\n                if response:\n                    return response, []\n\n                # No response, queue up generator for post-processing\n                post.append(gen)\n            else:\n                # Regular functions only perform post-processing\n                post.append(ext)\n\n        # Run post-processing in the reverse order\n        return None, reversed(post)\n\n    def post_process_extensions(self, extensions, resp_obj, request,\n                                action_args):\n        for ext in extensions:\n            response = None\n            if inspect.isgenerator(ext):\n                # If it's a generator, run the second half of\n                # processing\n                try:\n                    with ResourceExceptionHandler():\n                        response = ext.send(resp_obj)\n                except StopIteration:\n                    # Normal exit of generator\n                    continue\n                except Fault as ex:\n                    response = ex\n            else:\n                # Regular functions get post-processing...\n                try:\n                    with ResourceExceptionHandler():\n                        response = ext(req=request, resp_obj=resp_obj,\n                                       **action_args)\n                except Fault as ex:\n                    response = ex\n\n            # We had a response...\n            if response:\n                return response\n\n        return None\n\n    @webob.dec.wsgify(RequestClass=Request)\n    def __call__(self, request):\n        \"\"\"WSGI method that controls (de)serialization and method dispatch.\"\"\"\n\n        LOG.info(\"%(method)s %(url)s\", {\"method\": request.method,\n                                        \"url\": request.url})\n\n        # Identify the action, its arguments, and the requested\n        # content type\n        action_args = 
self.get_action_args(request.environ)\n        action = action_args.pop('action', None)\n        content_type, body = self.get_body(request)\n        accept = request.best_match_content_type()\n\n        # NOTE(Vek): Splitting the function up this way allows for\n        #            auditing by external tools that wrap the existing\n        #            function.  If we try to audit __call__(), we can\n        #            run into troubles due to the @webob.dec.wsgify()\n        #            decorator.\n        return self._process_stack(request, action, action_args,\n                                   content_type, body, accept)\n\n    def _process_stack(self, request, action, action_args,\n                       content_type, body, accept):\n        \"\"\"Implement the processing stack.\"\"\"\n\n        # Get the implementing method\n        try:\n            meth, extensions = self.get_method(request, action,\n                                               content_type, body)\n        except (AttributeError, TypeError):\n            ex = exception.ConvertedException(exception.NotFound())\n            return Fault(ex)\n        except KeyError as ex:\n            ex = exception.ConvertedException(\n                exception.NoSuchAction(ex.args[0]))\n            return Fault(ex)\n        except exception.MalformedRequestBody as ex:\n            ex = exception.ConvertedException(ex)\n            return Fault(ex)\n\n        # Now, deserialize the request body...\n        try:\n            if content_type:\n                contents = self.deserialize(meth, content_type, body)\n            else:\n                contents = {}\n        except exception.InvalidContentType as ex:\n            ex = exception.ConvertedException(ex)\n            return Fault(ex)\n        except exception.MalformedRequestBody as ex:\n            ex = exception.ConvertedException(ex)\n            return Fault(ex)\n\n        # Update the action args\n        action_args.update(contents)\n\n        project_id = action_args.pop(\"project_id\", None)\n        context = request.environ.get('delfin.context')\n        if (context and project_id and (project_id != context.project_id)):\n            ex = exception.ConvertedException(exception.MalformedRequestUrl())\n            return Fault(ex)\n\n        # Run pre-processing extensions\n        response, post = self.pre_process_extensions(extensions,\n                                                     request, action_args)\n\n        if not response:\n            try:\n                with ResourceExceptionHandler():\n                    action_result = self.dispatch(meth, request, action_args)\n            except Fault as ex:\n                response = ex\n\n        if not response:\n            # No exceptions; convert action_result into a\n            # ResponseObject\n            resp_obj = None\n            if type(action_result) is dict or action_result is None:\n                resp_obj = ResponseObject(action_result)\n            elif isinstance(action_result, ResponseObject):\n                resp_obj = action_result\n            else:\n                response = action_result\n\n            # Run post-processing extensions\n            if resp_obj:\n                _set_request_id_header(request, resp_obj)\n                # Do a preserialize to set up the response object\n                serializers = getattr(meth, 'wsgi_serializers', {})\n                resp_obj._bind_method_serializers(serializers)\n                if hasattr(meth, 'wsgi_code'):\n          
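          # Honor a status code attached via the @response decorator;\n                    # it overrides the default 200 for this handler.\n          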
          resp_obj._default_code = meth.wsgi_code\n                resp_obj.preserialize(accept, self.default_serializers)\n\n                # Process post-processing extensions\n                response = self.post_process_extensions(post, resp_obj,\n                                                        request, action_args)\n\n            if resp_obj and not response:\n                response = resp_obj.serialize(request, accept,\n                                              self.default_serializers)\n\n        try:\n            msg_dict = dict(url=request.url, status=response.status_int)\n            msg = _(\"%(url)s returned with HTTP %(status)s\") % msg_dict\n        except AttributeError as e:\n            msg_dict = dict(url=request.url, e=e)\n            msg = _(\"%(url)s returned a fault: %(e)s\") % msg_dict\n\n        LOG.info(msg)\n        return response\n\n    def get_method(self, request, action, content_type, body):\n        \"\"\"Look up the action-specific method and its extensions.\"\"\"\n\n        # Look up the method\n        try:\n            if not self.controller:\n                meth = getattr(self, action)\n            else:\n                meth = getattr(self.controller, action)\n        except AttributeError:\n            if (not self.wsgi_actions or\n                    action not in ['action', 'create', 'delete']):\n                # Propagate the error\n                raise\n        else:\n            return meth, self.wsgi_extensions.get(action, [])\n\n        if action == 'action':\n            # OK, it's an action; figure out which action...\n            mtype = _MEDIA_TYPE_MAP.get(content_type)\n            action_name = self.action_peek[mtype](body)\n            LOG.debug(\"Action body: %s\", body)\n        else:\n            action_name = action\n\n        # Look up the action method\n        return (self.wsgi_actions[action_name],\n                self.wsgi_action_extensions.get(action_name, []))\n\n    def dispatch(self, method, request, action_args):\n        \"\"\"Dispatch a call to the action-specific method.\"\"\"\n\n        return method(req=request, **action_args)\n\n\ndef action(name):\n    \"\"\"Mark a function as an action.\n\n    The given name will be taken as the action key in the body.\n\n    This is also overloaded to allow extensions to provide\n    non-extending definitions of create and delete operations.\n    \"\"\"\n\n    def decorator(func):\n        func.wsgi_action = name\n        return func\n\n    return decorator\n\n\ndef extends(*args, **kwargs):\n    \"\"\"Indicate a function extends an operation.\n\n    Can be used as either::\n\n        @extends\n        def index(...):\n            pass\n\n    or as::\n\n        @extends(action='resize')\n        def _action_resize(...):\n            pass\n    \"\"\"\n\n    def decorator(func):\n        # Store enough information to find what we're extending\n        func.wsgi_extends = (func.__name__, kwargs.get('action'))\n        return func\n\n    # If we have positional arguments, call the decorator\n    if args:\n        return decorator(*args)\n\n    # OK, return the decorator instead\n    return decorator\n\n\nclass ControllerMetaclass(type):\n    \"\"\"Controller metaclass.\n\n    This metaclass automates the task of assembling a dictionary\n    mapping action keys to method names.\n    \"\"\"\n\n    def __new__(mcs, name, bases, cls_dict):\n        \"\"\"Adds the wsgi_actions dictionary to the class.\"\"\"\n\n        # Find all actions\n        actions = {}\n        
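# Extension methods marked with @extends are collected below as\n        # (method_name, action) pairs.\n        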
extensions = []\n        # start with wsgi actions from base classes\n        for base in bases:\n            actions.update(getattr(base, 'wsgi_actions', {}))\n\n        for key, value in cls_dict.items():\n            if not callable(value):\n                continue\n            if getattr(value, 'wsgi_action', None):\n                actions[value.wsgi_action] = key\n            elif getattr(value, 'wsgi_extends', None):\n                extensions.append(value.wsgi_extends)\n\n        # Add the actions and extensions to the class dict\n        cls_dict['wsgi_actions'] = actions\n        cls_dict['wsgi_extensions'] = extensions\n\n        return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,\n                                                       cls_dict)\n\n\n@six.add_metaclass(ControllerMetaclass)\nclass Controller(object):\n    \"\"\"Default controller.\"\"\"\n\n    _view_builder_class = None\n\n    def __init__(self, view_builder=None):\n        \"\"\"Initialize controller with a view builder instance.\"\"\"\n        if view_builder:\n            self._view_builder = view_builder\n        elif self._view_builder_class:\n            # pylint: disable=not-callable\n            self._view_builder = self._view_builder_class()\n        else:\n            self._view_builder = None\n\n    @staticmethod\n    def is_valid_body(body, entity_name):\n        if not (body and entity_name in body):\n            return False\n\n        def is_dict(d):\n            try:\n                d.get(None)\n                return True\n            except AttributeError:\n                return False\n\n        if not is_dict(body[entity_name]):\n            return False\n\n        return True\n\n\nclass Fault(webob.exc.HTTPException):\n    \"\"\"Wrap webob.exc.HTTPException to provide API friendly response.\"\"\"\n\n    def __init__(self, exception):\n        \"\"\"Create a Fault for the given webob.exc.exception.\"\"\"\n        self.wrapped_exc = exception\n        self.status_int = exception.status_int\n\n    @webob.dec.wsgify(RequestClass=Request)\n    def __call__(self, req):\n        \"\"\"Generate a WSGI response based on the exception passed to ctor.\"\"\"\n        # Replace the body with fault details.\n        status_code = self.wrapped_exc.status_int\n        fault_data = {\n            'error_code': self.wrapped_exc.error_code,\n            'error_msg': self.wrapped_exc.explanation,\n            'error_args': self.wrapped_exc.error_args}\n        LOG.info(\"Exception response code: %(code)s, reason: %(reason)s\",\n                 {'code': status_code, 'reason': fault_data})\n        if status_code == 413:\n            retry = self.wrapped_exc.headers['Retry-After']\n            fault_data['retryAfter'] = '%s' % retry\n\n        content_type = req.best_match_content_type()\n        serializer = {\n            'application/json': JSONDictSerializer(),\n        }[content_type]\n\n        self.wrapped_exc.body = serializer.serialize(fault_data)\n        self.wrapped_exc.content_type = content_type\n        _set_request_id_header(req, self.wrapped_exc.headers)\n\n        return self.wrapped_exc\n\n    def __str__(self):\n        return self.wrapped_exc.__str__()\n\n\ndef _set_request_id_header(req, headers):\n    context = req.environ.get('delfin.context')\n    if context:\n        headers['x-compute-request-id'] = context.request_id\n"
  },
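Illustrative sketch (not part of the repo): how the action() decorator and ControllerMetaclass above fit together. The controller class, action name, and return value are hypothetical, and it assumes delfin is importable.

from delfin.api.common import wsgi

class DemoController(wsgi.Controller):
    """Hypothetical controller; the metaclass assembles wsgi_actions."""

    # action() stamps 'demo-action' onto the function; at class-creation
    # time ControllerMetaclass maps that key to the method name.
    @wsgi.action('demo-action')
    def _demo_action(self, req, id, body):
        return {'demo': 'ok'}

# Resource.get_method() consults this mapping when the routed action is
# 'action' and the request body names 'demo-action'.
assert DemoController.wsgi_actions == {'demo-action': '_demo_action'}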
  {
    "path": "delfin/api/contrib/__init__.py",
    "content": "# Copyright 2011 Justin Santa Barbara\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Contrib contains extensions that are shipped with delfin.\n\nIt can't be called 'extensions' because that causes namespacing problems.\n\n\"\"\"\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom delfin.api import extensions\n\nCONF = cfg.CONF\nLOG = log.getLogger(__name__)\n\n\ndef standard_extensions(ext_mgr):\n    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__)\n\n\ndef select_extensions(ext_mgr):\n    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__,\n                                        CONF.delfin_api_ext_list)\n"
  },
  {
    "path": "delfin/api/extensions.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2011 OpenStack LLC.\n# Copyright 2011 Justin Santa Barbara\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport os\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_utils import importutils\n\nfrom delfin.api.common import wsgi\nfrom delfin import exception\n\nCONF = cfg.CONF\nLOG = log.getLogger(__name__)\n\n\nclass ExtensionDescriptor(object):\n    \"\"\"Base class that defines the contract for extensions.\n\n    Note that you don't have to derive from this class to have a valid\n    extension; it is purely a convenience.\n\n    \"\"\"\n\n    # The name of the extension, e.g., 'Fox In Socks'\n    name = None\n\n    # The alias for the extension, e.g., 'FOXNSOX'\n    alias = None\n\n    # Description comes from the docstring for the class\n\n    # The timestamp when the extension was last updated, e.g.,\n    # '2011-01-22T13:25:27-06:00'\n    updated = None\n\n    def __init__(self, ext_mgr):\n        \"\"\"Register extension with the extension manager.\"\"\"\n\n        ext_mgr.register(self)\n        self.ext_mgr = ext_mgr\n\n    def get_resources(self):\n        \"\"\"List of extensions.ResourceExtension extension objects.\n\n        Resources define new nouns, and are accessible through URLs.\n\n        \"\"\"\n        resources = []\n        return resources\n\n    def get_controller_extensions(self):\n        \"\"\"List of extensions.ControllerExtension extension objects.\n\n        Controller extensions are used to extend existing controllers.\n        \"\"\"\n        controller_exts = []\n        return controller_exts\n\n\nclass ExtensionsResource(wsgi.Resource):\n\n    def __init__(self, extension_manager):\n        self.extension_manager = extension_manager\n        super(ExtensionsResource, self).__init__(None)\n\n    def _translate(self, ext):\n        ext_data = {}\n        ext_data['name'] = ext.name\n        ext_data['alias'] = ext.alias\n        ext_data['description'] = ext.__doc__\n        ext_data['updated'] = ext.updated\n        ext_data['links'] = []  # TODO(dprince): implement extension links\n        return ext_data\n\n    def index(self, req):\n        extensions = []\n        for _alias, ext in self.extension_manager.extensions.items():\n            extensions.append(self._translate(ext))\n        return dict(extensions=extensions)\n\n    def show(self, req, id):\n        try:\n            # NOTE(dprince): the extensions alias is used as the 'id' for show\n            ext = self.extension_manager.extensions[id]\n        except KeyError:\n            raise exception.NotFound()\n\n        return dict(extension=self._translate(ext))\n\n    def delete(self, req, id):\n        raise exception.NotFound()\n\n    def create(self, req):\n        raise exception.NotFound()\n\n\nclass ExtensionManager(object):\n    \"\"\"Load extensions from the configured extension path.\n\n    See delfin/tests/api/extensions/foxinsocks/extension.py 
for an\n    example extension implementation.\n\n    \"\"\"\n\n    def __init__(self):\n        LOG.info('Initializing extension manager.')\n\n        self.cls_list = CONF.delfin_api_extension\n\n        self.extensions = {}\n        self._load_extensions()\n\n    def register(self, ext):\n        # Do nothing if the extension doesn't check out\n        if not self._check_extension(ext):\n            return\n\n        alias = ext.alias\n        LOG.info('Loaded extension: %s', alias)\n\n        if alias in self.extensions:\n            raise exception.DuplicateExtension(alias)\n        self.extensions[alias] = ext\n\n    def get_resources(self):\n        \"\"\"Returns a list of ResourceExtension objects.\"\"\"\n\n        resources = []\n        resources.append(ResourceExtension('extensions',\n                                           ExtensionsResource(self)))\n\n        for ext in self.extensions.values():\n            try:\n                resources.extend(ext.get_resources())\n            except AttributeError:\n                # NOTE(dprince): Extensions aren't required to have resource\n                # extensions\n                pass\n        return resources\n\n    def get_controller_extensions(self):\n        \"\"\"Returns a list of ControllerExtension objects.\"\"\"\n        controller_exts = []\n        for ext in self.extensions.values():\n            try:\n                get_ext_method = ext.get_controller_extensions\n            except AttributeError:\n                # NOTE(Vek): Extensions aren't required to have\n                # controller extensions\n                continue\n            controller_exts.extend(get_ext_method())\n        return controller_exts\n\n    def _check_extension(self, extension):\n        \"\"\"Checks for required methods in extension objects.\"\"\"\n        try:\n            LOG.debug('Ext name: %s', extension.name)\n            LOG.debug('Ext alias: %s', extension.alias)\n            LOG.debug('Ext description: %s',\n                      ' '.join(extension.__doc__.strip().split()))\n            LOG.debug('Ext updated: %s', extension.updated)\n        except AttributeError:\n            LOG.exception(\"Exception loading extension.\")\n            return False\n\n        return True\n\n    def load_extension(self, ext_factory):\n        \"\"\"Execute an extension factory.\n\n        Loads an extension.  The 'ext_factory' is the name of a\n        callable that will be imported and called with one\n        argument--the extension manager.  The factory callable is\n        expected to call the register() method at least once.\n        \"\"\"\n\n        LOG.debug(\"Loading extension %s\", ext_factory)\n\n        # Load the factory\n        factory = importutils.import_class(ext_factory)\n\n        # Call it\n        LOG.debug(\"Calling extension factory %s\", ext_factory)\n        factory(self)\n\n    def _load_extensions(self):\n        \"\"\"Load extensions specified on the command line.\"\"\"\n\n        extensions = list(self.cls_list)\n\n        # NOTE(thingee): Backwards compat for the old extension loader path.\n        # We can drop this post-grizzly in the H release.\n        old_contrib_path = ('delfin.api.common.share.contrib.'\n                            'standard_extensions')\n        new_contrib_path = 'delfin.api.contrib.standard_extensions'\n        if old_contrib_path in extensions:\n            LOG.warning('delfin_api_extension is set to deprecated path: '\n                        '%s.',\n                        old_contrib_path)\n            LOG.warning('Please set your flag or delfin.conf settings for '\n                        'delfin_api_extension to: %s.', new_contrib_path)\n            extensions = [e.replace(old_contrib_path, new_contrib_path)\n                          for e in extensions]\n\n        for ext_factory in extensions:\n            try:\n                self.load_extension(ext_factory)\n            except Exception as exc:\n                LOG.warning('Failed to load extension %(ext_factory)s: '\n                            '%(exc)s.',\n                            {\"ext_factory\": ext_factory, \"exc\": exc})\n\n\nclass ControllerExtension(object):\n    \"\"\"Extend core controllers of the delfin API.\n\n    Provides a way to extend existing delfin API core\n    controllers.\n    \"\"\"\n\n    def __init__(self, extension, collection, controller):\n        self.extension = extension\n        self.collection = collection\n        self.controller = controller\n\n\nclass ResourceExtension(object):\n    \"\"\"Add top-level resources to the delfin API.\"\"\"\n\n    def __init__(self, collection, controller, parent=None,\n                 collection_actions=None, member_actions=None,\n                 custom_routes_fn=None):\n        if not collection_actions:\n            collection_actions = {}\n        if not member_actions:\n            member_actions = {}\n        self.collection = collection\n        self.controller = controller\n        self.parent = parent\n        self.collection_actions = collection_actions\n        self.member_actions = member_actions\n        self.custom_routes_fn = custom_routes_fn\n\n\ndef load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):\n    \"\"\"Registers all standard API extensions.\"\"\"\n\n    # Walk through all the modules in our directory...\n    our_dir = path[0]\n    for dirpath, dirnames, filenames in os.walk(our_dir):\n        # Compute the relative package name from the dirpath\n        relpath = os.path.relpath(dirpath, our_dir)\n        if relpath == '.':\n            relpkg = ''\n        else:\n            relpkg = '.%s' % '.'.join(relpath.split(os.sep))\n\n        # Now, consider each file in turn, only considering .py and .pyc files\n        for fname in filenames:\n            root, ext = os.path.splitext(fname)\n\n            # Skip __init__ and anything that's not .py or .pyc\n            if (ext not in ('.py', '.pyc')) or root == '__init__':\n                continue\n\n            # If .pyc and .py both exist, skip .pyc\n            if ext == '.pyc' and ((root + '.py') in filenames):\n                continue\n\n            # Try loading it\n            classname = \"%s%s\" % (root[0].upper(), root[1:])\n            classpath = (\"%s%s.%s.%s\" %\n                         (package, relpkg, root, classname))\n\n            if ext_list is not None and classname not in ext_list:\n                logger.debug(\"Skipping extension: %s\", classpath)\n                continue\n\n            try:\n                ext_mgr.load_extension(classpath)\n            except Exception as exc:\n                logger.warning('Failed to load extension %(classpath)s: '\n                               '%(exc)s.',\n                               {\"classpath\": classpath, \"exc\": exc})\n\n        # Now, let's consider any subdirectories we may have...\n        subdirs = []\n        for dname in dirnames:\n            # Skip it if it does not have __init__.py\n            if not os.path.exists(os.path.join(dirpath, dname,\n                                               '__init__.py')):\n                continue\n\n            # If it has extension(), delegate...\n            ext_name = (\"%s%s.%s.extension\" %\n                        (package, relpkg, dname))\n            try:\n                ext = importutils.import_class(ext_name)\n            except ImportError:\n                # extension() doesn't exist on it, so we'll explore\n                # the directory for ourselves\n                subdirs.append(dname)\n            else:\n                try:\n                    ext(ext_mgr)\n                except Exception as exc:\n                    logger.warning('Failed to load extension '\n                                   '%(ext_name)s: %(exc)s.',\n                                   {\"ext_name\": ext_name, \"exc\": exc})\n\n        # Update the list of directories we'll explore...\n        dirnames[:] = subdirs\n"
  },
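Illustrative sketch (not part of the repo): the minimal contract the extension machinery above expects. Everything here is hypothetical; load_standard_extensions derives the class name from the module name (demo.py -> Demo), and _check_extension() requires name, alias, a docstring, and updated.

from delfin.api import extensions

class Demo(extensions.ExtensionDescriptor):
    """Demo extension."""  # the description is taken from the docstring

    name = 'Demo'
    alias = 'demo'
    updated = '2021-01-01T00:00:00+00:00'

    def get_resources(self):
        # A real extension returns ResourceExtension objects exposing new
        # top-level URLs; None stands in for a wsgi.Resource controller.
        return [extensions.ResourceExtension('demo', None)]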
  {
    "path": "delfin/api/middlewares.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport webob.dec\n\nfrom delfin import context\nfrom delfin.wsgi import common as wsgi\n\n\nclass ContextWrapper(wsgi.Middleware):\n    \"\"\"Add 'delfin.context' to req.environ\"\"\"\n\n    @webob.dec.wsgify(RequestClass=wsgi.Request)\n    def __call__(self, req):\n        req.environ['delfin.context'] = context.RequestContext()\n        return self.application\n"
  },
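Illustrative sketch (not part of the repo): where ContextWrapper sits. It assumes wsgi.Middleware takes the wrapped application as its constructor argument (the usual OpenStack convention); build_app is a hypothetical helper.

from delfin.api.middlewares import ContextWrapper

def build_app(api_app):
    # Wrap the API application so every request enters with a fresh
    # RequestContext; downstream code reads it back via
    # request.environ.get('delfin.context'), e.g. Resource._process_stack
    # and _set_request_id_header in delfin/api/common/wsgi.py.
    return ContextWrapper(api_app)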
  {
    "path": "delfin/api/schemas/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/api/schemas/access_info.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.api.validation import parameter_types\n\nupdate = {\n    'type': 'object',\n    'properties': {\n        'rest': {\n            'type': 'object',\n            'properties': {\n                'host': parameter_types.hostname_or_ip_address,\n                'port': parameter_types.tcp_udp_port,\n                'username': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'password': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255}\n            },\n            'required': ['host', 'port', 'username', 'password'],\n            'additionalProperties': False\n        },\n        'ssh': {\n            'type': 'object',\n            'properties': {\n                'host': parameter_types.hostname_or_ip_address,\n                'port': parameter_types.tcp_udp_port,\n                'username': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'password': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'pub_key': {'type': 'string', 'minLength': 1,\n                            'maxLength': 4096},\n                'pub_key_type': parameter_types.host_key_type\n            },\n            'required': ['host', 'port', 'username'],\n            'additionalProperties': False\n        },\n        'cli': {\n            'type': 'object',\n            'properties': {\n                'host': parameter_types.hostname_or_ip_address,\n                'port': parameter_types.tcp_udp_port,\n                'username': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'password': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255}\n            },\n            'required': ['host', 'username', 'password'],\n            'additionalProperties': False\n        },\n        'smis': {\n            'type': 'object',\n            'properties': {\n                'host': parameter_types.hostname_or_ip_address,\n                'port': parameter_types.tcp_udp_port,\n                'username': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'password': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'namespace': {'type': 'string', 'minLength': 1,\n                              'maxLength': 255}\n            },\n            'required': ['host', 'username', 'password'],\n            'additionalProperties': False\n        },\n        'extra_attributes': {\n            'type': 'object',\n            'patternProperties': {\n                '^[a-zA-Z0-9-_:. 
]{1,255}$': {\n                    'type': 'string', 'maxLength': 255\n                }\n            }\n        }\n    },\n    'anyOf': [\n        {'required': ['rest']},\n        {'required': ['ssh']},\n        {'required': ['cli']},\n        {'required': ['smis']}\n    ],\n    'additionalProperties': False\n}\n"
  },
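Illustrative sketch (not part of the repo): a body that satisfies the access_info update schema above. 'anyOf' demands at least one of rest/ssh/cli/smis, and each connection block enforces its own required keys; calling jsonschema directly is only a stand-in for delfin's validation layer, and the host/credential values are made up.

import jsonschema  # stand-in for delfin.api.validation

from delfin.api.schemas.access_info import update

body = {
    'rest': {
        'host': '192.168.0.10',  # hostname_or_ip_address
        'port': 8443,            # tcp_udp_port
        'username': 'admin',
        'password': 'secret',
    },
    # extra_attributes keys must match '^[a-zA-Z0-9-_:. ]{1,255}$'
    'extra_attributes': {'array_id': '0001'},
}

jsonschema.validate(body, update)  # raises ValidationError on mismatch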
  {
    "path": "delfin/api/schemas/alert_source.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.api.validation import parameter_types\n\n# engineId is in range (5-32) octet which is 10-64 hex characters\n# If it is odd length, 0 will be prefixed to last octet, so minimum length is 9\nput = {\n    'type': 'object',\n    'properties': {\n        'host': parameter_types.hostname_or_ip_address,\n        'version': parameter_types.snmp_version,\n        'community_string': {'type': 'string',\n                             'minLength': 1,\n                             'maxLength': 32},\n        'username': {'type': 'string', 'minLength': 1, 'maxLength': 32},\n        'security_level': parameter_types.snmp_security_level,\n        'auth_key': {'type': 'string', 'minLength': 8, 'maxLength': 65535},\n        'auth_protocol': parameter_types.snmp_auth_protocol,\n        'privacy_protocol': parameter_types.snmp_privacy_protocol,\n        'privacy_key': {'type': 'string', 'minLength': 8, 'maxLength': 65535},\n        'engine_id': {'type': 'string', 'minLength': 9, 'maxLength': 64},\n        'context_name': {'type': 'string', 'minLength': 0, 'maxLength': 32},\n        'retry_num': {'type': 'integer'},\n        'expiration': {'type': 'integer'},\n        'port': parameter_types.tcp_udp_port\n    },\n    'required': ['host', 'version'],\n    'additionalProperties': False,\n}\n"
  },
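Illustrative sketch (not part of the repo): an SNMPv3-style body for the put schema above. The enum literals come from parameter_types and the exact values shown are assumptions; the engine_id arithmetic follows the comment in the file (5-32 octets is 10-64 hex characters, odd lengths get a '0' prefix, so 9 is the shortest accepted string).

body = {
    'host': '192.168.0.20',
    'version': 'SNMPv3',           # assumed literal; see parameter_types.snmp_version
    'username': 'snmpuser',
    'security_level': 'authPriv',  # assumed literal; see snmp_security_level
    'auth_protocol': 'md5',        # assumed literal; see snmp_auth_protocol
    'auth_key': 'authkey123',      # 8..65535 characters
    'privacy_protocol': 'des',     # assumed literal; see snmp_privacy_protocol
    'privacy_key': 'privkey123',
    'engine_id': '800000d30',      # 9 hex chars: '0' is prefixed -> 5 octets
    'port': 162,
}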
  {
    "path": "delfin/api/schemas/alerts.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# begin_time and end_time are time in milliseconds\npost = {\n    'type': 'object',\n    'properties': {\n        'begin_time': {'type': 'integer'},\n        'end_time': {'type': 'integer'}\n    },\n    'additionalProperties': False,\n}\n"
  },
  {
    "path": "delfin/api/schemas/storage_capabilities_schema.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.common.constants import ResourceType, StorageMetric, \\\n    StoragePoolMetric, VolumeMetric, ControllerMetric, PortMetric, \\\n    DiskMetric, FileSystemMetric\n\nSTORAGE_CAPABILITIES_SCHEMA = {\n    'type': 'object',\n    'properties': {\n        'is_historic': {'type': 'boolean'},\n        'performance_metric_retention_window': {'type': 'integer'},\n        'resource_metrics': {\n            'type': 'object',\n            'properties': {\n                ResourceType.STORAGE: {\n                    'type': 'object',\n                    'properties': {\n                        StorageMetric.THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StorageMetric.THROUGHPUT\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StorageMetric.RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StorageMetric.RESPONSE_TIME\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StorageMetric.READ_RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StorageMetric\n                                                  .READ_RESPONSE_TIME.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StorageMetric.WRITE_RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StorageMetric\n                                                  .WRITE_RESPONSE_TIME.unit]\n                                    
     },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StorageMetric.IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StorageMetric.IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StorageMetric.READ_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StorageMetric\n                                                  .READ_THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StorageMetric.WRITE_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StorageMetric\n                                                  .WRITE_THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StorageMetric.READ_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StorageMetric.READ_IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StorageMetric.WRITE_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StorageMetric.WRITE_IOPS\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n\n                    },\n                    'additionalProperties': False\n                },\n                ResourceType.STORAGE_POOL: 
{\n                    'type': 'object',\n                    'properties': {\n                        StoragePoolMetric.THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StoragePoolMetric\n                                                  .THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StoragePoolMetric.RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StoragePoolMetric\n                                                  .RESPONSE_TIME.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StoragePoolMetric.IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StoragePoolMetric.IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StoragePoolMetric.READ_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StoragePoolMetric\n                                                  .READ_THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StoragePoolMetric.WRITE_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StoragePoolMetric\n                                                  .WRITE_THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StoragePoolMetric.READ_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                   
                      'enum': [StoragePoolMetric.READ_IOPS\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        StoragePoolMetric.WRITE_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [StoragePoolMetric.WRITE_IOPS\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                    },\n                    'additionalProperties': False\n                },\n                ResourceType.VOLUME: {\n                    'type': 'object',\n                    'properties': {\n                        VolumeMetric.THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric.THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric.RESPONSE_TIME\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.READ_RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric\n                                                  .READ_RESPONSE_TIME.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.WRITE_RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric\n                                                  
.WRITE_RESPONSE_TIME.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric.IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.READ_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric.READ_THROUGHPUT\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.WRITE_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric\n                                                  .WRITE_THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.READ_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric.READ_IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.WRITE_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric.WRITE_IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.CACHE_HIT_RATIO.name: {\n                            'type': 'object',\n                            
'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric.CACHE_HIT_RATIO\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.READ_CACHE_HIT_RATIO.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric\n                                                  .READ_CACHE_HIT_RATIO.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.WRITE_CACHE_HIT_RATIO.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric\n                                                  .WRITE_CACHE_HIT_RATIO.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.IO_SIZE.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric.IO_SIZE.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.READ_IO_SIZE.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric.READ_IO_SIZE\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        VolumeMetric.WRITE_IO_SIZE.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [VolumeMetric.WRITE_IO_SIZE\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n    
                                            'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n\n                    },\n                    'additionalProperties': False\n                },\n                ResourceType.CONTROLLER: {\n                    'type': 'object',\n                    'properties': {\n                        ControllerMetric.THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [ControllerMetric.THROUGHPUT\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        ControllerMetric.RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [ControllerMetric\n                                                  .RESPONSE_TIME.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        ControllerMetric.IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [ControllerMetric.IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        ControllerMetric.READ_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [ControllerMetric\n                                                  .READ_THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        ControllerMetric.WRITE_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [ControllerMetric\n                                                  .WRITE_THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                           
     'maxLength': 255}\n                            },\n                        },\n                        ControllerMetric.READ_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [ControllerMetric.READ_IOPS\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        ControllerMetric.WRITE_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [ControllerMetric.WRITE_IOPS\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        ControllerMetric.CPU_USAGE.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [ControllerMetric.CPU_USAGE\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        }\n                    },\n                    'additionalProperties': False\n                },\n                ResourceType.PORT: {\n                    'type': 'object',\n                    'properties': {\n                        PortMetric.THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [PortMetric.THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        PortMetric.RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [PortMetric.RESPONSE_TIME\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        PortMetric.IOPS.name: {\n                  
          'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [PortMetric.IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        PortMetric.READ_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [PortMetric.READ_THROUGHPUT\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        PortMetric.WRITE_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [PortMetric.WRITE_THROUGHPUT\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        PortMetric.READ_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [PortMetric.READ_IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        PortMetric.WRITE_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [PortMetric.WRITE_IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                    },\n                    'additionalProperties': False\n                },\n                ResourceType.DISK: {\n                    'type': 'object',\n                    'properties': {\n                        DiskMetric.THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [DiskMetric.THROUGHPUT.unit]\n                                         },\n                    
            'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        DiskMetric.RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [DiskMetric.RESPONSE_TIME\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        DiskMetric.IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [DiskMetric.IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        DiskMetric.READ_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [DiskMetric.READ_IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        DiskMetric.WRITE_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [DiskMetric.WRITE_IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        DiskMetric.READ_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [DiskMetric.READ_THROUGHPUT\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        DiskMetric.WRITE_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         
'enum': [DiskMetric.WRITE_THROUGHPUT\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                    },\n                    'additionalProperties': False\n                },\n                ResourceType.FILESYSTEM: {\n                    'type': 'object',\n                    'properties': {\n                        FileSystemMetric.THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [FileSystemMetric.THROUGHPUT\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        FileSystemMetric.IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [FileSystemMetric.IOPS.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        FileSystemMetric.READ_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [FileSystemMetric\n                                                  .READ_THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        FileSystemMetric.WRITE_THROUGHPUT.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [FileSystemMetric\n                                                  .WRITE_THROUGHPUT.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        FileSystemMetric.READ_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [FileSystemMetric.READ_IOPS\n                                                  .unit]\n                  
                       },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        FileSystemMetric.WRITE_IOPS.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [FileSystemMetric.WRITE_IOPS\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        FileSystemMetric.READ_RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [FileSystemMetric\n                                                  .READ_RESPONSE_TIME.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        FileSystemMetric.WRITE_RESPONSE_TIME.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [FileSystemMetric\n                                                  .WRITE_RESPONSE_TIME.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        FileSystemMetric.IO_SIZE.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [FileSystemMetric.IO_SIZE\n                                                  .unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                        FileSystemMetric.READ_IO_SIZE.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [FileSystemMetric\n                                                  .READ_IO_SIZE.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                           
 },\n                        },\n                        FileSystemMetric.WRITE_IO_SIZE.name: {\n                            'type': 'object',\n                            'properties': {\n                                'unit': {'type': 'string',\n                                         'enum': [FileSystemMetric\n                                                  .WRITE_IO_SIZE.unit]\n                                         },\n                                'description': {'type': 'string',\n                                                'minLength': 1,\n                                                'maxLength': 255}\n                            },\n                        },\n                    },\n                    'additionalProperties': False\n                },\n            },\n            'additionalProperties': False\n        },\n    },\n    'additionalProperties': False,\n    'required': ['is_historic']\n}\n"
  },
  {
    "path": "delfin/api/schemas/storages.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.api.validation import parameter_types\n\ncreate = {\n    'type': 'object',\n    'properties': {\n        'vendor': {'type': 'string', 'minLength': 1, 'maxLength': 255},\n        'model': {'type': 'string', 'minLength': 1, 'maxLength': 255},\n        'storage_name': {'type': 'string', 'minLength': 1, 'maxLength': 255},\n        'rest': {\n            'type': 'object',\n            'properties': {\n                'host': parameter_types.hostname_or_ip_address,\n                'port': parameter_types.tcp_udp_port,\n                'username': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'password': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255}\n            },\n            'required': ['host', 'port', 'username', 'password'],\n            'additionalProperties': False\n        },\n        'ssh': {\n            'type': 'object',\n            'properties': {\n                'host': parameter_types.hostname_or_ip_address,\n                'port': parameter_types.tcp_udp_port,\n                'username': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'password': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'pub_key': {'type': 'string', 'minLength': 1,\n                            'maxLength': 4096},\n                'pub_key_type': parameter_types.host_key_type\n            },\n            'required': ['host', 'port', 'username', 'password', 'pub_key'],\n            'additionalProperties': False\n        },\n        'cli': {\n            'type': 'object',\n            'properties': {\n                'host': parameter_types.hostname_or_ip_address,\n                'port': parameter_types.tcp_udp_port,\n                'username': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'password': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255}\n            },\n            'required': ['host', 'username', 'password'],\n            'additionalProperties': False\n        },\n        'smis': {\n            'type': 'object',\n            'properties': {\n                'host': parameter_types.hostname_or_ip_address,\n                'port': parameter_types.tcp_udp_port,\n                'username': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'password': {'type': 'string', 'minLength': 1,\n                             'maxLength': 255},\n                'namespace': {'type': 'string', 'minLength': 1,\n                              'maxLength': 255}\n            },\n            'required': ['host', 'username', 'password'],\n            'additionalProperties': False\n        },\n        'extra_attributes': {\n            'type': 'object',\n            
'patternProperties': {\n                '^[a-zA-Z0-9-_:. ]{1,255}$': {\n                    'type': 'string', 'maxLength': 255\n                }\n            }\n        }\n    },\n    'required': ['vendor', 'model'],\n    'anyOf': [\n        {'required': ['rest']},\n        {'required': ['ssh']},\n        {'required': ['cli']},\n        {'required': ['smis']}\n    ],\n    'additionalProperties': False\n}\n"
  },
  {
    "path": "delfin/api/v1/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/api/v1/access_info.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\nfrom delfin import db\nfrom delfin import cryptor\nfrom delfin.api import validation\nfrom delfin.api.common import wsgi\nfrom delfin.api.schemas import access_info as schema_access_info\nfrom delfin.api.views import access_info as access_info_viewer\nfrom delfin.db.sqlalchemy.models import AccessInfo\nfrom delfin.common import constants\nfrom delfin.drivers import api as driverapi\n\n\nclass AccessInfoController(wsgi.Controller):\n\n    def __init__(self):\n        super(AccessInfoController, self).__init__()\n        self._view_builder = access_info_viewer.ViewBuilder()\n        self.driver_api = driverapi.API()\n\n    def show(self, req, id):\n        \"\"\"Show access information by storage id.\"\"\"\n        ctxt = req.environ['delfin.context']\n        access_info = db.access_info_get(ctxt, id)\n        return self._view_builder.show(access_info)\n\n    def _cm_access_info_update(self, ctxt, access_info, body):\n        access_info_dict = copy.deepcopy(access_info)\n        unused = ['created_at', 'updated_at', 'storage_name',\n                  'storage_id', 'extra_attributes']\n        access_info_dict = AccessInfo.to_dict(access_info_dict)\n        for field in unused:\n            if access_info_dict.get(field):\n                access_info_dict.pop(field)\n        for access in constants.ACCESS_TYPE:\n            if access_info_dict.get(access):\n                access_info_dict.pop(access)\n\n        access_info_list = db.access_info_get_all(\n            ctxt, filters=access_info_dict)\n\n        for cm_access_info in access_info_list:\n            if cm_access_info['storage_id'] == access_info['storage_id']:\n                continue\n            for access in constants.ACCESS_TYPE:\n                if cm_access_info.get(access):\n                    cm_access_info[access]['password'] = cryptor.decode(\n                        cm_access_info[access]['password'])\n                if body.get(access):\n                    cm_access_info[access].update(body[access])\n            self.driver_api.update_access_info(ctxt, cm_access_info)\n\n    @validation.schema(schema_access_info.update)\n    def update(self, req, id, body):\n        \"\"\"Update storage access information.\"\"\"\n        ctxt = req.environ.get('delfin.context')\n        access_info = db.access_info_get(ctxt, id)\n        self._cm_access_info_update(ctxt, access_info, body)\n        for access in constants.ACCESS_TYPE:\n            if access_info.get(access):\n                access_info[access]['password'] = cryptor.decode(\n                    access_info[access]['password'])\n            if body.get(access):\n                access_info[access].update(body[access])\n        access_info = self.driver_api.update_access_info(ctxt, access_info)\n        return self._view_builder.show(access_info)\n\n    def show_all(self, req):\n        \"\"\"Show all access information.\"\"\"\n        ctxt = 
req.environ.get('delfin.context')\n        access_infos = db.access_info_get_all(ctxt)\n        return self._view_builder.show_all(access_infos)\n\n\ndef create_resource():\n    return wsgi.Resource(AccessInfoController())\n"
  },
  {
    "path": "delfin/api/v1/alert_source.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_log import log\nfrom pyasn1.type.univ import OctetString\n\nfrom delfin import db, cryptor\nfrom delfin import exception\nfrom delfin.alert_manager import rpcapi\nfrom delfin.api import validation\nfrom delfin.api.common import wsgi\nfrom delfin.api.schemas import alert_source as schema_alert\nfrom delfin.api.views import alert_source as alert_view\nfrom delfin.common import constants\n\nLOG = log.getLogger(__name__)\n\nSNMPv3_keys = ('username', 'auth_key', 'security_level', 'auth_protocol',\n               'privacy_protocol', 'privacy_key', 'engine_id')\n\n\nclass AlertSourceController(wsgi.Controller):\n    def __init__(self):\n        super().__init__()\n        self.alert_rpcapi = rpcapi.AlertAPI()\n\n    @wsgi.response(200)\n    @validation.schema(schema_alert.put)\n    def put(self, req, id, body):\n        \"\"\"Create a new alert source or update an exist one.\"\"\"\n        ctx = req.environ['delfin.context']\n        alert_source = body\n\n        alert_source[\"storage_id\"] = id\n        db.storage_get(ctx, id)\n        alert_source = self._input_check(alert_source)\n\n        snmp_config_to_del = self._get_snmp_config_brief(ctx, id)\n        if snmp_config_to_del is not None:\n            alert_source = db.alert_source_update(ctx, id, alert_source)\n        else:\n            alert_source = db.alert_source_create(ctx, alert_source)\n        snmp_config_to_add = alert_source\n        self.alert_rpcapi.sync_snmp_config(ctx, snmp_config_to_del,\n                                           snmp_config_to_add)\n\n        return alert_view.build_alert_source(alert_source.to_dict())\n\n    @wsgi.response(200)\n    def show(self, req, id):\n        ctx = req.environ['delfin.context']\n        alert_source = db.alert_source_get(ctx, id)\n\n        return alert_view.build_alert_source(alert_source.to_dict())\n\n    @wsgi.response(200)\n    def delete(self, req, id):\n        ctx = req.environ['delfin.context']\n\n        snmp_config_to_del = self._get_snmp_config_brief(ctx, id)\n        if snmp_config_to_del is not None:\n            self.alert_rpcapi.sync_snmp_config(ctx, snmp_config_to_del,\n                                               None)\n            db.alert_source_delete(ctx, id)\n        else:\n            raise exception.AlertSourceNotFound(id)\n\n    def _input_check(self, alert_source):\n        version = alert_source.get('version')\n\n        if version.lower() == 'snmpv3':\n            user_name = alert_source.get('username')\n            security_level = alert_source.get('security_level')\n            engine_id = alert_source.get('engine_id')\n\n            # Validate engine_id, check octet string can be formed from it\n            if engine_id:\n                try:\n                    OctetString.fromHexString(engine_id)\n                except (TypeError, ValueError):\n                    msg = \"engine_id should be a set of octets in \" \\\n           
               \"hexadecimal format.\"\n                    raise exception.InvalidInput(msg)\n\n            if not user_name or not security_level:\n                msg = \"If snmp version is SNMPv3, then username, \" \\\n                      \"security_level are required.\"\n                raise exception.InvalidInput(msg)\n\n            if security_level == constants.SecurityLevel.AUTHNOPRIV\\\n                    or security_level == constants.SecurityLevel.AUTHPRIV:\n                auth_protocol = alert_source.get('auth_protocol')\n                auth_key = alert_source.get('auth_key')\n                if not auth_protocol or not auth_key:\n                    msg = \"If snmp version is SNMPv3 and security_level is \" \\\n                          \"authPriv or authNoPriv, auth_protocol and \" \\\n                          \"auth_key are required.\"\n                    raise exception.InvalidInput(msg)\n                alert_source['auth_key'] = cryptor.encode(\n                    alert_source['auth_key'])\n\n                if security_level == constants.SecurityLevel.AUTHPRIV:\n                    privacy_protocol = alert_source.get('privacy_protocol')\n                    privacy_key = alert_source.get('privacy_key')\n                    if not privacy_protocol or not privacy_key:\n                        msg = \"If snmp version is SNMPv3 and security_level\" \\\n                              \" is authPriv, privacy_protocol and \" \\\n                              \"privacy_key are  required.\"\n                        raise exception.InvalidInput(msg)\n                    alert_source['privacy_key'] = cryptor.encode(\n                        alert_source['privacy_key'])\n                else:\n                    alert_source['privacy_key'] = None\n                    alert_source['privacy_protocol'] = None\n            else:\n                alert_source['auth_key'] = None\n                alert_source['auth_protocol'] = None\n                alert_source['privacy_key'] = None\n                alert_source['privacy_protocol'] = None\n\n            # Clear keys for other versions.\n            alert_source['community_string'] = None\n        else:\n            community_string = alert_source.get('community_string')\n            if not community_string:\n                msg = \"If snmp version is SNMPv1 or SNMPv2c, \" \\\n                      \"community_string is required.\"\n                raise exception.InvalidInput(msg)\n            alert_source['community_string'] = cryptor.encode(\n                alert_source['community_string'])\n\n            # Clear keys for SNMPv3\n            for k in SNMPv3_keys:\n                alert_source[k] = None\n\n        return alert_source\n\n    def _get_snmp_config_brief(self, ctx, storage_id):\n        \"\"\"\n        Get snmp configuration that will be used to delete from trap receiver.\n        Only community_index(storage_id) required for snmp v1/v2 deletion,\n        user_name and engine_id are required for snmp v3. So here we only get\n        those required parameters. 
Return None if configuration not found.\n        \"\"\"\n        try:\n            alert_source = db.alert_source_get(ctx, storage_id)\n            snmp_config = {\"storage_id\": alert_source[\"storage_id\"],\n                           \"version\": alert_source[\"version\"]}\n            if snmp_config[\"version\"].lower() == \"snmpv3\":\n                snmp_config[\"username\"] = alert_source[\"username\"]\n                snmp_config[\"engine_id\"] = alert_source[\"engine_id\"]\n            return snmp_config\n        except exception.AlertSourceNotFound:\n            return None\n\n    def _decrypt_auth_key(self, alert_source):\n        auth_key = alert_source.get('auth_key', None)\n        privacy_key = alert_source.get('privacy_key', None)\n        if auth_key:\n            alert_source['auth_key'] = cryptor.decode(auth_key)\n        if privacy_key:\n            alert_source['privacy_key'] = cryptor.decode(privacy_key)\n\n        return alert_source\n\n    def show_all(self, req):\n        \"\"\"Show all snmp configs.\"\"\"\n        ctx = req.environ['delfin.context']\n        snmp_configs = db.alert_source_get_all(ctx)\n\n        return alert_view.show_all_snmp_configs(snmp_configs)\n\n\ndef create_resource():\n    return wsgi.Resource(AlertSourceController())\n"
  },
  {
    "path": "delfin/api/v1/alerts.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_log import log\n\nfrom delfin import db\nfrom delfin import exception\nfrom delfin.api import validation\nfrom delfin.api.common import wsgi\nfrom delfin.api.schemas import alerts as schema_alerts\nfrom delfin.api.views import alerts as alerts_view\nfrom delfin.common import alert_util\nfrom delfin.drivers import api as driver_manager\nfrom delfin.task_manager import rpcapi as task_rpcapi\n\nLOG = log.getLogger(__name__)\n\n\nclass AlertController(wsgi.Controller):\n    def __init__(self):\n        super().__init__()\n        self.task_rpcapi = task_rpcapi.TaskAPI()\n        self.driver_manager = driver_manager.API()\n\n    @wsgi.response(200)\n    def show(self, req, id):\n        ctx = req.environ['delfin.context']\n\n        query_para = {}\n        query_para.update(req.GET)\n\n        try:\n            begin_time = None\n            end_time = None\n\n            if query_para.get('begin_time'):\n                begin_time = int(query_para.get('begin_time'))\n\n            if query_para.get('end_time'):\n                end_time = int(query_para.get('end_time'))\n        except Exception:\n            msg = \"begin_time and end_time should be integer values in \" \\\n                  \"milliseconds.\"\n            raise exception.InvalidInput(msg)\n\n        # When both begin_time and end_time are provided, end_time should\n        # be greater than begin_time\n        if begin_time and end_time and end_time <= begin_time:\n            msg = \"end_time should be greater than begin_time.\"\n            raise exception.InvalidInput(msg)\n\n        storage = db.storage_get(ctx, id)\n        alert_list = self.driver_manager.list_alerts(ctx, id, query_para)\n\n        # Update storage attributes in each alert model\n        for alert in alert_list:\n            alert_util.fill_storage_attributes(alert, storage)\n\n        return alerts_view.build_alerts(alert_list)\n\n    @wsgi.response(200)\n    def delete(self, req, id, sequence_number):\n        ctx = req.environ['delfin.context']\n        _ = db.storage_get(ctx, id)\n        self.driver_manager.clear_alert(ctx, id, sequence_number)\n\n    @validation.schema(schema_alerts.post)\n    @wsgi.response(200)\n    def sync(self, req, id, body):\n        ctx = req.environ['delfin.context']\n\n        # begin_time and end_time are optional parameters\n        begin_time = body.get('begin_time')\n        end_time = body.get('end_time')\n\n        # When both begin_time and end_time are provided, end_time should\n        # be greater than begin_time\n        if begin_time and end_time and end_time <= begin_time:\n            msg = \"end_time should be greater than begin_time.\"\n            raise exception.InvalidInput(msg)\n\n        # Check for the storage existence\n        _ = db.storage_get(ctx, id)\n\n        query_para = {'begin_time': body.get('begin_time'),\n                      'end_time': body.get('end_time')}\n\n        
# Trigger asynchronous alert syncing from storage backend\n        self.task_rpcapi.sync_storage_alerts(ctx, id, query_para)\n\n\ndef create_resource():\n    return wsgi.Resource(AlertController())\n"
  },
  {
    "path": "delfin/api/v1/controllers.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import controllers as controller_view\n\n\nclass ControllerController(wsgi.Controller):\n\n    def __init__(self):\n        super(ControllerController, self).__init__()\n        self.search_options = ['name', 'status', 'id', 'storage_id',\n                               'native_controller_id']\n\n    def _get_controllers_search_options(self):\n        \"\"\"Return controllers search options allowed .\"\"\"\n        return self.search_options\n\n    def index(self, req):\n        ctxt = req.environ['delfin.context']\n        query_params = {}\n        query_params.update(req.GET)\n        # update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # strip out options except supported search  options\n        api_utils.remove_invalid_options(\n            ctxt, query_params, self._get_controllers_search_options())\n\n        controllers = db.controller_get_all(ctxt, marker, limit, sort_keys,\n                                            sort_dirs, query_params, offset)\n        return controller_view.build_controllers(controllers)\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        controller = db.controller_get(ctxt, id)\n        return controller_view.build_controller(controller)\n\n\ndef create_resource():\n    return wsgi.Resource(ControllerController())\n"
  },
  {
    "path": "delfin/api/v1/disks.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import disks as disk_view\n\n\nclass DiskController(wsgi.Controller):\n\n    def __init__(self):\n        super(DiskController, self).__init__()\n        self.search_options = ['name', 'status', 'id', 'storage_id',\n                               'native_disk_id']\n\n    def _get_disks_search_options(self):\n        \"\"\"Return disks search options allowed .\"\"\"\n        return self.search_options\n\n    def index(self, req):\n        ctxt = req.environ['delfin.context']\n        query_params = {}\n        query_params.update(req.GET)\n        # update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # strip out options except supported search  options\n        api_utils.remove_invalid_options(ctxt, query_params,\n                                         self._get_disks_search_options())\n\n        disks = db.disk_get_all(ctxt, marker, limit, sort_keys,\n                                sort_dirs, query_params, offset)\n        return disk_view.build_disks(disks)\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        disk = db.disk_get(ctxt, id)\n        return disk_view.build_disk(disk)\n\n\ndef create_resource():\n    return wsgi.Resource(DiskController())\n"
  },
  {
    "path": "delfin/api/v1/filesystems.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import filesystems as filesystem_view\n\n\nclass FilesystemController(wsgi.Controller):\n\n    def __init__(self):\n        super(FilesystemController, self).__init__()\n        self.search_options = ['name', 'status', 'id', 'storage_id',\n                               'native_filesystem_id']\n\n    def _get_fs_search_options(self):\n        \"\"\"Return filesystems search options allowed .\"\"\"\n        return self.search_options\n\n    def index(self, req):\n        ctxt = req.environ['delfin.context']\n        query_params = {}\n        query_params.update(req.GET)\n        # update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # strip out options except supported search  options\n        api_utils.remove_invalid_options(ctxt, query_params,\n                                         self._get_fs_search_options())\n\n        filesystems = db.filesystem_get_all(ctxt, marker, limit, sort_keys,\n                                            sort_dirs, query_params, offset)\n        return filesystem_view.build_filesystems(filesystems)\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        filesystem = db.filesystem_get(ctxt, id)\n        return filesystem_view.build_filesystem(filesystem)\n\n\ndef create_resource():\n    return wsgi.Resource(FilesystemController())\n"
  },
  {
    "path": "delfin/api/v1/masking_views.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import masking_views\n\n\nclass MaskingViewController(wsgi.Controller):\n\n    def __init__(self):\n        super(MaskingViewController, self).__init__()\n        self.search_options = ['name', 'id', 'storage_id',\n                               'native_storage_host_group_id',\n                               'native_storage_port_group_id',\n                               'native_storage_volume_group_id',\n                               'native_storage_host_id',\n                               'native_volume_id',\n                               'native_masking_view_id']\n\n    def _get_masking_view_search_options(self):\n        \"\"\"Return masking view search options allowed .\"\"\"\n        return self.search_options\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        query_params = {\"storage_id\": id}\n        query_params.update(req.GET)\n        # Update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # Strip out options except supported search  options\n        api_utils.remove_invalid_options(\n            ctxt, query_params, self._get_masking_view_search_options())\n\n        masking_view_lists = db.masking_views_get_all(ctxt, marker, limit,\n                                                      sort_keys, sort_dirs,\n                                                      query_params, offset)\n        return masking_views.build_masking_views(masking_view_lists)\n\n\ndef create_resource():\n    return wsgi.Resource(MaskingViewController())\n"
  },
  {
    "path": "delfin/api/v1/port_groups.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import port_groups as port_group_view\n\n\nclass PortGroupController(wsgi.Controller):\n\n    def __init__(self):\n        super(PortGroupController, self).__init__()\n        self.search_options = ['name', 'id', 'storage_id',\n                               'native_port_group_id']\n\n    def _get_port_group_search_options(self):\n        \"\"\"Return port group search options allowed .\"\"\"\n        return self.search_options\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        query_params = {\"storage_id\": id}\n        query_params.update(req.GET)\n        # Update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # Strip out options except supported search  options\n        api_utils.remove_invalid_options(\n            ctxt, query_params, self._get_port_group_search_options())\n\n        port_groups = db.port_groups_get_all(\n            ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset)\n\n        # Get Port Group to Port relation from DB\n        for port_group in port_groups:\n            params = {\n                \"native_port_group_id\":\n                port_group['native_port_group_id']\n            }\n            ports = db.port_grp_port_rels_get_all(\n                ctxt, filters=params)\n\n            native_port_id_list = []\n            for port in ports:\n                native_port_id_list.append(port['native_port_id'])\n\n            port_group['ports'] = native_port_id_list\n\n        return port_group_view.build_port_groups(port_groups)\n\n\ndef create_resource():\n    return wsgi.Resource(PortGroupController())\n"
  },
  {
    "path": "delfin/api/v1/ports.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import ports as port_view\n\n\nclass PortController(wsgi.Controller):\n\n    def __init__(self):\n        super(PortController, self).__init__()\n        self.search_options = ['name', 'status', 'id', 'storage_id', 'wwn',\n                               'native_controller_id', 'native_port_id']\n\n    def _get_ports_search_options(self):\n        \"\"\"Return ports search options allowed .\"\"\"\n        return self.search_options\n\n    def index(self, req):\n        ctxt = req.environ['delfin.context']\n        query_params = {}\n        query_params.update(req.GET)\n        # update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # strip out options except supported search  options\n        api_utils.remove_invalid_options(ctxt, query_params,\n                                         self._get_ports_search_options())\n\n        ports = db.port_get_all(ctxt, marker, limit, sort_keys,\n                                sort_dirs, query_params, offset)\n        return port_view.build_ports(ports)\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        port = db.port_get(ctxt, id)\n        return port_view.build_port(port)\n\n\ndef create_resource():\n    return wsgi.Resource(PortController())\n"
  },
  {
    "path": "delfin/api/v1/qtrees.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import qtrees as qtree_view\n\n\nclass QtreeController(wsgi.Controller):\n\n    def __init__(self):\n        super(QtreeController, self).__init__()\n        self.search_options = ['name', 'state', 'id', 'storage_id',\n                               'native_filesystem_id',\n                               'native_qtree_id']\n\n    def _get_qtrees_search_options(self):\n        \"\"\"Return qtrees search options allowed .\"\"\"\n        return self.search_options\n\n    def index(self, req):\n        ctxt = req.environ['delfin.context']\n        query_params = {}\n        query_params.update(req.GET)\n        # update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # strip out options except supported search  options\n        api_utils.remove_invalid_options(ctxt, query_params,\n                                         self._get_qtrees_search_options())\n\n        qtrees = db.qtree_get_all(ctxt, marker, limit, sort_keys,\n                                  sort_dirs, query_params, offset)\n        return qtree_view.build_qtrees(qtrees)\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        qtree = db.qtree_get(ctxt, id)\n        return qtree_view.build_qtree(qtree)\n\n\ndef create_resource():\n    return wsgi.Resource(QtreeController())\n"
  },
  {
    "path": "delfin/api/v1/quotas.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import quotas as quota_view\n\n\nclass QuotaController(wsgi.Controller):\n\n    def __init__(self):\n        super(QuotaController, self).__init__()\n        self.search_options = ['name', 'status', 'id', 'storage_id',\n                               'type', 'native_quota_id',\n                               'native_filesystem_id', 'native_qtree_id',\n                               'user_group_name']\n\n    def _get_fs_search_options(self):\n        \"\"\"Return quotas search options allowed .\"\"\"\n        return self.search_options\n\n    def index(self, req):\n        ctxt = req.environ['delfin.context']\n        query_params = {}\n        query_params.update(req.GET)\n        # update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # strip out options except supported search  options\n        api_utils.remove_invalid_options(ctxt, query_params,\n                                         self._get_fs_search_options())\n\n        quotas = db.quota_get_all(ctxt, marker, limit, sort_keys,\n                                  sort_dirs, query_params, offset)\n        return quota_view.build_quotas(quotas)\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        quota = db.quota_get(ctxt, id)\n        return quota_view.build_quota(quota)\n\n\ndef create_resource():\n    return wsgi.Resource(QuotaController())\n"
  },
  {
    "path": "delfin/api/v1/router.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.api import common\nfrom delfin.api import extensions\nfrom delfin.api.v1 import access_info\nfrom delfin.api.v1 import alert_source\nfrom delfin.api.v1 import alerts\nfrom delfin.api.v1 import controllers\nfrom delfin.api.v1 import disks\nfrom delfin.api.v1 import filesystems\nfrom delfin.api.v1 import ports\nfrom delfin.api.v1 import qtrees\nfrom delfin.api.v1 import quotas\nfrom delfin.api.v1 import shares\nfrom delfin.api.v1 import storage_pools\nfrom delfin.api.v1 import storages\nfrom delfin.api.v1 import volumes\nfrom delfin.api.v1 import storage_hosts\nfrom delfin.api.v1 import storage_host_initiators\nfrom delfin.api.v1 import storage_host_groups\nfrom delfin.api.v1 import port_groups\nfrom delfin.api.v1 import volume_groups\nfrom delfin.api.v1 import masking_views\n\n\nclass APIRouter(common.APIRouter):\n\n    ExtensionManager = extensions.ExtensionManager\n\n    def _setup_routes(self, mapper):\n        mapper.redirect(\"\", \"/\")\n\n        self.resources['storages'] = storages.create_resource()\n        mapper.resource(\"storage\", \"storages\",\n                        controller=self.resources['storages'],\n                        member={'sync': 'POST'})\n\n        mapper.connect(\"storages\", \"/storages/sync\",\n                       controller=self.resources['storages'],\n                       action=\"sync_all\",\n                       conditions={\"method\": [\"POST\"]})\n\n        mapper.connect(\"storages\", \"/storages/{id}/capabilities\",\n                       controller=self.resources['storages'],\n                       action=\"get_capabilities\",\n                       conditions={\"method\": [\"GET\"]})\n\n        self.resources['access_info'] = access_info.create_resource()\n        mapper.connect(\"storages\", \"/storages/{id}/access-info\",\n                       controller=self.resources['access_info'],\n                       action=\"show\",\n                       conditions={\"method\": [\"GET\"]})\n\n        mapper.connect(\"storages\", \"/storages/{id}/access-info\",\n                       controller=self.resources['access_info'],\n                       action=\"update\",\n                       conditions={\"method\": [\"PUT\"]})\n\n        mapper.connect(\"storages\", \"/access-infos\",\n                       controller=self.resources['access_info'],\n                       action=\"show_all\",\n                       conditions={\"method\": [\"GET\"]})\n\n        self.resources['alert_sources'] = alert_source.create_resource()\n        mapper.connect(\"storages\", \"/storages/{id}/snmp-config\",\n                       controller=self.resources['alert_sources'],\n                       action=\"put\",\n                       conditions={\"method\": [\"PUT\"]})\n        mapper.connect(\"storages\", \"/storages/{id}/snmp-config\",\n                       controller=self.resources['alert_sources'],\n                       
action=\"show\",\n                       conditions={\"method\": [\"GET\"]})\n        mapper.connect(\"storages\", \"/storages/{id}/snmp-config\",\n                       controller=self.resources['alert_sources'],\n                       action=\"delete\",\n                       conditions={\"method\": [\"DELETE\"]})\n        mapper.connect(\"storages\", \"/snmp-configs\",\n                       controller=self.resources['alert_sources'],\n                       action=\"show_all\",\n                       conditions={\"method\": [\"GET\"]})\n\n        self.resources['alerts'] = alerts.create_resource()\n        mapper.connect(\"storages\", \"/storages/{id}/alerts/{sequence_number}\",\n                       controller=self.resources['alerts'],\n                       action=\"delete\",\n                       conditions={\"method\": [\"DELETE\"]})\n\n        mapper.connect(\"storages\", \"/storages/{id}/alerts\",\n                       controller=self.resources['alerts'],\n                       action=\"show\",\n                       conditions={\"method\": [\"GET\"]})\n\n        mapper.connect(\"storages\", \"/storages/{id}/alerts/sync\",\n                       controller=self.resources['alerts'],\n                       action=\"sync\",\n                       conditions={\"method\": [\"POST\"]})\n\n        self.resources['storage-pools'] = storage_pools.create_resource()\n        mapper.resource(\"storage-pool\", \"storage-pools\",\n                        controller=self.resources['storage-pools'])\n\n        self.resources['volumes'] = volumes.create_resource()\n        mapper.resource(\"volume\", \"volumes\",\n                        controller=self.resources['volumes'])\n\n        self.resources['controllers'] = controllers.create_resource()\n        mapper.resource(\"controller\", \"controllers\",\n                        controller=self.resources['controllers'])\n\n        self.resources['ports'] = ports.create_resource()\n        mapper.resource(\"port\", \"ports\",\n                        controller=self.resources['ports'])\n\n        self.resources['disks'] = disks.create_resource()\n        mapper.resource(\"disk\", \"disks\",\n                        controller=self.resources['disks'])\n\n        self.resources['filesystems'] = filesystems.create_resource()\n        mapper.resource(\"filesystems\", \"filesystems\",\n                        controller=self.resources['filesystems'])\n\n        self.resources['qtrees'] = qtrees.create_resource()\n        mapper.resource(\"qtrees\", \"qtrees\",\n                        controller=self.resources['qtrees'])\n\n        self.resources['quotas'] = quotas.create_resource()\n        mapper.resource(\"quotas\", \"quotas\",\n                        controller=self.resources['quotas'])\n\n        self.resources['shares'] = shares.create_resource()\n        mapper.resource(\"shares\", \"shares\",\n                        controller=self.resources['shares'])\n\n        self.resources['storage_host_initiators'] \\\n            = storage_host_initiators.create_resource()\n        mapper.connect(\"storages\", \"/storages/{id}/storage-host-initiators\",\n                       controller=self.resources['storage_host_initiators'],\n                       action=\"show\",\n                       conditions={\"method\": [\"GET\"]})\n\n        self.resources['storage_hosts'] = storage_hosts.create_resource()\n        mapper.connect(\"storages\", \"/storages/{id}/storage-hosts\",\n                       
controller=self.resources['storage_hosts'],\n                       action=\"show\",\n                       conditions={\"method\": [\"GET\"]})\n\n        self.resources['storage_host_groups'] \\\n            = storage_host_groups.create_resource()\n        mapper.connect(\"storages\", \"/storages/{id}/storage-host-groups\",\n                       controller=self.resources['storage_host_groups'],\n                       action=\"show\",\n                       conditions={\"method\": [\"GET\"]})\n\n        self.resources['port_groups'] \\\n            = port_groups.create_resource()\n        mapper.connect(\"storages\", \"/storages/{id}/port-groups\",\n                       controller=self.resources['port_groups'],\n                       action=\"show\",\n                       conditions={\"method\": [\"GET\"]})\n\n        self.resources['volume_groups'] \\\n            = volume_groups.create_resource()\n        mapper.connect(\"storages\", \"/storages/{id}/volume-groups\",\n                       controller=self.resources['volume_groups'],\n                       action=\"show\",\n                       conditions={\"method\": [\"GET\"]})\n\n        self.resources['masking_views'] \\\n            = masking_views.create_resource()\n        mapper.connect(\"storages\", \"/storages/{id}/masking-views\",\n                       controller=self.resources['masking_views'],\n                       action=\"show\",\n                       conditions={\"method\": [\"GET\"]})\n"
  },
  {
    "path": "delfin/api/v1/shares.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import shares as share_view\n\n\nclass ShareController(wsgi.Controller):\n\n    def __init__(self):\n        super(ShareController, self).__init__()\n        self.search_options = ['name', 'status', 'id', 'storage_id',\n                               'native_share_id']\n\n    def _get_fs_search_options(self):\n        \"\"\"Return shares search options allowed .\"\"\"\n        return self.search_options\n\n    def index(self, req):\n        ctxt = req.environ['delfin.context']\n        query_params = {}\n        query_params.update(req.GET)\n        # update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # strip out options except supported search  options\n        api_utils.remove_invalid_options(ctxt, query_params,\n                                         self._get_fs_search_options())\n\n        shares = db.share_get_all(ctxt, marker, limit, sort_keys,\n                                  sort_dirs, query_params, offset)\n        return share_view.build_shares(shares)\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        share = db.share_get(ctxt, id)\n        return share_view.build_share(share)\n\n\ndef create_resource():\n    return wsgi.Resource(ShareController())\n"
  },
  {
    "path": "delfin/api/v1/storage_host_groups.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import storage_host_groups as storage_host_group_view\n\n\nclass StorageHostGroupController(wsgi.Controller):\n\n    def __init__(self):\n        super(StorageHostGroupController, self).__init__()\n        self.search_options = ['name', 'id', 'storage_id',\n                               'native_storage_host_group_id']\n\n    def _get_storage_host_group_search_options(self):\n        \"\"\"Return storage host group search options allowed .\"\"\"\n        return self.search_options\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        query_params = {\"storage_id\": id}\n        query_params.update(req.GET)\n        # Update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # Strip out options except supported search  options\n        api_utils.remove_invalid_options(\n            ctxt, query_params, self._get_storage_host_group_search_options())\n\n        storage_host_groups = db.storage_host_groups_get_all(\n            ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset)\n\n        # Get Storage Host Group to Host relation from DB\n        for host_group in storage_host_groups:\n            params = {\n                \"native_storage_host_group_id\":\n                host_group['native_storage_host_group_id']\n            }\n            hosts = db.storage_host_grp_host_rels_get_all(\n                ctxt, filters=params)\n\n            native_storage_host_id_list = []\n            for host in hosts:\n                native_storage_host_id_list.append(\n                    host['native_storage_host_id'])\n\n            host_group['storage_hosts'] = native_storage_host_id_list\n\n        return storage_host_group_view\\\n            .build_storage_host_groups(storage_host_groups)\n\n\ndef create_resource():\n    return wsgi.Resource(StorageHostGroupController())\n"
  },
  {
    "path": "delfin/api/v1/storage_host_initiators.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import storage_host_initiators as \\\n    storage_host_initiator_view\n\n\nclass StorageHostInitiatorController(wsgi.Controller):\n\n    def __init__(self):\n        super(StorageHostInitiatorController, self).__init__()\n        self.search_options = ['name', 'status', 'wwn', 'id', 'storage_id',\n                               'native_storage_host_id',\n                               'native_storage_host_initiator_id']\n\n    def _get_storage_host_initiator_search_options(self):\n        \"\"\"Return storage host initiator search options allowed .\"\"\"\n        return self.search_options\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        query_params = {\"storage_id\": id}\n        query_params.update(req.GET)\n        # Update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # Strip out options except supported search  options\n        api_utils.remove_invalid_options(\n            ctxt, query_params,\n            self._get_storage_host_initiator_search_options())\n\n        storage_host_initiators = db.storage_host_initiators_get_all(\n            ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset)\n        return storage_host_initiator_view.build_storage_host_initiators(\n            storage_host_initiators)\n\n\ndef create_resource():\n    return wsgi.Resource(StorageHostInitiatorController())\n"
  },
  {
    "path": "delfin/api/v1/storage_hosts.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import storage_hosts as storage_host_view\n\n\nclass StorageHostController(wsgi.Controller):\n\n    def __init__(self):\n        super(StorageHostController, self).__init__()\n        self.search_options = ['name', 'status', 'id', 'storage_id',\n                               'native_storage_host_id']\n\n    def _get_storage_host_search_options(self):\n        \"\"\"Return storage host search options allowed .\"\"\"\n        return self.search_options\n\n    def _fill_storage_host_initiators(self, ctxt, storage_host, storage_id):\n        \"\"\"Fills initiator list for storage host.\"\"\"\n\n        storage_host_initiators = db.storage_host_initiators_get_all(\n            ctxt, filters={\"storage_id\": storage_id,\n                           \"native_storage_host_id\":\n                               storage_host['native_storage_host_id']})\n        storage_host_initiator_list = []\n        for storage_host_initiator in storage_host_initiators:\n            storage_host_initiator_list.append(\n                storage_host_initiator['native_storage_host_initiator_id'])\n        return storage_host_initiator_list\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        query_params = {\"storage_id\": id}\n        query_params.update(req.GET)\n        # Update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # Strip out options except supported search  options\n        api_utils.remove_invalid_options(\n            ctxt, query_params, self._get_storage_host_search_options())\n\n        storage_hosts = db.storage_hosts_get_all(ctxt, marker, limit,\n                                                 sort_keys, sort_dirs,\n                                                 query_params, offset)\n        for storage_host in storage_hosts:\n            storage_host['storage_host_initiators'] \\\n                = self._fill_storage_host_initiators(ctxt, storage_host, id)\n\n        return storage_host_view.build_storage_hosts(storage_hosts)\n\n\ndef create_resource():\n    return wsgi.Resource(StorageHostController())\n"
  },
  {
    "path": "delfin/api/v1/storage_pools.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import storage_pools as storage_pool_view\n\n\nclass StoragePoolController(wsgi.Controller):\n    def __init__(self):\n        super(StoragePoolController, self).__init__()\n        self.search_options = ['name', 'status', 'id', 'storage_id',\n                               'native_storage_pool_id']\n\n    def _get_storage_pools_search_options(self):\n        \"\"\"Return storage_pools search options allowed .\"\"\"\n        return self.search_options\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        pool = db.storage_pool_get(ctxt, id)\n        return storage_pool_view.build_storage_pool(pool)\n\n    def index(self, req):\n        ctxt = req.environ['delfin.context']\n        query_params = {}\n        query_params.update(req.GET)\n        # update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # strip out options except supported search  options\n        api_utils.remove_invalid_options(\n            ctxt, query_params, self._get_storage_pools_search_options())\n\n        storage_pools = db.storage_pool_get_all(\n            ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset)\n        return storage_pool_view.build_storage_pools(storage_pools)\n\n\ndef create_resource():\n    return wsgi.Resource(StoragePoolController())\n"
  },
  {
    "path": "delfin/api/v1/storages.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport six\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_utils import timeutils\n\nfrom delfin import coordination\nfrom delfin import db\nfrom delfin import exception\nfrom delfin.api import api_utils\nfrom delfin.api import validation\nfrom delfin.api.common import wsgi\nfrom delfin.api.schemas import storages as schema_storages\nfrom delfin.api.views import storages as storage_view\nfrom delfin.common import constants\nfrom delfin.drivers import api as driverapi\nfrom delfin.i18n import _\nfrom delfin.task_manager import perf_job_controller\nfrom delfin.task_manager import rpcapi as task_rpcapi\nfrom delfin.task_manager.tasks import resources\n\nLOG = log.getLogger(__name__)\nCONF = cfg.CONF\n\n\nclass StorageController(wsgi.Controller):\n    def __init__(self):\n        super().__init__()\n        self.task_rpcapi = task_rpcapi.TaskAPI()\n        self.driver_api = driverapi.API()\n        self.search_options = ['name', 'vendor', 'model', 'status',\n                               'serial_number']\n\n    def _get_storages_search_options(self):\n        \"\"\"Return storages search options allowed .\"\"\"\n        return self.search_options\n\n    def index(self, req):\n        ctxt = req.environ['delfin.context']\n        query_params = {}\n        query_params.update(req.GET)\n        # update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # strip out options except supported search  options\n        api_utils.remove_invalid_options(ctxt, query_params,\n                                         self._get_storages_search_options())\n\n        storages = db.storage_get_all(ctxt, marker, limit, sort_keys,\n                                      sort_dirs, query_params, offset)\n        return storage_view.build_storages(storages)\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        storage = db.storage_get(ctxt, id)\n        return storage_view.build_storage(storage)\n\n    @wsgi.response(201)\n    @validation.schema(schema_storages.create)\n    def create(self, req, body):\n        \"\"\"Register a new storage device.\"\"\"\n        ctxt = req.environ['delfin.context']\n        access_info_dict = body\n\n        # Lock to avoid synchronous creating.\n        for access in constants.ACCESS_TYPE:\n            if access_info_dict.get(access) is not None:\n                host = access_info_dict.get(access).get('host')\n                break\n        lock_name = 'storage-create-' + host\n        lock = coordination.Lock(lock_name)\n\n        with lock:\n            if self._storage_exist(ctxt, access_info_dict):\n                raise exception.StorageAlreadyExists()\n            storage = self.driver_api.discover_storage(ctxt,\n                                                       
access_info_dict)\n\n        # Registration success, sync resource collection for this storage\n        try:\n            self.sync(req, storage['id'])\n\n            # Post registration, trigger alert sync\n            self.task_rpcapi.sync_storage_alerts(ctxt, storage['id'],\n                                                 query_para=None)\n        except Exception as e:\n            # Unexpected error occurred, while syncing resources.\n            msg = _('Failed to sync resources for storage: %(storage)s. '\n                    'Error: %(err)s') % {'storage': storage['id'], 'err': e}\n            LOG.error(msg)\n\n        try:\n            # Trigger Performance monitoring\n            capabilities = self.driver_api.get_capabilities(\n                context=ctxt, storage_id=storage['id'])\n            validation.validate_capabilities(capabilities)\n            perf_job_controller.create_perf_job(ctxt, storage['id'],\n                                                capabilities)\n        except exception.EmptyResourceMetrics:\n            msg = _(\"Resource metric provided by capabilities is empty for \"\n                    \"storage: %s\") % storage['id']\n            LOG.info(msg)\n        except Exception as e:\n            # Unexpected error occurred, while performance monitoring.\n            msg = _('Failed to trigger performance monitoring for storage: '\n                    '%(storage)s. Error: %(err)s') % {'storage': storage['id'],\n                                                      'err': six.text_type(e)}\n            LOG.error(msg)\n        return storage_view.build_storage(storage)\n\n    @wsgi.response(202)\n    def delete(self, req, id):\n        ctxt = req.environ['delfin.context']\n        storage = db.storage_get(ctxt, id)\n\n        for subclass in resources.StorageResourceTask.__subclasses__():\n            self.task_rpcapi.remove_storage_resource(\n                ctxt,\n                storage['id'],\n                subclass.__module__ + '.' + subclass.__name__)\n\n        self.task_rpcapi.remove_storage_in_cache(ctxt, storage['id'])\n        perf_job_controller.delete_perf_job(ctxt, storage['id'])\n\n    @wsgi.response(202)\n    def sync_all(self, req):\n        \"\"\"\n        :param req:\n        :return: it's a Asynchronous call. so return 202 on success. sync_all\n        api performs the storage device info, storage_pool,\n         volume etc. tasks on each registered storage device.\n        \"\"\"\n        ctxt = req.environ['delfin.context']\n\n        storages = db.storage_get_all(ctxt)\n        LOG.debug(\"Total {0} registered storages found in database\".\n                  format(len(storages)))\n        resource_count = len(resources.StorageResourceTask.__subclasses__())\n\n        for storage in storages:\n            try:\n                _set_synced_if_ok(ctxt, storage['id'], resource_count)\n            except exception.InvalidInput as e:\n                LOG.warning('Can not start new sync task for %s, reason is %s'\n                            % (storage['id'], e.msg))\n                continue\n            else:\n                for subclass in \\\n                        resources.StorageResourceTask.__subclasses__():\n                    self.task_rpcapi.sync_storage_resource(\n                        ctxt,\n                        storage['id'],\n                        subclass.__module__ + '.' 
+ subclass.__name__)\n\n    @wsgi.response(202)\n    def sync(self, req, id):\n        \"\"\"\n        :param req:\n        :param id:\n        :return:\n        \"\"\"\n        ctxt = req.environ['delfin.context']\n        storage = db.storage_get(ctxt, id)\n        resource_count = len(resources.StorageResourceTask.__subclasses__())\n        _set_synced_if_ok(ctxt, storage['id'], resource_count)\n        for subclass in resources.StorageResourceTask.__subclasses__():\n            self.task_rpcapi.sync_storage_resource(\n                ctxt,\n                storage['id'],\n                subclass.__module__ + '.' + subclass.__name__)\n\n    def _storage_exist(self, context, access_info):\n        access_info_dict = copy.deepcopy(access_info)\n        access_info_list = access_info_filter(\n            context, access_info_dict)\n\n        for _access_info in access_info_list:\n            try:\n                storage = db.storage_get(context, _access_info['storage_id'])\n                if storage:\n                    LOG.error(\"Storage %s has same access \"\n                              \"information.\" % storage['id'])\n                    return True\n            except exception.StorageNotFound:\n                # Suppose storage was not saved successfully after access\n                # information was saved in database when registering storage.\n                # Therefore, removing access info if storage doesn't exist to\n                # ensure the database has no residual data.\n                LOG.debug(\"Remove residual access information.\")\n                db.access_info_delete(context, _access_info['storage_id'])\n\n        return False\n\n    @wsgi.response(200)\n    def get_capabilities(self, req, id):\n        \"\"\"\n        The API fetches capabilities from driver\n          associated with the storage device.\n        \"\"\"\n        # Check and fetch storage with storage_id\n        ctx = req.environ['delfin.context']\n        storage_info = db.storage_get(ctx, id)\n\n        # Fetch supported driver's capability\n        capabilities = self.driver_api. 
\\\n            get_capabilities(ctx, storage_info['id'])\n\n        # validate capabilities\n        validation.validate_capabilities(capabilities)\n\n        return storage_view.build_capabilities(storage_info, capabilities)\n\n\ndef create_resource():\n    return wsgi.Resource(StorageController())\n\n\n@coordination.synchronized('{storage_id}')\ndef _set_synced_if_ok(context, storage_id, resource_count):\n    try:\n        storage = db.storage_get(context, storage_id)\n    except exception.StorageNotFound:\n        msg = 'Storage %s not found when try to set sync_status' \\\n              % storage_id\n        raise exception.InvalidInput(message=msg)\n    else:\n        last_update = storage['updated_at'] or storage['created_at']\n        current_time = timeutils.utcnow()\n        interval = (current_time - last_update).seconds\n        # If last synchronization was within\n        # CONF.sync_task_expiration(in seconds), and the sync status\n        # is bigger than 0, it means some sync task is still running,\n        # the new sync task should not launch\n        if interval < CONF.sync_task_expiration and \\\n                storage['sync_status'] > 0:\n            raise exception.StorageIsSyncing(storage['id'])\n        storage['sync_status'] = resource_count * constants.ResourceSync.START\n        storage['updated_at'] = current_time\n        db.storage_update(context, storage['id'], storage)\n\n\ndef access_info_filter(context, access_info):\n    access_info_dict = copy.deepcopy(access_info)\n\n    for access in constants.ACCESS_TYPE:\n        if access_info_dict.get(access):\n            access_info_dict.pop(access)\n\n    # Check if storage is registered\n    access_info_list = db.access_info_get_all(context,\n                                              filters=access_info_dict)\n    filtered_list = []\n    for access_info_db in access_info_list:\n        match = True\n        for access in constants.ACCESS_TYPE:\n            access_filter = access_info.get(access)\n            access_db = access_info_db.get(access)\n            if match and access_filter:\n                if not access_db or\\\n                        access_filter['host'] != access_db['host'] or\\\n                        access_filter['port'] != access_db['port']:\n                    match = False\n                    break\n        if match:\n            filtered_list.append(access_info_db)\n\n    return filtered_list\n"
  },
  {
    "path": "delfin/api/v1/volume_groups.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import volume_groups as volume_group_view\n\n\nclass VolumeGroupController(wsgi.Controller):\n\n    def __init__(self):\n        super(VolumeGroupController, self).__init__()\n        self.search_options = ['name', 'id', 'storage_id',\n                               'native_volume_group_id']\n\n    def _get_volume_group_search_options(self):\n        \"\"\"Return volume group search options allowed .\"\"\"\n        return self.search_options\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        query_params = {\"storage_id\": id}\n        query_params.update(req.GET)\n        # Update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # Strip out options except supported search  options\n        api_utils.remove_invalid_options(\n            ctxt, query_params, self._get_volume_group_search_options())\n\n        volume_groups = db.volume_groups_get_all(\n            ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset)\n\n        # Get Volume Group to Volume relation from DB\n        for volume_group in volume_groups:\n            params = {\n                \"native_volume_group_id\":\n                volume_group['native_volume_group_id']\n            }\n            volumes = db.vol_grp_vol_rels_get_all(\n                ctxt, filters=params)\n\n            native_volume_id_list = []\n            for volume in volumes:\n                native_volume_id_list.append(volume['native_volume_id'])\n\n            volume_group['volumes'] = native_volume_id_list\n\n        return volume_group_view.build_volume_groups(volume_groups)\n\n\ndef create_resource():\n    return wsgi.Resource(VolumeGroupController())\n"
  },
  {
    "path": "delfin/api/v1/volumes.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import db\nfrom delfin.api import api_utils\nfrom delfin.api.common import wsgi\nfrom delfin.api.views import volumes as volume_view\n\n\nclass VolumeController(wsgi.Controller):\n\n    def __init__(self):\n        super(VolumeController, self).__init__()\n        self.search_options = ['name', 'status', 'id', 'storage_id', 'wwn',\n                               'native_volume_id', 'native_storage_pool_id']\n\n    def _get_volumes_search_options(self):\n        \"\"\"Return volumes search options allowed .\"\"\"\n        return self.search_options\n\n    def index(self, req):\n        ctxt = req.environ['delfin.context']\n        query_params = {}\n        query_params.update(req.GET)\n        # update options  other than filters\n        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)\n        marker, limit, offset = api_utils.get_pagination_params(query_params)\n        # strip out options except supported search  options\n        api_utils.remove_invalid_options(ctxt, query_params,\n                                         self._get_volumes_search_options())\n\n        volumes = db.volume_get_all(ctxt, marker, limit, sort_keys,\n                                    sort_dirs, query_params, offset)\n        return volume_view.build_volumes(volumes)\n\n    def show(self, req, id):\n        ctxt = req.environ['delfin.context']\n        volume = db.volume_get(ctxt, id)\n        return volume_view.build_volume(volume)\n\n\ndef create_resource():\n    return wsgi.Resource(VolumeController())\n"
  },
  {
    "path": "delfin/api/validation/__init__.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (C) 2017 NTT DATA\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nRequest Body validating middleware.\n\n\"\"\"\n\nimport functools\n\nfrom delfin.api.validation import validators\nfrom delfin.api.schemas.storage_capabilities_schema import \\\n    STORAGE_CAPABILITIES_SCHEMA\nfrom delfin import exception\n\n\ndef schema(request_body_schema):\n    \"\"\"Register a schema to validate request body.\n\n    Registered schema will be used for validating request body just before\n    API method executing.\n\n    :param dict request_body_schema: a schema to validate request body.\n\n    \"\"\"\n\n    def add_validator(func):\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            schema_validator = validators._SchemaValidator(request_body_schema)\n            schema_validator.validate(kwargs['body'])\n            return func(*args, **kwargs)\n        return wrapper\n\n    return add_validator\n\n\ndef validate_capabilities(capabilities):\n    if not capabilities:\n        raise exception.StorageCapabilityNotSupported()\n\n    schema_validator = validators._SchemaValidator(STORAGE_CAPABILITIES_SCHEMA)\n    try:\n        schema_validator.validate(capabilities)\n    except exception.InvalidInput as ex:\n        raise exception.InvalidStorageCapability(ex.msg)\n"
  },
  {
    "path": "delfin/api/validation/parameter_types.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (C) 2017 NTT DATA\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCommon parameter types for validating request Body.\n\n\"\"\"\n\nimport re\nimport unicodedata\nimport six\n\nfrom delfin.i18n import _\n\n\nclass ValidationRegex(object):\n    def __init__(self, regex, reason):\n        self.regex = regex\n        self.reason = reason\n\n\ndef _is_printable(char):\n    \"\"\"determine if a unicode code point is printable.\n\n    This checks if the character is either \"other\" (mostly control\n    codes), or a non-horizontal space. All characters that don't match\n    those criteria are considered printable; that is: letters;\n    combining marks; numbers; punctuation; symbols; (horizontal) space\n    separators.\n    \"\"\"\n    category = unicodedata.category(char)\n    return (not category.startswith(\"C\") and\n            (not category.startswith(\"Z\") or category == \"Zs\"))\n\n\ndef _get_all_chars():\n    for i in range(0xFFFF):\n        yield six.unichr(i)\n\n\n# build a regex that matches all printable characters. This allows\n# spaces in the middle of the name. Also note that the regexp below\n# deliberately allows the empty string. This is so only the constraint\n# which enforces a minimum length for the name is triggered when an\n# empty string is tested. Otherwise it is not deterministic which\n# constraint fails and this causes issues for some unittests when\n# PYTHONHASHSEED is set randomly.\n\ndef _build_regex_range(ws=True, invert=False, exclude=None):\n    \"\"\"Build a range regex for a set of characters in utf8.\n\n    This builds a valid range regex for characters in utf8 by\n    iterating the entire space and building up a set of x-y ranges for\n    all the characters we find which are valid.\n\n    :param ws: should we include whitespace in this range.\n    :param exclude: any characters we want to exclude\n    :param invert: invert the logic\n\n    The inversion is useful when we want to generate a set of ranges\n    which is everything that's not a certain class. For instance,\n    produce all the non printable characters as a set of ranges.\n    \"\"\"\n    if exclude is None:\n        exclude = []\n    regex = \"\"\n    # are we currently in a range\n    in_range = False\n    # last character we found, for closing ranges\n    last = None\n    # last character we added to the regex, this lets us know that we\n    # already have B in the range, which means we don't need to close\n    # it out with B-B. 
While the later seems to work, it's kind of bad form.\n    last_added = None\n\n    def valid_char(char):\n        if char in exclude:\n            result = False\n        elif ws:\n            result = _is_printable(char)\n        else:\n            # Zs is the unicode class for space characters, of which\n            # there are about 10 in this range.\n            result = (_is_printable(char) and\n                      unicodedata.category(char) != \"Zs\")\n        if invert is True:\n            return not result\n        return result\n\n    # iterate through the entire character range. in_\n    for c in _get_all_chars():\n        if valid_char(c):\n            if not in_range:\n                regex += re.escape(c)\n                last_added = c\n            in_range = True\n        else:\n            if in_range and last != last_added:\n                regex += \"-\" + re.escape(last)\n            in_range = False\n        last = c\n    else:\n        if in_range:\n            regex += \"-\" + re.escape(c)\n    return regex\n\n\nvalid_name_regex_base = '^(?![%s])[%s]*(?<![%s])$'\n\n\nvalid_name_regex = ValidationRegex(\n    valid_name_regex_base % (\n        _build_regex_range(ws=False, invert=True),\n        _build_regex_range(),\n        _build_regex_range(ws=False, invert=True)),\n    _(\"printable characters. Can not start or end with whitespace.\"))\n\n\n# This regex allows leading/trailing whitespace\nvalid_name_leading_trailing_spaces_regex_base = (\n    \"^[%(ws)s]*[%(no_ws)s]+[%(ws)s]*$|\"\n    \"^[%(ws)s]*[%(no_ws)s][%(no_ws)s%(ws)s]+[%(no_ws)s][%(ws)s]*$\")\n\n\nvalid_name_leading_trailing_spaces_regex = ValidationRegex(\n    valid_name_leading_trailing_spaces_regex_base % {\n        'ws': _build_regex_range(),\n        'no_ws': _build_regex_range(ws=False)},\n    _(\"printable characters with at least one non space character\"))\n\n\nvalid_description_regex_base = '^[%s]*$'\n\n\nvalid_description_regex = valid_description_regex_base % (\n    _build_regex_range())\n\n\nname = {\n    'type': 'string', 'minLength': 1, 'maxLength': 255,\n    'format': 'name'\n}\n\n\ndescription = {\n    'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255,\n    'pattern': valid_description_regex,\n}\n\nhostname_or_ip_address = {\n    # NOTE: Allow to specify hostname, ipv4 and ipv6.\n    'type': 'string', 'minLength': 0, 'maxLength': 255,\n    'pattern': '^[a-zA-Z0-9-_.:]*$'\n}\n\ntcp_udp_port = {\n    'type': 'integer',\n    'minimum': 0, 'maximum': 65535\n}\n\nsnmp_version = {\n    'type': 'string',\n    'enum': ['SNMPv1', 'SNMPv2c', 'SNMPv3', 'snmpv1', 'snmpv2c', 'snmpv3'],\n}\n\nsnmp_auth_protocol = {\n    'type': 'string',\n    'enum': ['HMACSHA', 'HMACMD5', 'HMCSHA2224',\n             'HMCSHA2256', 'HMCSHA2384', 'HMCSHA2512'],\n}\n\nsnmp_privacy_protocol = {\n    'type': 'string',\n    'enum': ['DES', 'AES', 'AES192', 'AES256', '3DES'],\n}\n\nsnmp_security_level = {\n    'type': 'string',\n    'enum': ['authPriv', 'authNoPriv', 'noAuthnoPriv'],\n}\n\nhost_key_type = {\n    'type': 'string',\n    'enum': ['ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384',\n             'ecdsa-sha2-nistp521', 'ssh-rsa', 'ssh-dss'],\n}\n"
  },
  {
    "path": "delfin/api/validation/validators.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (C) 2017 NTT DATA\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nInternal implementation of request Body validating middleware.\n\n\"\"\"\n\nimport re\n\nimport jsonschema\nfrom jsonschema import exceptions as jsonschema_exc\nfrom oslo_utils import timeutils\nfrom oslo_utils import uuidutils\nimport six\n\nfrom delfin import exception\nfrom delfin.i18n import _\nfrom delfin import utils\n\n\ndef _soft_validate_additional_properties(\n        validator, additional_properties_value, param_value, schema):\n    \"\"\"Validator function.\n\n    If there are not any properties on the param_value that are not specified\n    in the schema, this will return without any effect. If there are any such\n    extra properties, they will be handled as follows:\n\n    - if the validator passed to the method is not of type \"object\", this\n      method will return without any effect.\n    - if the 'additional_properties_value' parameter is True, this method will\n      return without any effect.\n    - if the schema has an additionalProperties value of True, the extra\n      properties on the param_value will not be touched.\n    - if the schema has an additionalProperties value of False and there\n      aren't patternProperties specified, the extra properties will be stripped\n      from the param_value.\n    - if the schema has an additionalProperties value of False and there\n      are patternProperties specified, the extra properties will not be\n      touched and raise validation error if pattern doesn't match.\n    \"\"\"\n    if (not validator.is_type(param_value, \"object\") or\n            additional_properties_value):\n        return\n\n    properties = schema.get(\"properties\", {})\n    patterns = \"|\".join(schema.get(\"patternProperties\", {}))\n    extra_properties = set()\n    for prop in param_value:\n        if prop not in properties:\n            if patterns:\n                if not re.search(patterns, prop):\n                    extra_properties.add(prop)\n            else:\n                extra_properties.add(prop)\n\n    if not extra_properties:\n        return\n\n    if patterns:\n        error = \"Additional properties are not allowed (%s %s unexpected)\"\n        if len(extra_properties) == 1:\n            verb = \"was\"\n        else:\n            verb = \"were\"\n        yield jsonschema_exc.ValidationError(\n            error % (\", \".join(repr(extra) for extra in extra_properties),\n                     verb))\n    else:\n        for prop in extra_properties:\n            del param_value[prop]\n\n\ndef _validate_string_length(value, entity_name, mandatory=False,\n                            min_length=0, max_length=None,\n                            remove_whitespaces=False):\n    \"\"\"Check the length of specified string.\n\n    :param value: the value of the string\n    :param entity_name: the name of the string\n    :mandatory: string is mandatory or not\n    :param 
min_length: the min_length of the string\n    :param max_length: the max_length of the string\n    :param remove_whitespaces: True if trimming whitespaces is needed\n                                   else False\n    \"\"\"\n    if not mandatory and not value:\n        return True\n\n    if mandatory and not value:\n        msg = _(\"The '%s' can not be None.\") % entity_name\n        raise exception.InvalidInput(msg)\n\n    if remove_whitespaces:\n        value = value.strip()\n\n    utils.check_string_length(value, entity_name,\n                              min_length=min_length,\n                              max_length=max_length)\n\n\n@jsonschema.FormatChecker.cls_checks('date-time')\ndef _validate_datetime_format(param_value):\n    try:\n        timeutils.parse_isotime(param_value)\n    except ValueError:\n        return False\n    else:\n        return True\n\n\n@jsonschema.FormatChecker.cls_checks('name', exception.InvalidName)\ndef _validate_name(param_value):\n    if not param_value:\n        msg = _(\"The 'name' can not be None.\")\n        raise exception.InvalidName(msg)\n    elif len(param_value.strip()) == 0:\n        msg = _(\"The 'name' can not be empty.\")\n        raise exception.InvalidName(msg)\n    return True\n\n\n@jsonschema.FormatChecker.cls_checks('name_skip_leading_trailing_spaces',\n                                     exception.InvalidName)\ndef _validate_name_skip_leading_trailing_spaces(param_value):\n    if not param_value:\n        msg = _(\"The 'name' can not be None.\")\n        raise exception.InvalidName(msg)\n    param_value = param_value.strip()\n    if len(param_value) == 0:\n        msg = _(\"The 'name' can not be empty.\")\n        raise exception.InvalidName(msg)\n    elif len(param_value) > 255:\n        msg = _(\"The 'name' can not be greater than 255 characters.\")\n        raise exception.InvalidName(msg)\n    return True\n\n\n@jsonschema.FormatChecker.cls_checks('uuid')\ndef _validate_uuid_format(instance):\n    return uuidutils.is_uuid_like(instance)\n\n\nclass FormatChecker(jsonschema.FormatChecker):\n    \"\"\"A FormatChecker can output the message from cause exception\n\n    We need understandable validation errors messages for users. When a\n    custom checker has an exception, the FormatChecker will output a\n    readable message provided by the checker.\n    \"\"\"\n\n    def check(self, param_value, format):\n        \"\"\"Check whether the param_value conforms to the given format.\n\n        :argument param_value: the param_value to check\n        :type: any primitive type (str, number, bool)\n        :argument str format: the format that param_value should conform to\n        :raises: :exc:`FormatError` if param_value does not conform to format\n        \"\"\"\n\n        if format not in self.checkers:\n            return\n\n        # For safety reasons custom checkers can be registered with\n        # allowed exception types. Anything else will fall into the\n        # default formatter.\n        func, raises = self.checkers[format]\n        result, cause = None, None\n\n        try:\n            result = func(param_value)\n        except raises as e:\n            cause = e\n        if not result:\n            msg = \"%r is not a %r\" % (param_value, format)\n            raise jsonschema_exc.FormatError(msg, cause=cause)\n\n\nclass _SchemaValidator(object):\n    \"\"\"A validator class\n\n    This class is changed from Draft4Validator to validate minimum/maximum\n    value of a string number(e.g. '10'). 
This changes can be removed when\n    we tighten up the API definition and the XML conversion.\n    Also FormatCheckers are added for checking data formats which would be\n    passed through cinder api commonly.\n\n    \"\"\"\n    validator = None\n    validator_org = jsonschema.Draft4Validator\n\n    def __init__(self, schema, relax_additional_properties=False):\n        validators = {\n            'minimum': self._validate_minimum,\n            'maximum': self._validate_maximum,\n        }\n        if relax_additional_properties:\n            validators[\n                'additionalProperties'] = _soft_validate_additional_properties\n\n        validator_cls = jsonschema.validators.extend(self.validator_org,\n                                                     validators)\n        format_checker = FormatChecker()\n        self.validator = validator_cls(schema, format_checker=format_checker)\n\n    def validate(self, *args, **kwargs):\n        try:\n            self.validator.validate(*args, **kwargs)\n        except jsonschema.ValidationError as ex:\n            if isinstance(ex.cause, exception.InvalidName):\n                raise ex.cause\n            elif len(ex.path) > 0:\n                detail = _(\"Invalid input for field/attribute %(path)s.\"\n                           \" %(message)s\") % {'path': ex.path.pop(),\n                                              'message': ex.message}\n            else:\n                detail = ex.message\n            raise exception.InvalidInput(detail)\n        except TypeError as ex:\n            # NOTE: If passing non string value to patternProperties parameter,\n            #       TypeError happens. Here is for catching the TypeError.\n            detail = six.text_type(ex)\n            raise exception.InvalidInput(detail)\n\n    def _number_from_str(self, param_value):\n        try:\n            value = int(param_value)\n        except (ValueError, TypeError):\n            try:\n                value = float(param_value)\n            except (ValueError, TypeError):\n                return None\n        return value\n\n    def _validate_minimum(self, validator, minimum, param_value, schema):\n        param_value = self._number_from_str(param_value)\n        if param_value is None:\n            return\n        return self.validator_org.VALIDATORS['minimum'](validator, minimum,\n                                                        param_value, schema)\n\n    def _validate_maximum(self, validator, maximum, param_value, schema):\n        param_value = self._number_from_str(param_value)\n        if param_value is None:\n            return\n        return self.validator_org.VALIDATORS['maximum'](validator, maximum,\n                                                        param_value, schema)\n"
  },
  {
    "path": "delfin/api/views/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/api/views/access_info.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.common import constants\n\n\nclass ViewBuilder(object):\n\n    def show(self, access_info):\n        access_info_dict = access_info.to_dict()\n        for access in constants.ACCESS_TYPE:\n            if access_info.get(access):\n                access_info[access].pop('password', None)\n        return access_info_dict\n\n    def show_all(self, access_infos):\n        infos = []\n        for access_info in access_infos:\n            access_info_dict = self.show(access_info)\n            infos.append(access_info_dict)\n        return dict(access_infos=infos)\n"
  },
  {
    "path": "delfin/api/views/alert_source.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nfrom delfin import cryptor\n\n\ndef build_alert_source(value):\n    view = copy.deepcopy(value)\n    view.pop(\"auth_key\")\n    view.pop(\"privacy_key\")\n    version = view['version']\n    if version.lower() == 'snmpv2c':\n        view['community_string'] = cryptor.decode(view['community_string'])\n        # Remove the key not belong to snmpv2c\n        view.pop('username')\n        view.pop('security_level')\n        view.pop('auth_protocol')\n        view.pop('privacy_protocol')\n        view.pop('engine_id')\n        view.pop('context_name')\n    elif version.lower() == 'snmpv3':\n        # Remove the key not belong to snmpv3\n        view.pop('community_string')\n    return dict(view)\n\n\ndef show_all_snmp_configs(values):\n    snmp_configs = []\n    for snmp_config in values:\n        snmp_config_dict = build_alert_source(dict(snmp_config))\n        snmp_configs.append(snmp_config_dict)\n    return dict(snmp_configs=snmp_configs)\n"
  },
  {
    "path": "delfin/api/views/alerts.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_alerts(alerts):\n    # Build list of alerts\n    views = [build_alert(alert)\n             for alert in alerts]\n    return dict(alerts=views)\n\n\ndef build_alert(alert):\n    view = copy.deepcopy(alert)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/controllers.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_controllers(controllers):\n    # Build list of controllers\n    views = [build_controller(controller)\n             for controller in controllers]\n    return dict(controllers=views)\n\n\ndef build_controller(controller):\n    view = copy.deepcopy(controller)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/disks.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_disks(disks):\n    # Build list of disks\n    views = [build_disk(disk)\n             for disk in disks]\n    return dict(disks=views)\n\n\ndef build_disk(disk):\n    view = copy.deepcopy(disk)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/filesystems.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_filesystems(filesystems):\n    # Build list of filesystems\n    views = [build_filesystem(filesystem)\n             for filesystem in filesystems]\n    return dict(filesystems=views)\n\n\ndef build_filesystem(filesystem):\n    view = copy.deepcopy(filesystem)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/masking_views.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_masking_views(masking_views):\n    # Build list of masking views\n    views = [build_masking_view(masking_view)\n             for masking_view in masking_views]\n    return dict(masking_views=views)\n\n\ndef build_masking_view(masking_view):\n    view = copy.deepcopy(masking_view)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/port_groups.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_port_groups(port_groups):\n    # Build list of port groups\n    views = [build_port_group(port_group)\n             for port_group in port_groups]\n    return dict(port_groups=views)\n\n\ndef build_port_group(port_group):\n    view = copy.deepcopy(port_group)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/ports.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_ports(ports):\n    # Build list of ports\n    views = [build_port(port)\n             for port in ports]\n    return dict(ports=views)\n\n\ndef build_port(port):\n    view = copy.deepcopy(port)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/qtrees.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_qtrees(qtrees):\n    # Build list of qtrees\n    views = [build_qtree(qtree)\n             for qtree in qtrees]\n    return dict(qtrees=views)\n\n\ndef build_qtree(qtree):\n    view = copy.deepcopy(qtree)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/quotas.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_quotas(quotas):\n    # Build list of quotas\n    views = [build_quota(quota)\n             for quota in quotas]\n    return dict(quotas=views)\n\n\ndef build_quota(quota):\n    view = copy.deepcopy(quota)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/shares.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_shares(shares):\n    # Build list of shares\n    views = [build_share(share)\n             for share in shares]\n    return dict(shares=views)\n\n\ndef build_share(share):\n    view = copy.deepcopy(share)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/storage_host_groups.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_storage_host_groups(storage_host_groups):\n    # Build list of storage host groups\n    views = [build_storage_host_group(storage_host_group)\n             for storage_host_group in storage_host_groups]\n    return dict(storage_host_groups=views)\n\n\ndef build_storage_host_group(storage_host_group):\n    view = copy.deepcopy(storage_host_group)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/storage_host_initiators.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_storage_host_initiators(storage_host_initiators):\n    # Build list of storage host initiators\n    views = [build_storage_host_initiator(storage_host_initiator)\n             for storage_host_initiator in storage_host_initiators]\n    return dict(storage_host_initiators=views)\n\n\ndef build_storage_host_initiator(storage_host_initiator):\n    view = copy.deepcopy(storage_host_initiator)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/storage_hosts.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_storage_hosts(storage_hosts):\n    # Build list of storage hosts\n    views = [build_storage_host(storage_host)\n             for storage_host in storage_hosts]\n    return dict(storage_hosts=views)\n\n\ndef build_storage_host(storage_host):\n    view = copy.deepcopy(storage_host)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/storage_pools.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_storage_pools(storage_pools):\n    # Build list of storage_pools\n    views = [build_storage_pool(storage_pool)\n             for storage_pool in storage_pools]\n    return dict(storage_pools=views)\n\n\ndef build_storage_pool(storage_pool):\n    view = copy.deepcopy(storage_pool)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/storages.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\nfrom delfin.common import constants\n\n\ndef build_storages(storages):\n    # Build list of storages\n    views = [build_storage(storage)\n             for storage in storages]\n    return dict(storages=views)\n\n\ndef build_storage(storage):\n    view = copy.deepcopy(storage)\n    if view['sync_status'] == constants.SyncStatus.SYNCED:\n        view['sync_status'] = 'SYNCED'\n    else:\n        view['sync_status'] = 'SYNCING'\n    return dict(view)\n\n\ndef build_capabilities(storage_info, capabilities):\n    \"\"\"build capability API response\"\"\"\n    # build metadata\n    metadata = dict()\n    metadata['vendor'] = storage_info['vendor']\n    metadata['model'] = storage_info['model']\n\n    # create final view\n    view = dict()\n    view['metadata'] = metadata\n    view['spec'] = capabilities\n    return view\n"
  },
  {
    "path": "delfin/api/views/volume_groups.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_volume_groups(volume_groups):\n    # Build list of volume groups\n    views = [build_volume_group(volume_group)\n             for volume_group in volume_groups]\n    return dict(volume_groups=views)\n\n\ndef build_volume_group(volume_group):\n    view = copy.deepcopy(volume_group)\n    return dict(view)\n"
  },
  {
    "path": "delfin/api/views/volumes.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\n\n\ndef build_volumes(volumes):\n    # Build list of volumes\n    views = [build_volume(volume)\n             for volume in volumes]\n    return dict(volumes=views)\n\n\ndef build_volume(volume):\n    view = copy.deepcopy(volume)\n    return dict(view)\n"
  },
  {
    "path": "delfin/cmd/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/cmd/alert.py",
    "content": "#!/usr/bin/env python\n\n# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\");\n#    you may not use this file except in compliance with the License.\n#    You may obtain a copy of the License at\n#\n#        http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS,\n#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#    See the License for the specific language governing permissions and\n#    limitations under the License.\n\n\"\"\"Starter script for delfin alert service.\"\"\"\n\nimport eventlet\neventlet.monkey_patch()\n\nimport sys\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom delfin.common import config  # noqa\nfrom delfin import service\nfrom delfin import utils\nfrom delfin import version\n\nCONF = cfg.CONF\n\n\ndef main():\n    log.register_options(CONF)\n    CONF(sys.argv[1:], project='delfin',\n         version=version.version_string())\n    log.setup(CONF, \"delfin\")\n    utils.monkey_patch()\n\n    # Launch alert manager service\n    alert_manager = service.AlertService.create(binary='delfin-alert',\n                                                coordination=True)\n    service.serve(alert_manager)\n    service.wait()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "delfin/cmd/api.py",
    "content": "#!/usr/bin/env python\n\n# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\");\n#    you may not use this file except in compliance with the License.\n#    You may obtain a copy of the License at\n#\n#        http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS,\n#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#    See the License for the specific language governing permissions and\n#    limitations under the License.\n\n\"\"\"Starter script for delfin OS API.\"\"\"\n\nimport eventlet\neventlet.monkey_patch()\n\nimport sys\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom delfin.common import config  # noqa\nfrom delfin import service\nfrom delfin import utils\nfrom delfin import version\n\nCONF = cfg.CONF\n\n\ndef main():\n    log.register_options(CONF)\n    CONF(sys.argv[1:], project='delfin',\n         version=version.version_string())\n    log.setup(CONF, \"delfin\")\n    utils.monkey_patch()\n\n    launcher = service.process_launcher()\n    api_server = service.WSGIService('delfin', coordination=True)\n    launcher.launch_service(api_server, workers=api_server.workers or 1)\n    launcher.wait()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "delfin/cmd/task.py",
    "content": "#!/usr/bin/env python\n\n# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\");\n#    you may not use this file except in compliance with the License.\n#    You may obtain a copy of the License at\n#\n#        http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS,\n#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#    See the License for the specific language governing permissions and\n#    limitations under the License.\n\n\"\"\"Starter script for delfin task service.\"\"\"\n\nimport eventlet\n\neventlet.monkey_patch()\n\nimport sys\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom delfin.common import config  # noqa\nfrom delfin import service\nfrom delfin import utils\nfrom delfin import version\n\nCONF = cfg.CONF\n\n\ndef main():\n    log.register_options(CONF)\n    CONF(sys.argv[1:], project='delfin',\n         version=version.version_string())\n    log.setup(CONF, \"delfin\")\n    utils.monkey_patch()\n\n    task_server = service.TaskService.create(binary='delfin-task',\n                                             coordination=True)\n    leader_election = service.LeaderElectionService.create()\n    metrics_task_server = service. \\\n        TaskService.create(binary='delfin-task',\n                           topic=CONF.host,\n                           manager='delfin.'\n                                   'task_manager.'\n                                   'metrics_manager.'\n                                   'MetricsTaskManager',\n                           coordination=True)\n\n    service.serve(task_server)\n    service.serve(leader_election)\n    service.serve(metrics_task_server)\n\n    service.wait()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "delfin/common/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/common/alert_util.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# Copyright 2010-2011 OpenStack Foundation\n# Copyright 2012 Justin Santa Barbara\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nfrom oslo_log import log as logging\n\nLOG = logging.getLogger(__name__)\n\n\ndef fill_storage_attributes(alert_model, storage):\n    \"\"\" Fills storage attributes for alert model \"\"\"\n    alert_model['storage_id'] = storage['id']\n    alert_model['storage_name'] = storage['name']\n    alert_model['vendor'] = storage['vendor']\n    alert_model['model'] = storage['model']\n    alert_model['serial_number'] = storage['serial_number']\n\n\ndef is_alert_in_time_range(query_para, occur_time):\n    # query_para contains optional begin_time and end_time\n    # This function checks for their existence and validates if occur_time\n    # falls in begin_time and end_time range\n    if not query_para:\n        return True\n    begin_time = None\n    end_time = None\n    try:\n        if query_para.get('begin_time'):\n            begin_time = int(query_para.get('begin_time'))\n\n        if query_para.get('end_time'):\n            end_time = int(query_para.get('end_time'))\n    except Exception:\n        LOG.warning(\"Invalid query parameters received, ignoring them\")\n        return True\n\n    if begin_time is not None and end_time is not None:\n        if begin_time <= occur_time <= end_time:\n            return True\n    elif begin_time is not None and begin_time <= occur_time:\n        return True\n    elif end_time is not None and end_time >= occur_time:\n        return True\n\n    return False\n"
  },
  {
    "path": "delfin/common/config.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n# Copyright 2012 Red Hat, Inc.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Command-line flag library.\n\nEmulates gflags by wrapping cfg.ConfigOpts.\n\nThe idea is to move fully to cfg eventually, and this wrapper is a\nstepping stone.\n\n\"\"\"\nimport socket\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_middleware import cors\nfrom oslo_utils import netutils\n\nfrom delfin.common import constants\n\nLOG = log.getLogger(__name__)\n\nCONF = cfg.CONF\nlog.register_options(CONF)\n\ncore_opts = [\n    cfg.StrOpt('state_path',\n               default='/var/lib/delfin',\n               help=\"Top-level directory for maintaining delfin's state.\"),\n]\n\nCONF.register_cli_opts(core_opts)\n\nglobal_opts = [\n    cfg.HostAddressOpt('my_ip',\n                       default=netutils.get_my_ipv4(),\n                       sample_default='<your_ip>',\n                       help='IP address of this host.'),\n    cfg.HostnameOpt('host',\n                    default=socket.gethostname(),\n                    sample_default='<your_hostname>',\n                    help='Name of this node.  This can be an opaque '\n                         'identifier. 
It is not necessarily a hostname, '\n                         'FQDN, or IP address.'),\n    cfg.ListOpt('delfin_api_ext_list',\n                default=[],\n                help='Specify list of extensions to load when using '\n                     'delfin_api_extension option with '\n                     'delfin.api.contrib.select_extensions.'),\n    cfg.ListOpt('delfin_api_extension',\n                default=['delfin.api.contrib.standard_extensions'],\n                help='The delfin api extensions to load.'),\n    cfg.BoolOpt('monkey_patch',\n                default=False,\n                help='Whether to log monkey patching.'),\n    cfg.ListOpt('monkey_patch_modules',\n                default=[],\n                help='List of modules or decorators to monkey patch.'),\n    cfg.IntOpt('service_down_time',\n               default=60,\n               help='Maximum time since last check-in for up service.'),\n    cfg.StrOpt('task_manager',\n               default='delfin.task_manager.manager.TaskManager',\n               help='Full class name for the task manager.'),\n    cfg.StrOpt('delfin_task_topic',\n               default='delfin-task',\n               help='The topic task manager nodes listen on.'),\n    cfg.StrOpt('delfin_alert_topic',\n               default='delfin-alert',\n               help='The topic alert manager nodes listen on.'),\n    cfg.StrOpt('alert_manager',\n               default='delfin.alert_manager.trap_receiver.TrapReceiver',\n               help='Full class name for the trap receiver.'),\n    cfg.StrOpt('delfin_cryptor',\n               default='delfin.cryptor._Base64',\n               help='cryptor type'),\n    cfg.IntOpt('sync_task_expiration',\n               default=1800,\n               help='Sync task expiration in seconds.'),\n    cfg.BoolOpt('snmp_validation_enabled',\n                default=True,\n                help='Whether alert source configuration to be validated '\n                     'through snmp connectivity.'),\n]\n\nCONF.register_opts(global_opts)\n\nstorage_driver_opts = [\n    cfg.StrOpt('ca_path',\n               default='',\n               help='\"\": Disable SSL certificate verification, '\n                    '/path/to/file: Use SSL certificate from file location')\n]\n\nCONF.register_opts(storage_driver_opts, group='storage_driver')\n\ntelemetry_opts = [\n    cfg.IntOpt('performance_collection_interval',\n               default=constants.TelemetryCollection\n               .DEF_PERFORMANCE_COLLECTION_INTERVAL,\n               help='default interval (in sec) for performance collection'),\n    cfg.IntOpt('performance_history_on_reschedule',\n               default=constants.TelemetryCollection\n               .DEF_PERFORMANCE_HISTORY_ON_RESCHEDULE,\n               help='default history(in sec) to be collected on a job '\n                    'reschedule'),\n    cfg.IntOpt('performance_timestamp_overlap',\n               default=constants.TelemetryCollection\n               .DEF_PERFORMANCE_TIMESTAMP_OVERLAP,\n               help='default overlap to be added on start_time in sec  '\n               ),\n    cfg.IntOpt('max_failed_task_retry_window',\n               default=constants.TelemetryCollection\n               .MAX_FAILED_TASK_RETRY_WINDOW,\n               help='Maximum time window (in sec) until which delfin supports '\n                    'collection for failed tasks'),\n    cfg.BoolOpt('enable_dynamic_subprocess',\n                default=False,\n                help='Enable dynamic subprocess metrics collection'),\n    
cfg.IntOpt('process_cleanup_interval',\n               default=60,\n               help='Background process cleanup call interval in sec'),\n    cfg.IntOpt('task_cleanup_delay',\n               default=10,\n               help='Delay for task cleanup before killing child in sec'),\n    cfg.IntOpt('group_change_detect_interval',\n               default=30,\n               help='Local executor group change detect interval in sec'),\n    cfg.IntOpt('max_storages_in_child',\n               default=5,\n               help='Max storages handled by one local executor process'),\n    cfg.IntOpt('max_childs_in_node',\n               default=100000,\n               help='Max processes that can be spawned before forcing fail'),\n    cfg.IntOpt('node_weight',\n               default=100,\n               help='Weight for the node in the Hash Ring'),\n]\n\nCONF.register_opts(telemetry_opts, \"telemetry\")\n\n\ndef set_middleware_defaults():\n    \"\"\"Update default configuration options for oslo.middleware.\"\"\"\n    cors.set_defaults(\n        allow_headers=['X-Auth-Token',\n                       'X-OpenStack-Request-ID',\n                       'X-Identity-Status',\n                       'X-Roles',\n                       'X-Service-Catalog',\n                       'X-User-Id',\n                       'X-Tenant-Id'],\n        expose_headers=['X-Auth-Token',\n                        'X-OpenStack-Request-ID',\n                        'X-Subject-Token',\n                        'X-Service-Token'],\n        allow_methods=['GET',\n                       'PUT',\n                       'POST',\n                       'DELETE',\n                       'PATCH']\n    )\n"
  },
  {
    "path": "delfin/common/constants.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2016 Red Hat, Inc.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nfrom collections import namedtuple\n\nfrom pysnmp.entity import config\n\n# The maximum value a signed INT type may have\nDB_MAX_INT = 0x7FFFFFFF\n\n# Valid access type supported currently.\nACCESS_TYPE = ['rest', 'ssh', 'cli', 'smis']\n\n\n# Custom fields for Delfin objects\nclass StorageStatus(object):\n    NORMAL = 'normal'\n    OFFLINE = 'offline'\n    ABNORMAL = 'abnormal'\n    DEGRADED = 'degraded'\n    UNKNOWN = 'unknown'\n\n    ALL = (NORMAL, OFFLINE, ABNORMAL, DEGRADED, UNKNOWN)\n\n\nclass StoragePoolStatus(object):\n    NORMAL = 'normal'\n    OFFLINE = 'offline'\n    ABNORMAL = 'abnormal'\n    DEGRADED = 'degraded'\n    UNKNOWN = 'unknown'\n\n    ALL = (NORMAL, OFFLINE, ABNORMAL, DEGRADED, UNKNOWN)\n\n\nclass VolumeStatus(object):\n    AVAILABLE = 'available'\n    ERROR = 'error'\n\n    ALL = (AVAILABLE, ERROR)\n\n\nclass StorageType(object):\n    BLOCK = 'block'\n    FILE = 'file'\n    UNIFIED = 'unified'\n\n    ALL = (BLOCK, FILE, UNIFIED)\n\n\nclass SyncStatus(object):\n    SYNCED = 0\n\n\nclass VolumeType(object):\n    THICK = 'thick'\n    THIN = 'thin'\n\n    ALL = (THICK, THIN)\n\n\nclass PortConnectionStatus(object):\n    CONNECTED = 'connected'\n    DISCONNECTED = 'disconnected'\n    UNKNOWN = 'unknown'\n\n    ALL = (CONNECTED, DISCONNECTED, UNKNOWN)\n\n\nclass PortHealthStatus(object):\n    NORMAL = 'normal'\n    ABNORMAL = 'abnormal'\n    UNKNOWN = 'unknown'\n\n    ALL = (NORMAL, ABNORMAL, UNKNOWN)\n\n\nclass PortType(object):\n    FC = 'fc'\n    ISCSI = 'iscsi'\n    FICON = 'ficon'\n    FCOE = 'fcoe'\n    ETH = 'eth'\n    SAS = 'sas'\n    IB = 'ib'\n    LOGIC = 'logic'\n    CIFS = 'cifs'\n    NFS = 'nfs'\n    FCACHE = 'fcache'\n    COMBO = 'combo'\n    CNA = 'cna'\n    RCIP = 'rcip'\n    NFS_CIFS = 'nfs-cifs'\n    OTHER = 'other'\n\n    ALL = (FC, ISCSI, FICON, FCOE, ETH, SAS, IB, LOGIC,\n           CIFS, NFS, FCACHE, COMBO, CNA, RCIP, NFS_CIFS, OTHER)\n\n\nclass PortLogicalType(object):\n    FRONTEND = 'frontend'\n    BACKEND = 'backend'\n    SERVICE = 'service'\n    MANAGEMENT = 'management'\n    INTERNAL = 'internal'\n    MAINTENANCE = 'maintenance'\n    INTERCONNECT = 'interconnect'\n    CLUSTER = 'cluster'\n    DATA = 'data'\n    NODE_MGMT = 'node-mgmt'\n    INTERCLUSTER = 'intercluster'\n    CLUSTER_MGMT = 'cluster-mgmt'\n    PHYSICAL = 'physical'\n    IF_GROUP = 'if-group'\n    VLAN = 'vlan'\n    OTHER = 'other'\n\n    ALL = (FRONTEND, BACKEND, SERVICE, MANAGEMENT,\n           INTERNAL, MAINTENANCE, INTERCONNECT, CLUSTER, DATA, NODE_MGMT,\n           INTERCLUSTER, CLUSTER_MGMT, PHYSICAL, IF_GROUP, VLAN, OTHER)\n\n\nclass DiskStatus(object):\n    NORMAL = 'normal'\n    ABNORMAL = 'abnormal'\n    DEGRADED = 'degraded'\n    OFFLINE = 'offline'\n\n    ALL = (NORMAL, ABNORMAL, DEGRADED, OFFLINE)\n\n\nclass DiskPhysicalType(object):\n    SATA = 'sata'\n    SAS = 'sas'\n    SSD = 
'ssd'\n    NL_SSD = 'nl-ssd'\n    FC = 'fc'\n    LUN = 'lun'\n    ATA = 'ata'\n    FLASH = 'flash'\n    VMDISK = 'vmdisk'\n    NL_SAS = 'nl-sas'\n    SSD_CARD = 'ssd-card'\n    SAS_FLASH_VP = 'sas-flash-vp'\n    HDD = 'hdd'\n    NVME_SSD = 'nvme-ssd'\n    UNKNOWN = 'unknown'\n\n    ALL = (\n        SATA, SAS, SSD, NL_SSD, FC, LUN, ATA, FLASH, VMDISK,\n        NL_SAS, SSD_CARD, SAS_FLASH_VP, HDD, NVME_SSD, UNKNOWN)\n\n\nclass DiskLogicalType(object):\n    FREE = 'free'\n    MEMBER = 'member'\n    HOTSPARE = 'hotspare'\n    CACHE = 'cache'\n    AGGREGATE = 'aggregate'\n    BROKEN = 'broken'\n    FOREIGN = 'foreign'\n    LABELMAINT = 'labelmaint'\n    MAINTENANCE = 'maintenance'\n    SHARED = 'shared'\n    SPARE = 'spare'\n    UNASSIGNED = 'unassigned'\n    UNSUPPORTED = 'unsupported'\n    REMOTE = 'remote'\n    MEDIATOR = 'mediator'\n    DATA = 'data'\n    UNKNOWN = 'unknown'\n\n    ALL = (FREE, MEMBER, HOTSPARE, CACHE, AGGREGATE, BROKEN, FOREIGN,\n           LABELMAINT, MAINTENANCE, SHARED, SPARE, UNASSIGNED, UNSUPPORTED,\n           REMOTE, MEDIATOR, DATA, UNKNOWN)\n\n\nclass FilesystemStatus(object):\n    NORMAL = 'normal'\n    FAULTY = 'faulty'\n\n    ALL = (NORMAL, FAULTY)\n\n\nclass WORMType(object):\n    NON_WORM = 'non_worm'\n    AUDIT_LOG = 'audit_log'\n    COMPLIANCE = 'compliance'\n    ENTERPRISE = 'enterprise'\n\n    ALL = (NON_WORM, AUDIT_LOG, COMPLIANCE, ENTERPRISE)\n\n\nclass NASSecurityMode(object):\n    MIXED = 'mixed'\n    NATIVE = 'native'\n    NTFS = 'ntfs'\n    UNIX = 'unix'\n\n    ALL = (MIXED, NATIVE, NTFS, UNIX)\n\n\nclass QuotaType(object):\n    TREE = 'tree'\n    USER = 'user'\n    GROUP = 'group'\n\n    ALL = (TREE, USER, GROUP)\n\n\nclass FSType(object):\n    THICK = 'thick'\n    THIN = 'thin'\n\n    ALL = (THICK, THIN)\n\n\nclass ShareProtocol(object):\n    CIFS = 'cifs'\n    NFS = 'nfs'\n    FTP = 'ftp'\n    HDFS = 'hdfs'\n\n    ALL = (CIFS, NFS, FTP, HDFS)\n\n\n# Enumerations for alert severity\nclass Severity(object):\n    FATAL = 'Fatal'\n    CRITICAL = 'Critical'\n    MAJOR = 'Major'\n    MINOR = 'Minor'\n    WARNING = 'Warning'\n    INFORMATIONAL = 'Informational'\n    NOT_SPECIFIED = 'NotSpecified'\n\n\n# Enumerations for alert category\nclass Category(object):\n    FAULT = 'Fault'\n    EVENT = 'Event'\n    RECOVERY = 'Recovery'\n    NOT_SPECIFIED = 'NotSpecified'\n\n\n# Enumerations for clear type\nclass ClearType(object):\n    AUTOMATIC = 'Automatic'\n    MANUAL = 'Manual'\n\n\nclass ControllerStatus(object):\n    NORMAL = 'normal'\n    OFFLINE = 'offline'\n    FAULT = 'fault'\n    DEGRADED = 'degraded'\n    UNKNOWN = 'unknown'\n\n    ALL = (NORMAL, OFFLINE, FAULT, DEGRADED, UNKNOWN)\n\n\nclass InitiatorType(object):\n    FC = 'fc'\n    ISCSI = 'iscsi'\n    NVME_OVER_ROCE = 'roce'\n    SAS = 'sas'\n    NVME_OVER_FABRIC = 'nvme-of'\n    UNKNOWN = 'unknown'\n\n    ALL = (FC, ISCSI, NVME_OVER_ROCE, SAS, NVME_OVER_FABRIC, UNKNOWN)\n\n\n# Enumerations for alert type based on X.733 Specification\nclass EventType(object):\n    COMMUNICATIONS_ALARM = 'CommunicationsAlarm'\n    EQUIPMENT_ALARM = 'EquipmentAlarm'\n    PROCESSING_ERROR_ALARM = 'ProcessingErrorAlarm'\n    QUALITY_OF_SERVICE_ALARM = 'QualityOfServiceAlarm'\n    ENVIRONMENTAL_ALARM = 'EnvironmentalAlarm'\n    INTEGRITY_VIOLATION = 'IntegrityViolation'\n    OPERATIONAL_VIOLATION = 'OperationalViolation'\n    PHYSICAL_VIOLATION = 'PhysicalViolation'\n    SECURITY_MECHANISM_VIOLATION = 'SecurityServiceOrMechanismViolation'\n    TIME_DOMAIN_VIOLATION = 'TimeDomainViolation'\n    NOT_SPECIFIED = 
'NotSpecified'\n\n\n# Default resource type for alert\nDEFAULT_RESOURCE_TYPE = 'Storage'\n\n# Default port for connecting to alert source\nDEFAULT_SNMP_CONNECT_PORT = 161\n\n# Default retry count for connecting to alert source\nDEFAULT_SNMP_RETRY_NUM = 1\n\n# Default expiration time (in sec) for an alert source connect request\nDEFAULT_SNMP_EXPIRATION_TIME = 2\n\n# OID used for SNMP query; the OID below refers to sysDescr\nSNMP_QUERY_OID = '1.3.6.1.2.1.1.1.0'\n\n# Alert id for internal alerts\nSNMP_CONNECTION_FAILED_ALERT_ID = '19660818'\n\n# Maps to convert config values to pysnmp values\nAUTH_PROTOCOL_MAP = {\"hmacsha\": config.usmHMACSHAAuthProtocol,\n                     \"hmacmd5\": config.usmHMACMD5AuthProtocol,\n                     \"hmcsha2224\": config.usmHMAC128SHA224AuthProtocol,\n                     \"hmcsha2256\": config.usmHMAC192SHA256AuthProtocol,\n                     \"hmcsha2384\": config.usmHMAC256SHA384AuthProtocol,\n                     \"hmcsha2512\": config.usmHMAC384SHA512AuthProtocol,\n                     \"none\": \"None\"}\n\nPRIVACY_PROTOCOL_MAP = {\"aes\": config.usmAesCfb128Protocol,\n                        \"des\": config.usmDESPrivProtocol,\n                        \"aes192\": config.usmAesCfb192Protocol,\n                        \"aes256\": config.usmAesCfb256Protocol,\n                        \"3des\": config.usm3DESEDEPrivProtocol,\n                        \"none\": \"None\"}\n\n\n# Enumerations for SNMP security level\nclass SecurityLevel(object):\n    AUTHPRIV = 'authPriv'\n    AUTHNOPRIV = 'authNoPriv'\n    NOAUTHNOPRIV = 'noAuthnoPriv'\n\n\n# Performance collection constants and common models\n# Metric model\nmetric_struct = namedtuple(\"Metric\", \"name labels values\")\n\n\nclass ResourceType(object):\n    STORAGE = 'storage'\n    STORAGE_POOL = 'storagePool'\n    VOLUME = 'volume'\n    CONTROLLER = 'controller'\n    PORT = 'port'\n    DISK = 'disk'\n    FILESYSTEM = 'filesystem'\n    SHARE = 'share'\n\n    ALL = (STORAGE, STORAGE_POOL, VOLUME, CONTROLLER,\n           PORT, DISK, FILESYSTEM, SHARE)\n\n\n# Unified Array metrics model\nDELFIN_ARRAY_METRICS = [\n    \"responseTime\",\n    \"throughput\",\n    \"readThroughput\",\n    \"writeThroughput\",\n    \"requests\",\n    \"readRequests\",\n    \"writeRequests\"\n]\n\nBLOCK_SIZE = 4096\n\n\nclass ResourceSync(object):\n    START = 100\n    SUCCEED = 100\n    FAILED = 101\n\n\nclass TelemetryCollection(object):\n    \"\"\"Performance monitoring task name\"\"\"\n    PERFORMANCE_TASK_METHOD = \"delfin.task_manager.scheduler.schedulers.\" \\\n                              \"telemetry.performance_collection_handler.\" \\\n                              \"PerformanceCollectionHandler\"\n    \"\"\"Failed Performance monitoring job interval\"\"\"\n    FAILED_JOB_SCHEDULE_INTERVAL = 900\n    \"\"\"Failed Performance monitoring retry count\"\"\"\n    MAX_FAILED_JOB_RETRY_COUNT = 5\n    \"\"\"Default performance collection interval\"\"\"\n    DEF_PERFORMANCE_COLLECTION_INTERVAL = 900\n    DEF_PERFORMANCE_HISTORY_ON_RESCHEDULE = 1800\n    DEF_PERFORMANCE_TIMESTAMP_OVERLAP = 60\n    \"\"\"Maximum failed task retry window in seconds\"\"\"\n    MAX_FAILED_TASK_RETRY_WINDOW = 7200\n\n\nclass TelemetryTaskStatus(object):\n    \"\"\"Telemetry task enum\"\"\"\n    TASK_EXEC_STATUS_SUCCESS = True\n    TASK_EXEC_STATUS_FAILURE = False\n\n\nclass TelemetryJobStatus(object):\n    \"\"\"Telemetry jobs enum\"\"\"\n    FAILED_JOB_STATUS_SUCCESS = \"Success\"\n    FAILED_JOB_STATUS_RETRYING = \"Retrying\"\n    FAILED_JOB_STATUS_INIT 
= \"Initialized\"\n\n\nMetric = namedtuple('Metric', 'name unit description')\n\n\nclass MetricUnit:\n    IOPS = 'IOPS'\n    MBS = 'MB/s'\n    MS = 'ms'\n    KB = 'KB'\n    PERCENTAGE = '%'\n\n    ALL = (IOPS, MBS, MS, KB, PERCENTAGE)\n\n\nclass Metrics:\n    IOPS = Metric(name='iops',\n                  unit=MetricUnit.IOPS,\n                  description='Read/write operations per second')\n    READ_IOPS = Metric(name='readIops',\n                       unit=MetricUnit.IOPS,\n                       description='Read operations per second')\n    WRITE_IOPS = Metric(name='writeIops',\n                        unit=MetricUnit.IOPS,\n                        description='Write operations per second')\n    THROUGHPUT = Metric(name='throughput',\n                        unit=MetricUnit.MBS,\n                        description='Total data transferred per second')\n    READ_THROUGHPUT = Metric(name='readThroughput',\n                             unit=MetricUnit.MBS,\n                             description='Total read data transferred per '\n                                         'second')\n    WRITE_THROUGHPUT = Metric(name='writeThroughput',\n                              unit=MetricUnit.MBS,\n                              description='Total write data transferred per '\n                                          'second')\n    RESPONSE_TIME = Metric(name='responseTime',\n                           unit=MetricUnit.MS,\n                           description='Average time taken for an IO '\n                                       'operation in ms')\n    READ_RESPONSE_TIME = Metric(name='readResponseTime',\n                                unit=MetricUnit.MS,\n                                description='Read average time taken for an '\n                                            'IO operation in ms')\n    WRITE_RESPONSE_TIME = Metric(name='writeResponseTime',\n                                 unit=MetricUnit.MS,\n                                 description='Write average time taken for an '\n                                             'IO operation in ms')\n    IO_SIZE = Metric(name='ioSize',\n                     unit=MetricUnit.KB,\n                     description='The average size of IO requests in KB')\n    READ_IO_SIZE = Metric(name='readIoSize',\n                          unit=MetricUnit.KB,\n                          description='The average size of read IO requests '\n                                      'in KB')\n    WRITE_IO_SIZE = Metric(name='writeIoSize',\n                           unit=MetricUnit.KB,\n                           description='The average size of write IO requests '\n                                       'in KB')\n    CACHE_HIT_RATIO = Metric(name='cacheHitRatio',\n                             unit=MetricUnit.PERCENTAGE,\n                             description='Percentage of ops that are cache '\n                                         'hits')\n    READ_CACHE_HIT_RATIO = Metric(name='readCacheHitRatio',\n                                  unit=MetricUnit.PERCENTAGE,\n                                  description='Percentage of read ops that '\n                                              'are cache hits')\n    WRITE_CACHE_HIT_RATIO = Metric(name='writeCacheHitRatio',\n                                   unit=MetricUnit.PERCENTAGE,\n                                   description='Percentage of write ops that '\n                                               'are cache hits')\n    CPU_USAGE = Metric(name='cpuUsage',\n                       
unit=MetricUnit.PERCENTAGE,\n                       description='Percentage of cpu usage.')\n\n\nclass StorageMetric:\n    \"\"\"Storage metrics\"\"\"\n    IOPS = Metrics.IOPS\n    READ_IOPS = Metrics.READ_IOPS\n    WRITE_IOPS = Metrics.WRITE_IOPS\n    THROUGHPUT = Metrics.THROUGHPUT\n    READ_THROUGHPUT = Metrics.READ_THROUGHPUT\n    WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT\n    RESPONSE_TIME = Metrics.RESPONSE_TIME\n    READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME\n    WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME\n    CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO\n    READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO\n    WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO\n    IO_SIZE = Metrics.IO_SIZE\n    READ_IO_SIZE = Metrics.READ_IO_SIZE\n    WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE\n\n\nclass StoragePoolMetric:\n    \"\"\"Storage pool metrics\"\"\"\n    IOPS = Metrics.IOPS\n    READ_IOPS = Metrics.READ_IOPS\n    WRITE_IOPS = Metrics.WRITE_IOPS\n    THROUGHPUT = Metrics.THROUGHPUT\n    READ_THROUGHPUT = Metrics.READ_THROUGHPUT\n    WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT\n    RESPONSE_TIME = Metrics.RESPONSE_TIME\n    READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME\n    WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME\n    CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO\n    READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO\n    WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO\n    IO_SIZE = Metrics.IO_SIZE\n    READ_IO_SIZE = Metrics.READ_IO_SIZE\n    WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE\n\n\nclass VolumeMetric:\n    \"\"\"Volume metrics\"\"\"\n    IOPS = Metrics.IOPS\n    READ_IOPS = Metrics.READ_IOPS\n    WRITE_IOPS = Metrics.WRITE_IOPS\n    THROUGHPUT = Metrics.THROUGHPUT\n    READ_THROUGHPUT = Metrics.READ_THROUGHPUT\n    WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT\n    RESPONSE_TIME = Metrics.RESPONSE_TIME\n    READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME\n    WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME\n    IO_SIZE = Metrics.IO_SIZE\n    READ_IO_SIZE = Metrics.READ_IO_SIZE\n    WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE\n    CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO\n    READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO\n    WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO\n\n\nclass ControllerMetric:\n    \"\"\"Controller metrics\"\"\"\n    IOPS = Metrics.IOPS\n    READ_IOPS = Metrics.READ_IOPS\n    WRITE_IOPS = Metrics.WRITE_IOPS\n    THROUGHPUT = Metrics.THROUGHPUT\n    READ_THROUGHPUT = Metrics.READ_THROUGHPUT\n    WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT\n    RESPONSE_TIME = Metrics.RESPONSE_TIME\n    READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME\n    WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME\n    CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO\n    READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO\n    WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO\n    IO_SIZE = Metrics.IO_SIZE\n    READ_IO_SIZE = Metrics.READ_IO_SIZE\n    WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE\n    CPU_USAGE = Metrics.CPU_USAGE\n\n\nclass PortMetric:\n    \"\"\"Port metrics\"\"\"\n    IOPS = Metrics.IOPS\n    READ_IOPS = Metrics.READ_IOPS\n    WRITE_IOPS = Metrics.WRITE_IOPS\n    THROUGHPUT = Metrics.THROUGHPUT\n    READ_THROUGHPUT = Metrics.READ_THROUGHPUT\n    WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT\n    RESPONSE_TIME = Metrics.RESPONSE_TIME\n    READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME\n    WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME\n    CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO\n    READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO\n    
WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO\n    IO_SIZE = Metrics.IO_SIZE\n    READ_IO_SIZE = Metrics.READ_IO_SIZE\n    WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE\n\n\nclass DiskMetric:\n    \"\"\"Disk metrics\"\"\"\n    IOPS = Metrics.IOPS\n    READ_IOPS = Metrics.READ_IOPS\n    WRITE_IOPS = Metrics.WRITE_IOPS\n    THROUGHPUT = Metrics.THROUGHPUT\n    READ_THROUGHPUT = Metrics.READ_THROUGHPUT\n    WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT\n    RESPONSE_TIME = Metrics.RESPONSE_TIME\n    READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME\n    WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME\n    CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO\n    READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO\n    WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO\n\n\nclass FileSystemMetric:\n    \"\"\"File system metrics\"\"\"\n    IOPS = Metrics.IOPS\n    READ_IOPS = Metrics.READ_IOPS\n    WRITE_IOPS = Metrics.WRITE_IOPS\n    THROUGHPUT = Metrics.THROUGHPUT\n    READ_THROUGHPUT = Metrics.READ_THROUGHPUT\n    WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT\n    READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME\n    WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME\n    IO_SIZE = Metrics.IO_SIZE\n    READ_IO_SIZE = Metrics.READ_IO_SIZE\n    WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE\n\n\nSNMP_SUPPORTED_MODELS = ('vsp', '3par', 'cmode', 'msa', 'hnas')\n\n\nclass HostStatus(object):\n    NORMAL = 'normal'\n    OFFLINE = 'offline'\n    ABNORMAL = 'abnormal'\n    DEGRADED = 'degraded'\n\n    ALL = (NORMAL, OFFLINE, ABNORMAL, DEGRADED)\n\n\nclass HostOSTypes(object):\n    LINUX = 'Linux'\n    WINDOWS = 'Windows'\n    SOLARIS = 'Solaris'\n    HP_UX = 'HP-UX'\n    AIX = 'AIX'\n    XEN_SERVER = 'XenServer'\n    VMWARE_ESX = 'VMware ESX'\n    LINUX_VIS = 'LINUX_VIS'\n    WINDOWS_SERVER_2012 = 'Windows Server 2012'\n    ORACLE_VM = 'Oracle VM'\n    OPEN_VMS = 'Open VMS'\n    MAC_OS = 'Mac OS'\n    UNKNOWN = 'Unknown'\n\n    ALL = (LINUX, WINDOWS, SOLARIS, HP_UX, AIX, XEN_SERVER, VMWARE_ESX,\n           LINUX_VIS, WINDOWS_SERVER_2012, ORACLE_VM, OPEN_VMS, MAC_OS,\n           UNKNOWN)\n\n\nclass InitiatorStatus(object):\n    ONLINE = 'online'\n    OFFLINE = 'offline'\n    UNKNOWN = 'unknown'\n\n    ALL = (ONLINE, OFFLINE, UNKNOWN)\n"
  },
  {
    "path": "delfin/common/sqlalchemyutils.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# Copyright 2010-2011 OpenStack Foundation\n# Copyright 2012 Justin Santa Barbara\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Implementation of paginate query.\"\"\"\nimport datetime\n\nfrom oslo_log import log as logging\nfrom six.moves import range\nimport sqlalchemy\nimport sqlalchemy.sql as sa_sql\nfrom sqlalchemy.sql import type_api\n\nfrom delfin.db import api\nfrom delfin import exception\nfrom delfin.i18n import _\n\n\nLOG = logging.getLogger(__name__)\n\n_TYPE_SCHEMA = {\n    'datetime': datetime.datetime(1900, 1, 1),\n    'big_integer': 0,\n    'integer': 0,\n    'string': ''\n}\n\n\ndef _get_default_column_value(model, column_name):\n    \"\"\"Return the default value of the columns from DB table.\n\n    In postgreDB case, if no right default values are being set, an\n    psycopg2.DataError will be thrown.\n    \"\"\"\n    attr = getattr(model, column_name)\n    # Return the default value directly if the model contains. Otherwise return\n    # a default value which is not None.\n    if attr.default and isinstance(attr.default, type_api.TypeEngine):\n        return attr.default.arg\n\n    attr_type = attr.type\n    return _TYPE_SCHEMA[attr_type.__visit_name__]\n\n\n# TODO(wangxiyuan): Use oslo_db.sqlalchemy.utils.paginate_query once it is\n# stable and afforded by the minimum version in requirement.txt.\n# copied from glance/db/sqlalchemy/api.py\ndef paginate_query(query, model, limit, sort_keys, marker=None,\n                   sort_dir=None, sort_dirs=None, offset=None):\n    \"\"\"Returns a query with sorting / pagination criteria added.\n\n    Pagination works by requiring a unique sort_key, specified by sort_keys.\n    (If sort_keys is not unique, then we risk looping through values.)\n    We use the last row in the previous page as the 'marker' for pagination.\n    So we must return values that follow the passed marker in the order.\n    With a single-valued sort_key, this would be easy: sort_key > X.\n    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat\n    the lexicographical ordering:\n    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)\n\n    We also have to cope with different sort_directions.\n\n    Typically, the id of the last row is used as the client-facing pagination\n    marker, then the actual marker object must be fetched from the db and\n    passed in to us as marker.\n\n    :param query: the query object to which we should add paging/sorting\n    :param model: the ORM model class\n    :param limit: maximum number of items to return\n    :param sort_keys: array of attributes by which results should be sorted\n    :param marker: the last item of the previous page; we returns the next\n                    results after this value.\n    :param sort_dir: direction 
in which results should be sorted (asc, desc)\n    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys\n    :param offset: the number of items to skip from the marker or from the\n                    first element.\n\n    :rtype: sqlalchemy.orm.query.Query\n    :return: The query with sorting/pagination added.\n    \"\"\"\n\n    if sort_dir and sort_dirs:\n        raise AssertionError('Both sort_dir and sort_dirs specified.')\n\n    # Default the sort direction to ascending\n    if sort_dirs is None and sort_dir is None:\n        sort_dir = 'asc'\n\n    # Ensure a per-column sort direction\n    if sort_dirs is None:\n        sort_dirs = [sort_dir for _sort_key in sort_keys]\n\n    if len(sort_dirs) != len(sort_keys):\n        raise AssertionError(\n            'sort_dirs length is not equal to sort_keys length.')\n\n    # Add sorting\n    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):\n        sort_dir_func = {\n            'asc': sqlalchemy.asc,\n            'desc': sqlalchemy.desc,\n        }[current_sort_dir]\n\n        try:\n            sort_key_attr = getattr(model, current_sort_key)\n        except AttributeError:\n            raise exception.InvalidInput('Invalid sort key')\n        if not api.is_orm_value(sort_key_attr):\n            raise exception.InvalidInput('Invalid sort key')\n        query = query.order_by(sort_dir_func(sort_key_attr))\n\n    # Add pagination\n    if marker is not None:\n        marker_values = []\n        for sort_key in sort_keys:\n            v = getattr(marker, sort_key)\n            if v is None:\n                v = _get_default_column_value(model, sort_key)\n            marker_values.append(v)\n\n        # Build up an array of sort criteria as in the docstring\n        criteria_list = []\n        for i in range(0, len(sort_keys)):\n            crit_attrs = []\n            for j in range(0, i):\n                model_attr = getattr(model, sort_keys[j])\n                default = _get_default_column_value(model, sort_keys[j])\n                attr = sa_sql.expression.case([(model_attr.isnot(None),\n                                                model_attr), ],\n                                              else_=default)\n                crit_attrs.append((attr == marker_values[j]))\n\n            model_attr = getattr(model, sort_keys[i])\n            default = _get_default_column_value(model, sort_keys[i])\n            attr = sa_sql.expression.case([(model_attr.isnot(None),\n                                            model_attr), ],\n                                          else_=default)\n            if sort_dirs[i] == 'desc':\n                crit_attrs.append((attr < marker_values[i]))\n            elif sort_dirs[i] == 'asc':\n                crit_attrs.append((attr > marker_values[i]))\n            else:\n                raise ValueError(_(\"Unknown sort direction, \"\n                                   \"must be 'desc' or 'asc'\"))\n\n            criteria = sqlalchemy.sql.and_(*crit_attrs)\n            criteria_list.append(criteria)\n\n        f = sqlalchemy.sql.or_(*criteria_list)\n        query = query.filter(f)\n\n    if limit is not None:\n        query = query.limit(limit)\n\n    if offset:\n        query = query.offset(offset)\n\n    return query\n"
  },
  {
    "path": "delfin/context.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2011 OpenStack LLC.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"RequestContext: context for requests that persist through all of delfin.\"\"\"\n\nimport copy\n\nfrom oslo_context import context\nfrom oslo_utils import timeutils\nimport six\n\nfrom delfin.i18n import _\n\n\nclass RequestContext(context.RequestContext):\n    \"\"\"Security context and request information.\n\n    Represents the user taking a given action within the system.\n\n    \"\"\"\n\n    def __init__(self, user_id=None, project_id=None, is_admin=None,\n                 read_deleted=\"no\", roles=None, remote_address=None,\n                 timestamp=None, request_id=None, auth_token=None,\n                 overwrite=True, quota_class=None,\n                 service_catalog=None, **kwargs):\n        \"\"\"Initialize RequestContext.\n\n        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'\n            indicates deleted records are visible, 'only' indicates that\n            *only* deleted records are visible.\n\n        :param overwrite: Set to False to ensure that the greenthread local\n            copy of the index is not overwritten.\n\n        :param kwargs: Extra arguments that might be present, but we ignore\n            because they possibly came in from older rpc messages.\n        \"\"\"\n\n        user = kwargs.pop('user', None)\n        tenant = kwargs.pop('tenant', None)\n        super(RequestContext, self).__init__(\n            auth_token=auth_token,\n            user=user_id or user,\n            domain=kwargs.pop('domain', None),\n            user_domain=kwargs.pop('user_domain', None),\n            project_domain=kwargs.pop('project_domain', None),\n            is_admin=is_admin,\n            read_only=kwargs.pop('read_only', False),\n            show_deleted=kwargs.pop('show_deleted', False),\n            request_id=request_id,\n            resource_uuid=kwargs.pop('resource_uuid', None),\n            overwrite=overwrite,\n            roles=roles)\n\n        self.user_id = self.user\n        self.tenant = project_id or tenant\n        self.project_id = self.tenant\n        self.storage_id = None\n\n        self.read_deleted = read_deleted\n        self.remote_address = remote_address\n        if not timestamp:\n            timestamp = timeutils.utcnow()\n        if isinstance(timestamp, six.string_types):\n            timestamp = timeutils.parse_strtime(timestamp)\n        self.timestamp = timestamp\n        if service_catalog:\n            self.service_catalog = [s for s in service_catalog\n                                    if s.get('type') in ('compute', 'volume')]\n        else:\n            self.service_catalog = []\n\n        self.quota_class = quota_class\n\n    def _get_read_deleted(self):\n        return 
self._read_deleted\n\n    def _set_read_deleted(self, read_deleted):\n        if read_deleted not in ('no', 'yes', 'only'):\n            raise ValueError(_(\"read_deleted can only be one of 'no', \"\n                               \"'yes' or 'only', not %r\") % read_deleted)\n        self._read_deleted = read_deleted\n\n    def _del_read_deleted(self):\n        del self._read_deleted\n\n    read_deleted = property(_get_read_deleted, _set_read_deleted,\n                            _del_read_deleted)\n\n    def to_dict(self):\n        values = super(RequestContext, self).to_dict()\n        values.update({\n            'user_id': getattr(self, 'user_id', None),\n            'project_id': getattr(self, 'project_id', None),\n            'storage_id': getattr(self, 'storage_id', None),\n            'read_deleted': getattr(self, 'read_deleted', None),\n            'remote_address': getattr(self, 'remote_address', None),\n            'timestamp': self.timestamp.isoformat() if hasattr(\n                self, 'timestamp') else None,\n            'quota_class': getattr(self, 'quota_class', None),\n            'service_catalog': getattr(self, 'service_catalog', None)})\n        return values\n\n    @classmethod\n    def from_dict(cls, values):\n        return cls(**values)\n\n    def elevated(self, read_deleted=None, overwrite=False):\n        \"\"\"Return a version of this context with admin flag set.\"\"\"\n        ctx = copy.deepcopy(self)\n        ctx.is_admin = True\n\n        if 'admin' not in ctx.roles:\n            ctx.roles.append('admin')\n\n        if read_deleted is not None:\n            ctx.read_deleted = read_deleted\n\n        return ctx\n\n\ndef get_admin_context(read_deleted=\"no\"):\n    return RequestContext(user_id=None,\n                          project_id=None,\n                          is_admin=True,\n                          read_deleted=read_deleted,\n                          overwrite=False)\n"
  },
  {
    "path": "delfin/coordination.py",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Tooz Coordination and locking utilities.\"\"\"\n\nimport inspect\n\nimport decorator\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_utils import uuidutils\nimport six\nfrom tooz import coordination\nfrom tooz import locking\nfrom tooz import partitioner\n\nfrom delfin import cryptor\nfrom delfin import exception\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\ncoordination_opts = [\n    cfg.StrOpt('backend_type',\n               default='redis',\n               help='The backend type for distributed coordination.'\n                    'Backend could be redis, mysql, zookeeper and so on.'\n                    'For more supported backend, please check Tooz'),\n    cfg.StrOpt('backend_user',\n               default='',\n               help='The backend user for distributed coordination.'),\n    cfg.StrOpt('backend_password',\n               help='The backend password to use '\n                    'for distributed coordination.'),\n    cfg.StrOpt('backend_server',\n               default='127.0.0.1:6379',\n               help='The backend server for distributed coordination.'),\n    cfg.IntOpt('expiration',\n               default=100,\n               help='The expiration(in second) of the lock.'),\n    cfg.IntOpt('lease_timeout',\n               default=15,\n               help='The expiration(in second) of the lock.'),\n]\n\nCONF = cfg.CONF\nCONF.register_opts(coordination_opts, group='coordination')\n\n\nclass Coordinator(object):\n    \"\"\"Tooz coordination wrapper.\n\n    Coordination member id is created from concatenated `prefix` and\n    `agent_id` parameters.\n\n    :param str agent_id: Agent identifier\n    :param str prefix: Used to provide member identifier with a\n    meaningful prefix.\n    \"\"\"\n\n    def __init__(self, agent_id=None, prefix=''):\n        self.coordinator = None\n        self.agent_id = agent_id or uuidutils.generate_uuid()\n        self.started = False\n        self.prefix = prefix\n\n    def start(self):\n        \"\"\"Connect to coordination back end.\"\"\"\n        if self.started:\n            return\n\n        # NOTE(gouthamr): Tooz expects member_id as a byte string.\n        member_id = (self.prefix + self.agent_id).encode('ascii')\n\n        LOG.info('Started Coordinator (Agent ID: %(agent)s, prefix: '\n                 '%(prefix)s)', {'agent': self.agent_id,\n                                 'prefix': self.prefix})\n\n        backend_url = _get_redis_backend_url()\n        self.coordinator = coordination.get_coordinator(\n            backend_url, member_id,\n            timeout=CONF.coordination.expiration)\n        self.coordinator.start(start_heart=True)\n        self.started = True\n\n    def stop(self):\n        \"\"\"Disconnect from coordination back end.\"\"\"\n        msg = 'Stopped Coordinator (Agent ID: %(agent)s, prefix: %(prefix)s)'\n        msg_args = {'agent': self.agent_id, 'prefix': self.prefix}\n        if self.started:\n           
 self.coordinator.stop()\n            self.coordinator = None\n            self.started = False\n\n        LOG.info(msg, msg_args)\n\n    def get_lock(self, name):\n        \"\"\"Return a Tooz back end lock.\n\n        :param str name: The lock name that is used to identify it\n            across all nodes.\n        \"\"\"\n        # NOTE(gouthamr): Tooz expects lock name as a byte string\n        lock_name = (self.prefix + name).encode('ascii')\n        if self.started:\n            return self.coordinator.get_lock(lock_name)\n        else:\n            raise exception.LockCreationFailed(_('Coordinator uninitialized.'))\n\n\nLOCK_COORDINATOR = Coordinator(prefix='delfin-')\n\n\nclass LeaderElectionCoordinator(Coordinator):\n\n    def __init__(self, agent_id=None):\n        super(LeaderElectionCoordinator, self). \\\n            __init__(agent_id=agent_id, prefix=\"leader_election\")\n        self.group = None\n\n    def start(self):\n        \"\"\"Connect to coordination back end.\"\"\"\n        if self.started:\n            return\n\n        # NOTE(gouthamr): Tooz expects member_id as a byte string.\n        member_id = (self.prefix + \"-\" + self.agent_id).encode('ascii')\n        LOG.info('Started Coordinator (Agent ID: %(agent)s, '\n                 'prefix: %(prefix)s)', {'agent': self.agent_id,\n                                         'prefix': self.prefix})\n\n        backend_url = _get_redis_backend_url()\n        self.coordinator = coordination.get_coordinator(\n            backend_url, member_id,\n            timeout=CONF.coordination.lease_timeout)\n        self.coordinator.start()\n        self.started = True\n\n    def ensure_group(self, group):\n        req = self.coordinator.get_groups()\n        groups = req.get()\n        try:\n            # Check if group exist\n            groups.index(group)\n        except Exception:\n            # Create a group if not exist\n            LOG.debug(\"Exception is expected as requested group not available \"\n                      \"in tooz backend. 
\n\n\nclass LeaderElectionCoordinator(Coordinator):\n\n    def __init__(self, agent_id=None):\n        super(LeaderElectionCoordinator, self). \\\n            __init__(agent_id=agent_id, prefix=\"leader_election\")\n        self.group = None\n\n    def start(self):\n        \"\"\"Connect to coordination back end.\"\"\"\n        if self.started:\n            return\n\n        # NOTE(gouthamr): Tooz expects member_id as a byte string.\n        member_id = (self.prefix + \"-\" + self.agent_id).encode('ascii')\n        LOG.info('Started Coordinator (Agent ID: %(agent)s, '\n                 'prefix: %(prefix)s)', {'agent': self.agent_id,\n                                         'prefix': self.prefix})\n\n        backend_url = _get_redis_backend_url()\n        self.coordinator = coordination.get_coordinator(\n            backend_url, member_id,\n            timeout=CONF.coordination.lease_timeout)\n        self.coordinator.start()\n        self.started = True\n\n    def ensure_group(self, group):\n        req = self.coordinator.get_groups()\n        groups = req.get()\n        try:\n            # Check if the group exists\n            groups.index(group)\n        except Exception:\n            # Create the group if it does not exist\n            LOG.debug(\"Exception is expected as requested group not available \"\n                      \"in tooz backend. Creating the group\")\n            request = self.coordinator.create_group(group)\n            request.get()\n        else:\n            LOG.info(\"Leader group already exists\")\n\n        self.group = group\n\n    def join_group(self):\n        if self.group:\n            request = self.coordinator.join_group(self.group)\n            request.get()\n\n    def register_on_start_leading_callback(self, callback):\n        return self.coordinator.watch_elected_as_leader(self.group, callback)\n\n    def send_heartbeat(self):\n        return self.coordinator.heartbeat()\n\n    def start_leader_watch(self):\n        return self.coordinator.run_watchers()\n\n    def stop(self):\n        \"\"\"Disconnect from coordination back end.\"\"\"\n        if self.started:\n            self.coordinator.stop()\n            self.coordinator = None\n            self.started = False\n\n        LOG.info('Stopped Coordinator (Agent ID: %(agent)s)',\n                 {'agent': self.agent_id})\n\n    def is_still_leader(self):\n        for acquired_lock in self.coordinator._acquired_locks:\n            return acquired_lock.is_still_owner()\n        return False\n\n\nclass Lock(locking.Lock):\n    \"\"\"Lock with dynamic name.\n\n    :param str lock_name: Lock name.\n    :param dict lock_data: Data for lock name formatting.\n    :param coordinator: Coordinator object to use when creating lock.\n        Defaults to the global coordinator.\n\n    Using it like so::\n\n        with Lock('mylock'):\n           ...\n\n    ensures that only one process at a time will execute code in context.\n    Lock name can be formatted using Python format string syntax::\n\n        Lock('foo-{share.id}', {'share': ...})\n\n    Available field names are keys of lock_data.\n    \"\"\"\n\n    def __init__(self, lock_name, lock_data=None, coordinator=None):\n        super(Lock, self).__init__(six.text_type(id(self)))\n        lock_data = lock_data or {}\n        self.coordinator = coordinator or LOCK_COORDINATOR\n        self.blocking = True\n        self.lock = self._prepare_lock(lock_name, lock_data)\n\n    def _prepare_lock(self, lock_name, lock_data):\n        if not isinstance(lock_name, six.string_types):\n            raise ValueError(_('Not a valid string: %s') % lock_name)\n        return self.coordinator.get_lock(lock_name.format(**lock_data))\n\n    def acquire(self, blocking=None):\n        \"\"\"Attempts to acquire lock.\n\n        :param blocking: If True, blocks until the lock is acquired. If False,\n            returns right away. Otherwise, the value is used as a timeout\n            value and the call returns after at most this number of seconds.\n        :return: True if acquired, False if not\n        :rtype: bool\n        \"\"\"\n        blocking = self.blocking if blocking is None else blocking\n        return self.lock.acquire(blocking=blocking)\n\n    def release(self):\n        \"\"\"Attempts to release lock.\n\n        The behavior of releasing a lock which was not acquired in the first\n        place is undefined.\n        \"\"\"\n        self.lock.release()
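\n\n# Illustrative sketch of a non-blocking acquire (assumes LOCK_COORDINATOR has\n# been started; 'storage_id' is an example value):\n#\n#     lock = Lock('storage-{id}', {'id': storage_id})\n#     if lock.acquire(blocking=False):\n#         try:\n#             ...  # critical section\n#         finally:\n#             lock.release()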
\n\n\ndef synchronized(lock_name, blocking=True, coordinator=None):\n    \"\"\"Synchronization decorator.\n\n    :param str lock_name: Lock name.\n    :param blocking: If True, blocks until the lock is acquired.\n            If False, raises an exception when not acquired. Otherwise,\n            the value is used as a timeout value and if the lock is not\n            acquired after this number of seconds an exception is raised.\n    :param coordinator: Coordinator object to use when creating lock.\n        Defaults to the global coordinator.\n    :raises tooz.coordination.LockAcquireFailed: if lock is not acquired\n\n    Decorating a method like so::\n\n        @synchronized('mylock')\n        def foo(self, *args):\n           ...\n\n    ensures that only one process will execute the foo method at a time.\n\n    Different methods can share the same lock::\n\n        @synchronized('mylock')\n        def foo(self, *args):\n           ...\n\n        @synchronized('mylock')\n        def bar(self, *args):\n           ...\n\n    This way only one of either foo or bar can be executing at a time.\n\n    Lock name can be formatted using Python format string syntax::\n\n        @synchronized('{f_name}-{shr.id}-{snap[name]}')\n        def foo(self, shr, snap):\n           ...\n\n    Available field names are: decorated function parameters and\n    `f_name` as the decorated function name.\n    \"\"\"\n\n    @decorator.decorator\n    def _synchronized(f, *a, **k):\n        call_args = inspect.getcallargs(f, *a, **k)\n        call_args['f_name'] = f.__name__\n        lock = Lock(lock_name, call_args, coordinator)\n        with lock(blocking):\n            LOG.info('Lock \"%(name)s\" acquired by \"%(function)s\".',\n                     {'name': lock_name, 'function': f.__name__})\n            return f(*a, **k)\n\n    return _synchronized\n\n\ndef _get_redis_backend_url():\n    cipher_password = getattr(CONF.coordination, 'backend_password', None)\n    if cipher_password is not None:\n        # If a password is needed, it should be set in the config file\n        # as cipher text. In this scenario the backend URL takes the form\n        # {backend_type}://[{user}]:{password}@{ip}:{port}.\n        plaintext_password = cryptor.decode(cipher_password)\n        # The user may be empty\n        backend_url = '{backend_type}://{user}:{password}@{server}' \\\n            .format(backend_type=CONF.coordination.backend_type,\n                    user=CONF.coordination.backend_user,\n                    password=plaintext_password,\n                    server=CONF.coordination.backend_server)\n\n    else:\n        backend_url = '{backend_type}://{server}' \\\n            .format(backend_type=CONF.coordination.backend_type,\n                    server=CONF.coordination.backend_server)
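\n    # With the default options this yields 'redis://127.0.0.1:6379'; with\n    # credentials set it becomes '{backend_type}://{user}:{password}@{server}'.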
\n    return backend_url\n\n\nclass ConsistentHashing(Coordinator):\n    GROUP_NAME = 'partitioner_group'\n    PARTITIONS = 2**5\n\n    def __init__(self):\n        super(ConsistentHashing, self). \\\n            __init__(agent_id=CONF.host, prefix=\"\")\n\n    def join_group(self):\n        try:\n            weight = CONF.telemetry.node_weight\n            self.coordinator.join_partitioned_group(self.GROUP_NAME,\n                                                    weight=weight,\n                                                    partitions=self.PARTITIONS)\n        except coordination.MemberAlreadyExist:\n            LOG.info('Member %s already in partitioner_group' % CONF.host)\n\n    def get_task_executor(self, task_id):\n        part = partitioner.Partitioner(self.coordinator, self.GROUP_NAME)\n        members = part.members_for_object(task_id)\n        for member in members:\n            LOG.info('For task id %s, host should be %s' % (task_id, member))\n            return member.decode('utf-8')\n\n    def register_watcher_func(self, on_node_join, on_node_leave):\n        self.coordinator.watch_join_group(self.GROUP_NAME, on_node_join)\n        self.coordinator.watch_leave_group(self.GROUP_NAME, on_node_leave)\n\n    def watch_group_change(self):\n        self.coordinator.run_watchers()\n\n\nclass GroupMembership(Coordinator):\n\n    def __init__(self, agent_id):\n        super(GroupMembership, self). \\\n            __init__(agent_id=agent_id, prefix=\"\")\n\n    def create_group(self, group):\n        try:\n            self.coordinator.create_group(group.encode()).get()\n        except coordination.GroupAlreadyExist:\n            LOG.info(\"Group {0} already exists\".format(group))\n\n    def delete_group(self, group):\n        try:\n            self.coordinator.delete_group(group.encode()).get()\n        except coordination.GroupNotCreated:\n            LOG.info(\"Group {0} not created\".format(group))\n        except coordination.GroupNotEmpty:\n            LOG.info(\"Group {0} not empty\".format(group))\n        except coordination.ToozError:\n            LOG.info(\"Internal error while deleting group {0}\".format(group))\n\n    def join_group(self, group):\n        try:\n            self.coordinator.join_group(group.encode()).get()\n        except coordination.MemberAlreadyExist:\n            LOG.info('Member %s already in group' % group)\n\n    def leave_group(self, group):\n        try:\n            self.coordinator.leave_group(group.encode()).get()\n        except coordination.GroupNotCreated:\n            LOG.info('Group %s not created' % group)\n\n    def get_members(self, group):\n        try:\n            return self.coordinator.get_members(group.encode()).get()\n        except coordination.GroupNotCreated:\n            LOG.info('Group %s not created' % group)\n\n        return None\n\n    def register_watcher_func(self, group, on_process_join, on_process_leave):\n        self.coordinator.watch_join_group(group.encode(), on_process_join)\n        self.coordinator.watch_leave_group(group.encode(), on_process_leave)\n\n    def watch_group_change(self):\n        self.coordinator.run_watchers()\n"
  },
  {
    "path": "delfin/cryptor.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nfrom abc import ABCMeta, abstractmethod\n\nfrom oslo_config import cfg\nfrom oslo_utils import importutils\n\nCONF = cfg.CONF\n\n\nclass ICryptor(metaclass=ABCMeta):\n\n    @staticmethod\n    @abstractmethod\n    def encode(plain_text):\n        pass\n\n    @staticmethod\n    @abstractmethod\n    def decode(cipher_text):\n        pass\n\n\nclass _Base64(ICryptor):\n\n    @staticmethod\n    def encode(data):\n        \"\"\"Base64 encode\n\n        :param data: The plain text that need to be encode\n        :type str:\n        :return cipher data: The encoded cipher text\n        :type str:\n        \"\"\"\n        return base64.b64encode(data.encode()).decode('utf-8')\n\n    @staticmethod\n    def decode(data):\n        \"\"\"Base64 decode\n\n        :param data: The cipher text that need to be decode\n        :type str:\n        :return plain data: The decoded plain text\n        :type str:\n        \"\"\"\n        return base64.b64decode(data).decode('utf-8')\n\n\n_cryptor = importutils.import_class(CONF.delfin_cryptor)\n\n\ndef encode(plain_text):\n    return _cryptor.encode(plain_text)\n\n\ndef decode(cipher_text):\n    return _cryptor.decode(cipher_text)\n"
  },
  {
    "path": "delfin/db/__init__.py",
    "content": "# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\"\"\"\nDB abstraction for Delfin\n\"\"\"\nfrom delfin.db.api import *  # noqa\n"
  },
  {
    "path": "delfin/db/api.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Defines interface for DB access.\n\nThe underlying driver is loaded as a :class:`LazyPluggable`.\n\nFunctions in this module are imported into the delfin.db namespace. Call these\nfunctions from delfin.db namespace, not the delfin.db.api namespace.\n\nAll functions in this module return objects that implement a dictionary-like\ninterface. Currently, many of these objects are sqlalchemy objects that\nimplement a dictionary interface. However, a future goal is to have all of\nthese objects be simple dictionaries.\n\n\n**Related Flags**\n\n:backend:  string to lookup in the list of LazyPluggable backends.\n           `sqlalchemy` is the only supported backend right now.\n\n:connection:  string specifying the sqlalchemy connection to use, like:\n              `sqlite:///var/lib/delfin/delfin.sqlite`.\n\n:enable_new_services:  when adding a new service to the database, is it in the\n                       storage_pool of available hardware (Default: True)\n\n\"\"\"\nfrom oslo_config import cfg\nfrom oslo_db import api as db_api\n\ndb_opts = [\n    cfg.StrOpt('db_backend',\n               default='sqlalchemy',\n               help='The backend to use for database.'),\n]\n\nCONF = cfg.CONF\nCONF.register_opts(db_opts, \"database\")\n\n_BACKEND_MAPPING = {'sqlalchemy': 'delfin.db.sqlalchemy.api'}\nIMPL = db_api.DBAPI(CONF.database.db_backend, backend_mapping=_BACKEND_MAPPING,\n                    lazy=True)\n\n\ndef register_db():\n    \"\"\"Create database and tables.\"\"\"\n    IMPL.register_db()\n\n\ndef storage_get(context, storage_id):\n    \"\"\"Retrieve a storage device.\"\"\"\n    return IMPL.storage_get(context, storage_id)\n\n\ndef storage_get_all(context, marker=None, limit=None, sort_keys=None,\n                    sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all storage devices.\n\n    If no sort parameters are specified then the returned volumes are sorted\n    first by the 'created_at' key and then by the 'id' key in descending\n    order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: 
\n\n\ndef register_db():\n    \"\"\"Create database and tables.\"\"\"\n    IMPL.register_db()\n\n\ndef storage_get(context, storage_id):\n    \"\"\"Retrieve a storage device.\"\"\"\n    return IMPL.storage_get(context, storage_id)\n\n\ndef storage_get_all(context, marker=None, limit=None, sort_keys=None,\n                    sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all storage devices.\n\n    If no sort parameters are specified then the returned storage devices\n    are sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of storage devices\n    \"\"\"\n    return IMPL.storage_get_all(context, marker, limit, sort_keys, sort_dirs,\n                                filters, offset)\n\n\ndef storage_create(context, values):\n    \"\"\"Add a storage device from the values dictionary.\"\"\"\n    return IMPL.storage_create(context, values)\n\n\ndef storage_update(context, storage_id, values):\n    \"\"\"Update a storage device with the values dictionary.\"\"\"\n    return IMPL.storage_update(context, storage_id, values)\n\n\ndef storage_delete(context, storage_id):\n    \"\"\"Delete a storage device.\"\"\"\n    return IMPL.storage_delete(context, storage_id)\n\n\ndef volume_create(context, values):\n    \"\"\"Create a volume from the values dictionary.\"\"\"\n    return IMPL.volume_create(context, values)\n\n\ndef volumes_create(context, values):\n    \"\"\"Create multiple volumes.\"\"\"\n    return IMPL.volumes_create(context, values)\n\n\ndef volume_update(context, volume_id, values):\n    \"\"\"Update a volume with the values dictionary.\"\"\"\n    return IMPL.volume_update(context, volume_id, values)\n\n\ndef volumes_update(context, values):\n    \"\"\"Update multiple volumes.\"\"\"\n    return IMPL.volumes_update(context, values)\n\n\ndef volumes_delete(context, values):\n    \"\"\"Delete multiple volumes.\"\"\"\n    return IMPL.volumes_delete(context, values)\n\n\ndef volume_get(context, volume_id):\n    \"\"\"Get a volume or raise an exception if it does not exist.\"\"\"\n    return IMPL.volume_get(context, volume_id)\n\n\ndef volume_get_all(context, marker=None, limit=None, sort_keys=None,\n                   sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all volumes.\n\n    If no sort parameters are specified then the returned volumes are sorted\n    first by the 'created_at' key and then by the 'id' key in descending\n    order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of volumes\n    \"\"\"\n    return IMPL.volume_get_all(context, marker, limit, sort_keys,\n                               sort_dirs, filters, offset)\n\n\ndef volume_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the volumes of a device.\"\"\"\n    return IMPL.volume_delete_by_storage(context, storage_id)\n\n\ndef storage_pool_create(context, storage_pool):\n    \"\"\"Add a storage_pool.\"\"\"\n    return IMPL.storage_pool_create(context, storage_pool)\n\n\ndef storage_pools_create(context, storage_pools):\n    \"\"\"Add multiple storage_pools.\"\"\"\n    return IMPL.storage_pools_create(context, storage_pools)\n\n\ndef storage_pool_update(context, storage_pool_id, storage_pool):\n    \"\"\"Update a storage_pool.\"\"\"\n    return IMPL.storage_pool_update(context, storage_pool_id, storage_pool)\n\n\ndef storage_pools_update(context, storage_pools):
\n    \"\"\"Update multiple storage_pools.\"\"\"\n    return IMPL.storage_pools_update(context, storage_pools)\n\n\ndef storage_pools_delete(context, storage_pools):\n    \"\"\"Delete storage_pools.\"\"\"\n    return IMPL.storage_pools_delete(context, storage_pools)\n\n\ndef storage_pool_get(context, storage_pool_id):\n    \"\"\"Get a storage_pool or raise an exception if it does not exist.\"\"\"\n    return IMPL.storage_pool_get(context, storage_pool_id)\n\n\ndef storage_pool_get_all(context, marker=None, limit=None, sort_keys=None,\n                         sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all storage_pools.\n\n    If no sort parameters are specified then the returned storage_pools\n    are sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of storage_pools\n    \"\"\"\n    return IMPL.storage_pool_get_all(context, marker, limit,\n                                     sort_keys, sort_dirs, filters, offset)\n\n\ndef storage_pool_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the storage_pools of a device.\"\"\"\n    return IMPL.storage_pool_delete_by_storage(context, storage_id)\n\n\ndef controllers_create(context, values):\n    \"\"\"Create multiple controllers.\"\"\"\n    return IMPL.controllers_create(context, values)\n\n\ndef controllers_update(context, values):\n    \"\"\"Update multiple controllers.\"\"\"\n    return IMPL.controllers_update(context, values)\n\n\ndef controllers_delete(context, values):\n    \"\"\"Delete multiple controllers.\"\"\"\n    return IMPL.controllers_delete(context, values)\n\n\ndef controller_create(context, values):\n    \"\"\"Create a controller from the values dictionary.\"\"\"\n    return IMPL.controller_create(context, values)\n\n\ndef controller_update(context, controller_id, values):\n    \"\"\"Update a controller with the values dictionary.\"\"\"\n    return IMPL.controller_update(context, controller_id, values)\n\n\ndef controller_get(context, controller_id):\n    \"\"\"Get a controller or raise an exception if it does not exist.\"\"\"\n    return IMPL.controller_get(context, controller_id)\n\n\ndef controller_delete_by_storage(context, storage_id):\n    \"\"\"Delete a controller or raise an exception if it does not exist.\"\"\"\n    return IMPL.controller_delete_by_storage(context, storage_id)\n\n\ndef controller_get_all(context, marker=None, limit=None, sort_keys=None,\n                       sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all controllers.\n    If no sort parameters are specified then the returned controllers are\n    sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the
\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of controllers\n    \"\"\"\n    return IMPL.controller_get_all(context, marker, limit, sort_keys,\n                                   sort_dirs, filters, offset)\n\n\ndef ports_create(context, values):\n    \"\"\"Create multiple ports.\"\"\"\n    return IMPL.ports_create(context, values)\n\n\ndef ports_update(context, values):\n    \"\"\"Update multiple ports.\"\"\"\n    return IMPL.ports_update(context, values)\n\n\ndef ports_delete(context, values):\n    \"\"\"Delete multiple ports.\"\"\"\n    return IMPL.ports_delete(context, values)\n\n\ndef port_create(context, values):\n    \"\"\"Create a port from the values dictionary.\"\"\"\n    return IMPL.port_create(context, values)\n\n\ndef port_update(context, port_id, values):\n    \"\"\"Update a port with the values dictionary.\"\"\"\n    return IMPL.port_update(context, port_id, values)\n\n\ndef port_get(context, port_id):\n    \"\"\"Get a port or raise an exception if it does not exist.\"\"\"\n    return IMPL.port_get(context, port_id)\n\n\ndef port_delete_by_storage(context, storage_id):\n    \"\"\"Delete a port or raise an exception if it does not exist.\"\"\"\n    return IMPL.port_delete_by_storage(context, storage_id)\n\n\ndef port_get_all(context, marker=None, limit=None, sort_keys=None,\n                 sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all ports.\n    If no sort parameters are specified then the returned ports are sorted\n    first by the 'created_at' key and then by the 'id' key in descending\n    order.\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of ports\n    \"\"\"\n    return IMPL.port_get_all(context, marker, limit, sort_keys,\n                             sort_dirs, filters, offset)
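\n\n# Illustrative call (values are examples only):\n#\n#     ports = port_get_all(context, limit=100, sort_keys=['created_at'],\n#                          sort_dirs=['desc'], filters={'storage_id': sid})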
\n\n\ndef disks_create(context, values):\n    \"\"\"Create multiple disks.\"\"\"\n    return IMPL.disks_create(context, values)\n\n\ndef disks_update(context, values):\n    \"\"\"Update multiple disks.\"\"\"\n    return IMPL.disks_update(context, values)\n\n\ndef disks_delete(context, values):\n    \"\"\"Delete multiple disks.\"\"\"\n    return IMPL.disks_delete(context, values)\n\n\ndef disk_create(context, values):\n    \"\"\"Create a disk from the values dictionary.\"\"\"\n    return IMPL.disk_create(context, values)\n\n\ndef disk_update(context, disk_id, values):\n    \"\"\"Update a disk with the values dictionary.\"\"\"\n    return IMPL.disk_update(context, disk_id, values)\n\n\ndef disk_get(context, disk_id):\n    \"\"\"Get a disk or raise an exception if it does not exist.\"\"\"\n    return IMPL.disk_get(context, disk_id)\n\n\ndef disk_delete_by_storage(context, storage_id):\n    \"\"\"Delete a disk or raise an exception if it does not exist.\"\"\"\n    return IMPL.disk_delete_by_storage(context, storage_id)\n\n\ndef disk_get_all(context, marker=None, limit=None, sort_keys=None,\n                 sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all disks.\n\n    If no sort parameters are specified then the returned disks are sorted\n    first by the 'created_at' key and then by the 'id' key in descending\n    order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of disks\n    \"\"\"\n    return IMPL.disk_get_all(context, marker, limit, sort_keys, sort_dirs,\n                             filters, offset)\n\n\ndef filesystems_create(context, values):\n    \"\"\"Create multiple filesystems.\"\"\"\n    return IMPL.filesystems_create(context, values)\n\n\ndef filesystems_update(context, values):\n    \"\"\"Update multiple filesystems.\"\"\"\n    return IMPL.filesystems_update(context, values)\n\n\ndef filesystems_delete(context, values):\n    \"\"\"Delete multiple filesystems.\"\"\"\n    return IMPL.filesystems_delete(context, values)\n\n\ndef filesystem_create(context, values):\n    \"\"\"Create a filesystem from the values dictionary.\"\"\"\n    return IMPL.filesystem_create(context, values)\n\n\ndef filesystem_update(context, filesystem_id, values):\n    \"\"\"Update a filesystem with the values dictionary.\"\"\"\n    return IMPL.filesystem_update(context, filesystem_id, values)\n\n\ndef filesystem_get(context, filesystem_id):\n    \"\"\"Get a filesystem or raise an exception if it does not exist.\"\"\"\n    return IMPL.filesystem_get(context, filesystem_id)\n\n\ndef filesystem_delete_by_storage(context, storage_id):\n    \"\"\"Delete a filesystem or raise an exception if it does not exist.\"\"\"\n    return IMPL.filesystem_delete_by_storage(context, storage_id)\n\n\ndef filesystem_get_all(context, marker=None, limit=None, sort_keys=None,\n                       sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all filesystems.\n    If no sort parameters are specified then the returned filesystems are\n    sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,
\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of filesystems\n    \"\"\"\n    return IMPL.filesystem_get_all(context, marker, limit, sort_keys,\n                                   sort_dirs, filters, offset)\n\n\ndef quotas_create(context, values):\n    \"\"\"Create multiple quotas.\"\"\"\n    return IMPL.quotas_create(context, values)\n\n\ndef quotas_update(context, values):\n    \"\"\"Update multiple quotas.\"\"\"\n    return IMPL.quotas_update(context, values)\n\n\ndef quotas_delete(context, values):\n    \"\"\"Delete multiple quotas.\"\"\"\n    return IMPL.quotas_delete(context, values)\n\n\ndef quota_create(context, values):\n    \"\"\"Create a quota from the values dictionary.\"\"\"\n    return IMPL.quota_create(context, values)\n\n\ndef quota_update(context, quota_id, values):\n    \"\"\"Update a quota with the values dictionary.\"\"\"\n    return IMPL.quota_update(context, quota_id, values)\n\n\ndef quota_get(context, quota_id):\n    \"\"\"Get a quota or raise an exception if it does not exist.\"\"\"\n    return IMPL.quota_get(context, quota_id)\n\n\ndef quota_delete_by_storage(context, storage_id):\n    \"\"\"Delete a quota or raise an exception if it does not exist.\"\"\"\n    return IMPL.quota_delete_by_storage(context, storage_id)\n\n\ndef quota_get_all(context, marker=None, limit=None, sort_keys=None,\n                  sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all quotas.\n    If no sort parameters are specified then the returned quotas are sorted\n    first by the 'created_at' key and then by the 'id' key in descending\n    order.\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of quotas\n    \"\"\"\n    return IMPL.quota_get_all(context, marker, limit, sort_keys,\n                              sort_dirs, filters, offset)\n\n\ndef qtrees_create(context, values):\n    \"\"\"Create multiple qtrees.\"\"\"\n    return IMPL.qtrees_create(context, values)\n\n\ndef qtrees_update(context, values):\n    \"\"\"Update multiple qtrees.\"\"\"\n    return IMPL.qtrees_update(context, values)\n\n\ndef qtrees_delete(context, values):\n    \"\"\"Delete multiple qtrees.\"\"\"\n    return IMPL.qtrees_delete(context, values)\n\n\ndef qtree_create(context, values):\n    \"\"\"Create a qtree from the values dictionary.\"\"\"\n    return IMPL.qtree_create(context, values)\n\n\ndef qtree_update(context, qtree_id, values):\n    \"\"\"Update a qtree with the values dictionary.\"\"\"\n    return IMPL.qtree_update(context, qtree_id, values)\n\n\ndef qtree_get(context, qtree_id):
\n    \"\"\"Get a qtree or raise an exception if it does not exist.\"\"\"\n    return IMPL.qtree_get(context, qtree_id)\n\n\ndef qtree_delete_by_storage(context, storage_id):\n    \"\"\"Delete a qtree or raise an exception if it does not exist.\"\"\"\n    return IMPL.qtree_delete_by_storage(context, storage_id)\n\n\ndef qtree_get_all(context, marker=None, limit=None, sort_keys=None,\n                  sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all qtrees.\n    If no sort parameters are specified then the returned qtrees are sorted\n    first by the 'created_at' key and then by the 'id' key in descending\n    order.\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of qtrees\n    \"\"\"\n    return IMPL.qtree_get_all(context, marker, limit, sort_keys,\n                              sort_dirs, filters, offset)\n\n\ndef shares_create(context, values):\n    \"\"\"Create multiple shares.\"\"\"\n    return IMPL.shares_create(context, values)\n\n\ndef shares_update(context, values):\n    \"\"\"Update multiple shares.\"\"\"\n    return IMPL.shares_update(context, values)\n\n\ndef shares_delete(context, values):\n    \"\"\"Delete multiple shares.\"\"\"\n    return IMPL.shares_delete(context, values)\n\n\ndef share_create(context, values):\n    \"\"\"Create a share from the values dictionary.\"\"\"\n    return IMPL.share_create(context, values)\n\n\ndef share_update(context, share_id, values):\n    \"\"\"Update a share with the values dictionary.\"\"\"\n    return IMPL.share_update(context, share_id, values)\n\n\ndef share_get(context, share_id):\n    \"\"\"Get a share or raise an exception if it does not exist.\"\"\"\n    return IMPL.share_get(context, share_id)\n\n\ndef share_delete_by_storage(context, storage_id):\n    \"\"\"Delete a share or raise an exception if it does not exist.\"\"\"\n    return IMPL.share_delete_by_storage(context, storage_id)\n\n\ndef share_get_all(context, marker=None, limit=None, sort_keys=None,\n                  sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all shares.\n    If no sort parameters are specified then the returned shares are sorted\n    first by the 'created_at' key and then by the 'id' key in descending\n    order.\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip
\n    :returns: list of shares\n    \"\"\"\n    return IMPL.share_get_all(context, marker, limit, sort_keys,\n                              sort_dirs, filters, offset)\n\n\ndef access_info_create(context, values):\n    \"\"\"Create a storage access information that is used to connect\n    to a specific storage device.\n    \"\"\"\n    return IMPL.access_info_create(context, values)\n\n\ndef access_info_update(context, storage_id, values):\n    \"\"\"Update a storage access information with the values dictionary.\"\"\"\n    return IMPL.access_info_update(context, storage_id, values)\n\n\ndef access_info_get(context, storage_id):\n    \"\"\"Get a storage access information.\"\"\"\n    return IMPL.access_info_get(context, storage_id)\n\n\ndef access_info_delete(context, storage_id):\n    \"\"\"Delete a storage access information.\"\"\"\n    return IMPL.access_info_delete(context, storage_id)\n\n\ndef access_info_get_all(context, marker=None, limit=None, sort_keys=None,\n                        sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all storage access information.\n\n    If no sort parameters are specified then the returned access information\n    entries are sorted first by the 'created_at' key and then by the 'id' key\n    in descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of storage access information\n    \"\"\"\n    return IMPL.access_info_get_all(context, marker, limit,\n                                    sort_keys, sort_dirs, filters, offset)\n\n\ndef is_orm_value(obj):\n    \"\"\"Check if object is an ORM field.\"\"\"\n    return IMPL.is_orm_value(obj)\n\n\ndef alert_source_create(context, values):\n    \"\"\"Create an alert source.\"\"\"\n    return IMPL.alert_source_create(context, values)\n\n\ndef alert_source_update(context, storage_id, values):\n    \"\"\"Update an alert source.\"\"\"\n    return IMPL.alert_source_update(context, storage_id, values)\n\n\ndef alert_source_get(context, storage_id):\n    \"\"\"Get an alert source.\"\"\"\n    return IMPL.alert_source_get(context, storage_id)\n\n\ndef alert_source_delete(context, storage_id):\n    \"\"\"Delete an alert source.\"\"\"\n    return IMPL.alert_source_delete(context, storage_id)\n\n\ndef alert_source_get_all(context, marker=None, limit=None, sort_keys=None,\n                         sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all alert sources.\n\n    If no sort parameters are specified then the returned alert sources are\n    sorted first by the 'created_at' key in descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,
\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of alert sources\n    \"\"\"\n    return IMPL.alert_source_get_all(context, marker, limit, sort_keys,\n                                     sort_dirs, filters, offset)\n\n\ndef task_create(context, values):\n    \"\"\"Create a task entry from the values dictionary.\"\"\"\n    return IMPL.task_create(context, values)\n\n\ndef task_update(context, task_id, values):\n    \"\"\"Update a task entry with the values dictionary.\"\"\"\n    return IMPL.task_update(context, task_id, values)\n\n\ndef task_get(context, task_id):\n    \"\"\"Get a task or raise an exception if it does not exist.\"\"\"\n    return IMPL.task_get(context, task_id)\n\n\ndef task_get_all(context, marker=None, limit=None, sort_keys=None,\n                 sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all tasks.\n    If no sort parameters are specified then the returned tasks are\n    sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of tasks\n    \"\"\"\n    return IMPL.task_get_all(context, marker, limit,\n                             sort_keys, sort_dirs, filters, offset)\n\n\ndef task_delete_by_storage(context, storage_id):\n    \"\"\"Delete all tasks of given storage or raise an exception if it\n    does not exist.\n    \"\"\"\n    return IMPL.task_delete_by_storage(context, storage_id)\n\n\ndef task_delete(context, task_id):\n    \"\"\"Delete a given task or raise an exception if it does not\n    exist.\n    \"\"\"\n    return IMPL.task_delete(context, task_id)\n\n\ndef failed_task_create(context, values):\n    \"\"\"Create a failed task entry from the values dictionary.\"\"\"\n    return IMPL.failed_task_create(context, values)\n\n\ndef failed_task_update(context, failed_task_id, values):\n    \"\"\"Update a failed task with the values dictionary.\"\"\"\n    return IMPL.failed_task_update(context, failed_task_id, values)\n\n\ndef failed_task_get(context, failed_task_id):\n    \"\"\"Get a failed task or raise an exception if it does not exist.\"\"\"\n    return IMPL.failed_task_get(context, failed_task_id)\n\n\ndef failed_task_get_all(context, marker=None, limit=None, sort_keys=None,\n                        sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all failed tasks.\n    If no sort parameters are specified then the returned failed tasks are\n    sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n    :param context: context of 
this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of failed tasks\n    \"\"\"\n    return IMPL.failed_task_get_all(context, marker, limit,\n                                    sort_keys, sort_dirs, filters, offset)\n\n\ndef failed_task_delete_by_task_id(context, task_id):\n    \"\"\"Delete all failed tasks of given task id or raise an exception\n    if it does not exist.\n    \"\"\"\n    return IMPL.failed_task_delete_by_task_id(context, task_id)\n\n\ndef failed_task_delete(context, failed_task_id):\n    \"\"\"Delete a given failed task or raise an exception if it does not\n    exist.\n    \"\"\"\n    return IMPL.failed_task_delete(context, failed_task_id)\n\n\ndef failed_task_delete_by_storage(context, storage_id):\n    \"\"\"Delete all failed tasks of given storage or raise an exception if it\n    does not exist.\n    \"\"\"\n    return IMPL.failed_task_delete_by_storage(context, storage_id)\n\n\ndef storage_host_initiators_create(context, values):\n    \"\"\"Create a storage host initiator entry from the values dictionary.\"\"\"\n    return IMPL.storage_host_initiators_create(context, values)\n\n\ndef storage_host_initiators_update(context, values):\n    \"\"\"Update a storage host initiator with the values dictionary.\"\"\"\n    return IMPL.storage_host_initiators_update(context, values)\n\n\ndef storage_host_initiators_delete(context, values):\n    \"\"\"Delete multiple storage initiators.\"\"\"\n    return IMPL.storage_host_initiators_delete(context, values)\n\n\ndef storage_host_initiators_get(context, storage_host_initiator_id):\n    \"\"\"Get a storage host initiator or raise an exception if it does not\n    exist.\n    \"\"\"\n    return IMPL.storage_host_initiators_get(context, storage_host_initiator_id)\n\n\ndef storage_host_initiators_get_all(context, marker=None, limit=None,\n                                    sort_keys=None, sort_dirs=None,\n                                    filters=None, offset=None):\n    \"\"\"Retrieves all storage initiators.\n\n    If no sort parameters are specified then the returned storage initiators\n    are sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of storage 
initiators\n    \"\"\"\n    return IMPL.storage_host_initiators_get_all(context, marker, limit,\n                                                sort_keys, sort_dirs,\n                                                filters, offset)\n\n\ndef storage_host_initiators_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the storage initiators of a device.\"\"\"\n    return IMPL.storage_host_initiators_delete_by_storage(context, storage_id)\n\n\ndef storage_hosts_create(context, values):\n    \"\"\"Create a storage host entry from the values dictionary.\"\"\"\n    return IMPL.storage_hosts_create(context, values)\n\n\ndef storage_hosts_update(context, values):\n    \"\"\"Update a storage host with the values dictionary.\"\"\"\n    return IMPL.storage_hosts_update(context, values)\n\n\ndef storage_hosts_delete(context, values):\n    \"\"\"Delete multiple storage hosts.\"\"\"\n    return IMPL.storage_hosts_delete(context, values)\n\n\ndef storage_hosts_get(context, storage_host_id):\n    \"\"\"Get a storage host or raise an exception if it does not exist.\"\"\"\n    return IMPL.storage_hosts_get(context, storage_host_id)\n\n\ndef storage_hosts_get_all(context, marker=None, limit=None, sort_keys=None,\n                          sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all storage hosts.\n\n    If no sort parameters are specified then the returned storage hosts are\n    sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of storage hosts\n    \"\"\"\n    return IMPL.storage_hosts_get_all(context, marker, limit, sort_keys,\n                                      sort_dirs, filters, offset)\n\n\ndef storage_hosts_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the storage hosts of a device.\"\"\"\n    return IMPL.storage_hosts_delete_by_storage(context, storage_id)\n\n\ndef storage_host_groups_create(context, values):\n    \"\"\"Create a storage host grp entry from the values dictionary.\"\"\"\n    return IMPL.storage_host_groups_create(context, values)\n\n\ndef storage_host_groups_update(context, values):\n    \"\"\"Update a storage host grp with the values dictionary.\"\"\"\n    return IMPL.storage_host_groups_update(context, values)\n\n\ndef storage_host_groups_delete(context, values):\n    \"\"\"Delete multiple storage host groups.\"\"\"\n    return IMPL.storage_host_groups_delete(context, values)\n\n\ndef storage_host_groups_get(context, storage_host_grp_id):\n    \"\"\"Get a storage host group or raise an exception if it does not exist.\"\"\"\n    return IMPL.storage_host_groups_get(context, storage_host_grp_id)\n\n\ndef storage_host_groups_get_all(context, marker=None, limit=None,\n                                sort_keys=None,\n                                sort_dirs=None, filters=None, 
offset=None):\n    \"\"\"Retrieves all storage host groups.\n\n    If no sort parameters are specified then the returned storage host groups\n    are sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of storage host groups\n    \"\"\"\n    return IMPL.storage_host_groups_get_all(context, marker, limit, sort_keys,\n                                            sort_dirs, filters, offset)\n\n\ndef storage_host_groups_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the storage host groups of a device.\"\"\"\n    return IMPL.storage_host_groups_delete_by_storage(context, storage_id)\n\n\ndef port_groups_create(context, values):\n    \"\"\"Create a port group entry from the values dictionary.\"\"\"\n    return IMPL.port_groups_create(context, values)\n\n\ndef port_groups_update(context, values):\n    \"\"\"Update a port group with the values dictionary.\"\"\"\n    return IMPL.port_groups_update(context, values)\n\n\ndef port_groups_delete(context, values):\n    \"\"\"Delete multiple port groups.\"\"\"\n    return IMPL.port_groups_delete(context, values)\n\n\ndef port_groups_get(context, port_grp_id):\n    \"\"\"Get a port group or raise an exception if it does not exist.\"\"\"\n    return IMPL.port_groups_get(context, port_grp_id)\n\n\ndef port_groups_get_all(context, marker=None, limit=None,\n                        sort_keys=None,\n                        sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all port groups.\n\n    If no sort parameters are specified then the returned port groups are\n    sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of port groups\n    \"\"\"\n    return IMPL.port_groups_get_all(context, marker, limit, sort_keys,\n                                    sort_dirs, filters, offset)\n\n\ndef port_groups_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the port groups of a device.\"\"\"\n    return IMPL.port_groups_delete_by_storage(context, storage_id)\n\n\ndef volume_groups_create(context, values):\n    \"\"\"Create a volume group 
entry from the values dictionary.\"\"\"\n    return IMPL.volume_groups_create(context, values)\n\n\ndef volume_groups_update(context, values):\n    \"\"\"Update a volume group with the values dictionary.\"\"\"\n    return IMPL.volume_groups_update(context, values)\n\n\ndef volume_groups_delete(context, values):\n    \"\"\"Delete multiple volume groups.\"\"\"\n    return IMPL.volume_groups_delete(context, values)\n\n\ndef volume_groups_get(context, volume_grp_id):\n    \"\"\"Get a volume group or raise an exception if it does not exist.\"\"\"\n    return IMPL.volume_groups_get(context, volume_grp_id)\n\n\ndef volume_groups_get_all(context, marker=None, limit=None,\n                          sort_keys=None,\n                          sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all volume groups.\n\n    If no sort parameters are specified then the returned volume groups are\n    sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of volume groups\n    \"\"\"\n    return IMPL.volume_groups_get_all(context, marker, limit, sort_keys,\n                                      sort_dirs, filters, offset)\n\n\ndef volume_groups_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the volume groups of a device.\"\"\"\n    return IMPL.volume_groups_delete_by_storage(context, storage_id)\n\n\ndef masking_views_create(context, values):\n    \"\"\"Create a masking view entry from the values dictionary.\"\"\"\n    return IMPL.masking_views_create(context, values)\n\n\ndef masking_views_update(context, values):\n    \"\"\"Update a masking view with the values dictionary.\"\"\"\n    return IMPL.masking_views_update(context, values)\n\n\ndef masking_views_delete(context, values):\n    \"\"\"Delete multiple masking views.\"\"\"\n    return IMPL.masking_views_delete(context, values)\n\n\ndef masking_views_get(context, masking_view_id):\n    \"\"\"Get a masking view or raise an exception if it does not exist.\"\"\"\n    return IMPL.masking_views_get(context, masking_view_id)\n\n\ndef masking_views_get_all(context, marker=None, limit=None,\n                          sort_keys=None,\n                          sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all masking views.\n\n    If no sort parameters are specified then the returned masking views are\n    sorted first by the 'created_at' key and then by the 'id' key in\n    descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with 
corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of masking views\n    \"\"\"\n    return IMPL.masking_views_get_all(context, marker, limit, sort_keys,\n                                      sort_dirs, filters, offset)\n\n\ndef masking_views_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the masking views of a device.\"\"\"\n    return IMPL.masking_views_delete_by_storage(context, storage_id)\n\n\ndef storage_host_grp_host_rels_create(context, values):\n    \"\"\"Create a storage host grp host relation entry from the values\n    dictionary.\n    \"\"\"\n    return IMPL.storage_host_grp_host_rels_create(context, values)\n\n\ndef storage_host_grp_host_rels_update(context, values):\n    \"\"\"Update a storage host grp host relation with the values dictionary.\"\"\"\n    return IMPL.storage_host_grp_host_rels_update(context, values)\n\n\ndef storage_host_grp_host_rels_delete(context, values):\n    \"\"\"Delete multiple storage host grp host relations.\"\"\"\n    return IMPL.storage_host_grp_host_rels_delete(context, values)\n\n\ndef storage_host_grp_host_rels_get(context, host_grp_host_relation_id):\n    \"\"\"Get a storage host grp host relation or raise an exception if it does\n    not exist.\n    \"\"\"\n    return IMPL.storage_host_grp_host_rels_get(context,\n                                               host_grp_host_relation_id)\n\n\ndef storage_host_grp_host_rels_get_all(context, marker=None, limit=None,\n                                       sort_keys=None,\n                                       sort_dirs=None, filters=None,\n                                       offset=None):\n    \"\"\"Retrieves all storage host grp host relations.\n\n    If no sort parameters are specified then the returned storage host grp\n    host relations are sorted first by the 'created_at' key and then by the\n    'id' key in descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of storage host grp host relations\n    \"\"\"\n    return IMPL.storage_host_grp_host_rels_get_all(context, marker, limit,\n                                                   sort_keys,\n                                                   sort_dirs, filters,\n                                                   offset)\n\n\ndef storage_host_grp_host_rels_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the storage host grp host relations of a device.\"\"\"\n    return IMPL.storage_host_grp_host_rels_delete_by_storage(context,\n                                                             storage_id)\n
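\n\n# Example (an illustrative sketch, not a documented contract): paging\n# through the relations above. ``ctxt`` and ``storage_id`` are assumed to\n# be an existing RequestContext and device id; all values are hypothetical.\n#\n#     page = storage_host_grp_host_rels_get_all(\n#         ctxt, limit=100, sort_keys=['created_at'], sort_dirs=['desc'],\n#         filters={'storage_id': storage_id})\n#     while page:\n#         marker = page[-1]['id']\n#         page = storage_host_grp_host_rels_get_all(ctxt, marker=marker,\n#                                                   limit=100)\n\n\ndef 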
port_grp_port_rels_create(context, values):\n    \"\"\"Create a port grp port relation entry from the values\n    dictionary.\n    \"\"\"\n    return IMPL.port_grp_port_rels_create(context, values)\n\n\ndef port_grp_port_rels_update(context, values):\n    \"\"\"Update a port grp port relation with the values dictionary.\"\"\"\n    return IMPL.port_grp_port_rels_update(context, values)\n\n\ndef port_grp_port_rels_delete(context, values):\n    \"\"\"Delete multiple port grp port relations.\"\"\"\n    return IMPL.port_grp_port_rels_delete(context, values)\n\n\ndef port_grp_port_rels_get(context, port_grp_port_relation_id):\n    \"\"\"Get a port grp port relation or raise an exception if it does\n    not exist.\n    \"\"\"\n    return IMPL.port_grp_port_rels_get(context,\n                                       port_grp_port_relation_id)\n\n\ndef port_grp_port_rels_get_all(context, marker=None, limit=None,\n                               sort_keys=None,\n                               sort_dirs=None, filters=None,\n                               offset=None):\n    \"\"\"Retrieves all port grp port relations.\n\n    If no sort parameters are specified then the returned port grp port\n    relations are sorted first by the 'created_at' key and then by the 'id'\n    key in descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of port grp port relations\n    \"\"\"\n    return IMPL.port_grp_port_rels_get_all(context, marker, limit,\n                                           sort_keys,\n                                           sort_dirs, filters,\n                                           offset)\n\n\ndef port_grp_port_rels_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the port grp port relations of a device.\"\"\"\n    return IMPL.port_grp_port_rels_delete_by_storage(context,\n                                                     storage_id)\n\n\ndef vol_grp_vol_rels_create(context, values):\n    \"\"\"Create a volume grp volume relation entry from the values\n    dictionary.\n    \"\"\"\n    return IMPL.vol_grp_vol_rels_create(context, values)\n\n\ndef vol_grp_vol_rels_update(context, values):\n    \"\"\"Update a volume grp volume relation with the values dictionary.\"\"\"\n    return IMPL.vol_grp_vol_rels_update(context, values)\n\n\ndef vol_grp_vol_rels_delete(context, values):\n    \"\"\"Delete multiple volume grp volume relations.\"\"\"\n    return IMPL.vol_grp_vol_rels_delete(context, values)\n\n\ndef vol_grp_vol_rels_get(context, volume_grp_volume_relation_id):\n    \"\"\"Get a volume grp volume relation or raise an exception if it does\n    not exist.\n    \"\"\"\n    return IMPL.vol_grp_vol_rels_get(context,\n                                     volume_grp_volume_relation_id)\n\n\ndef vol_grp_vol_rels_get_all(context, marker=None, limit=None,\n                             sort_keys=None,\n      
                       sort_dirs=None, filters=None,\n                             offset=None):\n    \"\"\"Retrieves all volume grp volume relations.\n\n    If no sort parameters are specified then the returned volume grp volume\n    relations are sorted first by the 'created_at' key and then by the 'id'\n    key in descending order.\n\n    :param context: context of this request, it's helpful to trace the request\n    :param marker: the last item of the previous page, used to determine the\n                   next page of results to return\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n                      paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys, for example\n                      'desc' for descending order\n    :param filters: dictionary of filters\n    :param offset: number of items to skip\n    :returns: list of volume grp volume relations\n    \"\"\"\n    return IMPL.vol_grp_vol_rels_get_all(context, marker, limit,\n                                         sort_keys,\n                                         sort_dirs, filters,\n                                         offset)\n\n\ndef vol_grp_vol_rels_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the volume grp volume relations of a device.\"\"\"\n    return IMPL.vol_grp_vol_rels_delete_by_storage(context,\n                                                   storage_id)\n
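\n\n# Example (illustrative): a device-removal task could cascade over the\n# ``*_delete_by_storage`` helpers in this module; ``ctxt`` and\n# ``storage_id`` are hypothetical.\n#\n#     for cleanup in (storage_host_groups_delete_by_storage,\n#                     port_groups_delete_by_storage,\n#                     volume_groups_delete_by_storage,\n#                     masking_views_delete_by_storage):\n#         cleanup(ctxt, storage_id)\n"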
  },
  {
    "path": "delfin/db/base.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Base class for classes that need modular database access.\"\"\"\n\nfrom oslo_config import cfg\nfrom oslo_utils import importutils\n\ndb_driver_opt = cfg.StrOpt('db_driver',\n                           default='delfin.db',\n                           help='Driver to use for database access.')\n\nCONF = cfg.CONF\nCONF.register_opt(db_driver_opt)\n\n\nclass Base(object):\n    \"\"\"DB driver is injected in the init method.\"\"\"\n\n    def __init__(self, db_driver=None):\n        super(Base, self).__init__()\n        if not db_driver:\n            db_driver = CONF.db_driver\n        self.db = importutils.import_module(db_driver)  # pylint: disable=C0103\n"
  },
  {
    "path": "delfin/db/sqlalchemy/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/db/sqlalchemy/api.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# Copyright (c) 2014 Mirantis, Inc.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Implementation of SQLAlchemy backend.\"\"\"\n\nimport sys\n\nimport six\nimport sqlalchemy\nfrom oslo_config import cfg\nfrom oslo_db import options as db_options\nfrom oslo_db.sqlalchemy import session\nfrom oslo_db.sqlalchemy import utils as db_utils\nfrom oslo_log import log\nfrom oslo_utils import uuidutils, timeutils\nfrom sqlalchemy import create_engine\n\nfrom delfin import exception\nfrom delfin.common import sqlalchemyutils\nfrom delfin.db.sqlalchemy import models\nfrom delfin.db.sqlalchemy.models import Storage, AccessInfo\nfrom delfin.i18n import _\n\nCONF = cfg.CONF\nLOG = log.getLogger(__name__)\n_FACADE = None\n\n_DEFAULT_SQL_CONNECTION = 'sqlite:///'\ndb_options.set_defaults(cfg.CONF,\n                        connection=_DEFAULT_SQL_CONNECTION)\n\n\ndef apply_sorting(model, query, sort_key, sort_dir):\n    if sort_dir.lower() not in ('desc', 'asc'):\n        msg = ((\"Wrong sorting data provided: sort key is '%(sort_key)s' \"\n                \"and sort order is '%(sort_dir)s'.\") %\n               {\"sort_key\": sort_key, \"sort_dir\": sort_dir})\n        raise exception.InvalidInput(msg)\n\n    sort_attr = getattr(model, sort_key)\n    sort_method = getattr(sort_attr, sort_dir.lower())\n    return query.order_by(sort_method())\n\n\ndef get_engine():\n    facade = _create_facade_lazily()\n    return facade.get_engine()\n\n\ndef get_session(**kwargs):\n    facade = _create_facade_lazily()\n    return facade.get_session(**kwargs)\n\n\ndef _create_facade_lazily():\n    global _FACADE\n    if _FACADE is None:\n        _FACADE = session.EngineFacade.from_config(cfg.CONF)\n    return _FACADE\n\n\ndef get_backend():\n    \"\"\"The backend is this module itself.\"\"\"\n    return sys.modules[__name__]\n\n\ndef register_db():\n    \"\"\"Create database and tables.\"\"\"\n    models = (Storage,\n              AccessInfo\n              )\n    engine = create_engine(CONF.database.connection, echo=False)\n    for model in models:\n        model.metadata.create_all(engine)\n\n\ndef _process_model_like_filter(model, query, filters):\n    \"\"\"Applies regex expression filtering to a query.\n\n    :param model: model to apply filters to\n    :param query: query to apply filters to\n    :param filters: dictionary of filters with regex values\n    :returns: the updated query.\n    \"\"\"\n    if query is None:\n        return query\n\n    for key in sorted(filters):\n        column_attr = getattr(model, key)\n        if 'property' == type(column_attr).__name__:\n            continue\n        value = filters[key]\n        if not (isinstance(value, (six.string_types, int))):\n            continue\n    
    query = query.filter(\n            column_attr.op('LIKE')(u'%%%s%%' % value))\n    return query\n\n\ndef apply_like_filters(model):\n    def decorator_filters(process_exact_filters):\n        def _decorator(query, filters):\n            exact_filters = filters.copy()\n            regex_filters = {}\n            for key, value in filters.items():\n                # NOTE(tommylikehu): For inexact match, the filter keys\n                # are in the format of 'key~=value'\n                if key.endswith('~'):\n                    exact_filters.pop(key)\n                    regex_filters[key.rstrip('~')] = value\n            query = process_exact_filters(query, exact_filters)\n            return _process_model_like_filter(model, query, regex_filters)\n\n        return _decorator\n\n    return decorator_filters\n\n\ndef is_valid_model_filters(model, filters, exclude_list=None):\n    \"\"\"Return True if filter values exist on the model\n\n    :param model: a Delfin model\n    :param filters: dictionary of filters\n    \"\"\"\n    for key in filters.keys():\n        if exclude_list and key in exclude_list:\n            continue\n        if key == 'metadata':\n            if not isinstance(filters[key], dict):\n                LOG.debug(\"Metadata filter value is not valid dictionary\")\n                return False\n            continue\n        try:\n            key = key.rstrip('~')\n            getattr(model, key)\n        except AttributeError:\n            LOG.debug(\"'%s' filter key is not valid.\", key)\n            return False\n    return True\n\n\ndef access_info_create(context, values):\n    \"\"\"Create a storage access information.\"\"\"\n    if not values.get('storage_id'):\n        values['storage_id'] = uuidutils.generate_uuid()\n\n    access_info_ref = models.AccessInfo()\n    access_info_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(access_info_ref)\n\n    return _access_info_get(context,\n                            access_info_ref['storage_id'],\n                            session=session)\n\n\ndef access_info_update(context, storage_id, values):\n    \"\"\"Update a storage access information with the values dictionary.\"\"\"\n    session = get_session()\n    with session.begin():\n        _access_info_get(context, storage_id, session).update(values)\n        return _access_info_get(context, storage_id, session)\n\n\ndef access_info_delete(context, storage_id):\n    \"\"\"Delete a storage access information.\"\"\"\n    _access_info_get_query(context). 
\\\n        filter_by(storage_id=storage_id).delete()\n\n\ndef access_info_get(context, storage_id):\n    \"\"\"Get a storage access information.\"\"\"\n    return _access_info_get(context, storage_id)\n\n\ndef _access_info_get(context, storage_id, session=None):\n    result = (_access_info_get_query(context, session=session)\n              .filter_by(storage_id=storage_id)\n              .first())\n\n    if not result:\n        raise exception.AccessInfoNotFound(storage_id)\n\n    return result\n\n\ndef _access_info_get_query(context, session=None):\n    return model_query(context, models.AccessInfo, session=session)\n\n\ndef access_info_get_all(context, marker=None, limit=None, sort_keys=None,\n                        sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all storage access information.\"\"\"\n    session = get_session()\n    with session.begin():\n        query = _generate_paginate_query(context, session, models.AccessInfo,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset,\n                                         )\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.AccessInfo)\ndef _process_access_info_filters(query, filters):\n    \"\"\"Common filter processing for AccessInfo queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.AccessInfo, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef storage_create(context, values):\n    \"\"\"Add a storage device from the values dictionary.\"\"\"\n    if not values.get('id'):\n        values['id'] = uuidutils.generate_uuid()\n\n    storage_ref = models.Storage()\n    storage_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(storage_ref)\n\n    return _storage_get(context,\n                        storage_ref['id'],\n                        session=session)\n\n\ndef storage_update(context, storage_id, values):\n    \"\"\"Update a storage device with the values dictionary.\"\"\"\n    session = get_session()\n    with session.begin():\n        query = _storage_get_query(context, session)\n        result = query.filter_by(id=storage_id).update(values)\n    return result\n\n\ndef storage_get(context, storage_id):\n    \"\"\"Retrieve a storage device.\"\"\"\n    return _storage_get(context, storage_id)\n\n\ndef _storage_get(context, storage_id, session=None):\n    result = (_storage_get_query(context, session=session)\n              .filter_by(id=storage_id)\n              .first())\n\n    if not result:\n        raise exception.StorageNotFound(storage_id)\n\n    return result\n\n\ndef _storage_get_query(context, session=None):\n    read_deleted = context.read_deleted\n    kwargs = dict()\n\n    if read_deleted in ('no', 'n', False):\n        kwargs['deleted'] = False\n    elif read_deleted in ('yes', 'y', True):\n        kwargs['deleted'] = True\n\n    return model_query(context, models.Storage, session=session, **kwargs)\n\n\ndef storage_get_all(context, marker=None, limit=None, sort_keys=None,\n                    sort_dirs=None, filters=None, offset=None):\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.Storage,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, 
offset,\n                                         )\n        # No storage would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.Storage)\ndef _process_storage_info_filters(query, filters):\n    \"\"\"Common filter processing for Storages queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.Storage, filters):\n            return\n        query = query.filter_by(**filters)\n    return query\n\n\ndef storage_delete(context, storage_id):\n    \"\"\"Delete a storage device.\"\"\"\n    delete_info = {'deleted': True, 'deleted_at': timeutils.utcnow()}\n    _storage_get_query(context).filter_by(id=storage_id).update(delete_info)\n\n\ndef _volume_get_query(context, session=None):\n    return model_query(context, models.Volume, session=session)\n\n\ndef _volume_get(context, volume_id, session=None):\n    result = (_volume_get_query(context, session=session)\n              .filter_by(id=volume_id)\n              .first())\n\n    if not result:\n        raise exception.VolumeNotFound(volume_id)\n\n    return result\n\n\ndef volume_create(context, values):\n    \"\"\"Create a volume.\"\"\"\n    if not values.get('id'):\n        values['id'] = uuidutils.generate_uuid()\n\n    vol_ref = models.Volume()\n    vol_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(vol_ref)\n\n    return _volume_get(context,\n                       vol_ref['id'],\n                       session=session)\n\n\ndef volumes_create(context, volumes):\n    \"\"\"Create multiple volumes.\"\"\"\n    session = get_session()\n    vol_refs = []\n    with session.begin():\n\n        for vol in volumes:\n            LOG.debug('adding new volume for native_volume_id {0}:'\n                      .format(vol.get('native_volume_id')))\n            if not vol.get('id'):\n                vol['id'] = uuidutils.generate_uuid()\n\n            vol_ref = models.Volume()\n            vol_ref.update(vol)\n            vol_refs.append(vol_ref)\n\n        session.add_all(vol_refs)\n\n    return vol_refs\n\n\ndef volumes_delete(context, volumes_id_list):\n    \"\"\"Delete multiple volumes.\"\"\"\n    session = get_session()\n    with session.begin():\n        for vol_id in volumes_id_list:\n            LOG.debug('deleting volume {0}:'.format(vol_id))\n            query = _volume_get_query(context, session)\n            result = query.filter_by(id=vol_id).delete()\n\n            if not result:\n                LOG.error(exception.VolumeNotFound(vol_id))\n    return\n\n\ndef volume_update(context, vol_id, values):\n    \"\"\"Update a volume.\"\"\"\n    session = get_session()\n    with session.begin():\n        _volume_get(context, vol_id, session).update(values)\n    return _volume_get(context, vol_id, session)\n\n\ndef volumes_update(context, volumes):\n    \"\"\"Update multiple volumes.\"\"\"\n    session = get_session()\n    with session.begin():\n        for vol in volumes:\n            LOG.debug('updating volume {0}:'.format(vol.get('id')))\n            query = _volume_get_query(context, session)\n            result = query.filter_by(id=vol.get('id')\n                                     ).update(vol)\n\n            if not result:\n                LOG.error(exception.VolumeNotFound(vol.get('id')))\n\n\ndef volume_get(context, volume_id):\n    \"\"\"Get a volume or raise an exception if it does not exist.\"\"\"\n    return _volume_get(context, volume_id)\n
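\n\n# Example (illustrative): a periodic sync task might reconcile\n# driver-reported volumes with the db via the bulk helpers above;\n# ``ctxt``, ``new_vols``, ``changed_vols`` and ``stale_vols`` are\n# hypothetical.\n#\n#     volumes_create(ctxt, new_vols)\n#     volumes_update(ctxt, changed_vols)\n#     volumes_delete(ctxt, [v['id'] for v in stale_vols])\n\n\ndef volume_get_all(context, 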
marker=None, limit=None, sort_keys=None,\n                   sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all storage volumes.\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.Volume,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset)\n        # No volume would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.Volume)\ndef _process_volume_info_filters(query, filters):\n    \"\"\"Common filter processing for volumes queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.Volume, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef volume_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the volumes of a device\"\"\"\n    _volume_get_query(context).filter_by(storage_id=storage_id).delete()\n\n\ndef _storage_pool_get_query(context, session=None):\n    return model_query(context, models.StoragePool, session=session)\n\n\ndef _storage_pool_get(context, storage_pool_id, session=None):\n    result = (_storage_pool_get_query(context, session=session)\n              .filter_by(id=storage_pool_id)\n              .first())\n\n    if not result:\n        raise exception.StoragePoolNotFound(storage_pool_id)\n\n    return result\n\n\ndef storage_pool_create(context, values):\n    \"\"\"Create a storage_pool from the values dictionary.\"\"\"\n    if not values.get('id'):\n        values['id'] = uuidutils.generate_uuid()\n\n    storage_pool_ref = models.StoragePool()\n    storage_pool_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(storage_pool_ref)\n\n    return _storage_pool_get(context,\n                             storage_pool_ref['id'],\n                             session=session)\n\n\ndef storage_pools_create(context, storage_pools):\n    \"\"\"Create multiple storage_pools.\"\"\"\n    session = get_session()\n    storage_pool_refs = []\n    with session.begin():\n\n        for storage_pool in storage_pools:\n            LOG.debug('adding new storage_pool for native_storage_pool_id {0}:'\n                      .format(storage_pool.get('native_storage_pool_id')))\n            if not storage_pool.get('id'):\n                storage_pool['id'] = uuidutils.generate_uuid()\n\n            storage_pool_ref = models.StoragePool()\n            storage_pool_ref.update(storage_pool)\n            storage_pool_refs.append(storage_pool_ref)\n\n        session.add_all(storage_pool_refs)\n\n    return storage_pool_refs\n\n\ndef storage_pools_delete(context, storage_pools_id_list):\n    \"\"\"Delete multiple storage_pools.\"\"\"\n    session = get_session()\n    with session.begin():\n        for storage_pool_id in storage_pools_id_list:\n            LOG.debug('deleting storage_pool {0}:'.format(storage_pool_id))\n            query = _storage_pool_get_query(context, session)\n            result = query.filter_by(id=storage_pool_id).delete()\n\n            if not result:\n                LOG.error(exception.StoragePoolNotFound(storage_pool_id))\n\n    return\n\n\ndef storage_pool_update(context, storage_pool_id, values):\n    \"\"\"Update a storage_pool with the values dictionary.\"\"\"\n    session = get_session()\n\n    with 
session.begin():\n        query = _storage_pool_get_query(context, session)\n        result = query.filter_by(id=storage_pool_id).update(values)\n\n        if not result:\n            raise exception.StoragePoolNotFound(storage_pool_id)\n\n    return result\n\n\ndef storage_pools_update(context, storage_pools):\n    \"\"\"Update multiple storage_pools.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        storage_pool_refs = []\n\n        for storage_pool in storage_pools:\n            LOG.debug('updating storage_pool {0}:'.format(\n                storage_pool.get('id')))\n            query = _storage_pool_get_query(context, session)\n            result = query.filter_by(id=storage_pool.get('id')\n                                     ).update(storage_pool)\n\n            if not result:\n                LOG.error(exception.StoragePoolNotFound(storage_pool.get(\n                    'id')))\n            else:\n                storage_pool_refs.append(result)\n\n    return storage_pool_refs\n\n\ndef storage_pool_get(context, storage_pool_id):\n    \"\"\"Get a storage_pool or raise an exception if it does not exist.\"\"\"\n    return _storage_pool_get(context, storage_pool_id)\n\n\ndef storage_pool_get_all(context, marker=None, limit=None, sort_keys=None,\n                         sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all storage_pools.\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.StoragePool,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset,\n                                         )\n        # No storage_pool would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\ndef storage_pool_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the storage_pools of a storage device\"\"\"\n    _storage_pool_get_query(context).filter_by(storage_id=storage_id).delete()\n\n\n@apply_like_filters(model=models.StoragePool)\ndef _process_storage_pool_info_filters(query, filters):\n    \"\"\"Common filter processing for storage_pools queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.StoragePool, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef controllers_create(context, controllers):\n    \"\"\"Create multiple controllers.\"\"\"\n    session = get_session()\n    controllers_refs = []\n    with session.begin():\n\n        for controller in controllers:\n            LOG.debug('adding new controller for native_controller_id {0}:'\n                      .format(controller.get('native_controller_id')))\n            if not controller.get('id'):\n                controller['id'] = uuidutils.generate_uuid()\n\n            controller_ref = models.Controller()\n            controller_ref.update(controller)\n            controllers_refs.append(controller_ref)\n\n        session.add_all(controllers_refs)\n\n    return controllers_refs\n\n\ndef controllers_update(context, controllers):\n    \"\"\"Update multiple controllers.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        controller_refs = []\n\n        for controller in controllers:\n            LOG.debug('updating controller {0}:'.format(\n                controller.get('id')))\n            query = 
_controller_get_query(context, session)\n            result = query.filter_by(id=controller.get('id')\n                                     ).update(controller)\n\n            if not result:\n                LOG.error(exception.ControllerNotFound(controller.get(\n                    'id')))\n            else:\n                controller_refs.append(result)\n\n    return controller_refs\n\n\ndef controllers_delete(context, controllers_id_list):\n    \"\"\"Delete multiple controllers.\"\"\"\n    session = get_session()\n    with session.begin():\n        for controller_id in controllers_id_list:\n            LOG.debug('deleting controller {0}:'.format(controller_id))\n            query = _controller_get_query(context, session)\n            result = query.filter_by(id=controller_id).delete()\n\n            if not result:\n                LOG.error(exception.ControllerNotFound(controller_id))\n\n    return\n\n\ndef _controller_get_query(context, session=None):\n    return model_query(context, models.Controller, session=session)\n\n\ndef _controller_get(context, controller_id, session=None):\n    result = (_controller_get_query(context, session=session)\n              .filter_by(id=controller_id)\n              .first())\n\n    if not result:\n        raise exception.ControllerNotFound(controller_id)\n\n    return result\n\n\ndef controller_create(context, values):\n    \"\"\"Create a controller from the values dictionary.\"\"\"\n    if not values.get('id'):\n        values['id'] = uuidutils.generate_uuid()\n\n    controller_ref = models.Controller()\n    controller_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(controller_ref)\n\n    return _controller_get(context,\n                           controller_ref['id'],\n                           session=session)\n\n\ndef controller_update(context, controller_id, values):\n    \"\"\"Update a controller with the values dictionary.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        query = _controller_get_query(context, session)\n        result = query.filter_by(id=controller_id).update(values)\n\n        if not result:\n            raise exception.ControllerNotFound(controller_id)\n\n    return result\n\n\ndef controller_get(context, controller_id):\n    \"\"\"Get a controller or raise an exception if it does not exist.\"\"\"\n    return _controller_get(context, controller_id)\n\n\ndef controller_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the controllers of a storage device.\"\"\"\n    _controller_get_query(context).filter_by(storage_id=storage_id).delete()\n\n\ndef controller_get_all(context, marker=None, limit=None, sort_keys=None,\n                       sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all controllers.\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.Controller,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset,\n                                         )\n        # No Controller would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.Controller)\ndef _process_controller_info_filters(query, filters):\n    \"\"\"Common filter processing for controllers queries.\"\"\"\n    if filters:\n        if not 
is_valid_model_filters(models.Controller, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef ports_create(context, ports):\n    \"\"\"Create multiple ports.\"\"\"\n    session = get_session()\n    ports_refs = []\n    with session.begin():\n\n        for port in ports:\n            LOG.debug('adding new port for native_port_id {0}:'\n                      .format(port.get('native_port_id')))\n            if not port.get('id'):\n                port['id'] = uuidutils.generate_uuid()\n\n            port_ref = models.Port()\n            port_ref.update(port)\n            ports_refs.append(port_ref)\n\n        session.add_all(ports_refs)\n\n    return ports_refs\n\n\ndef ports_update(context, ports):\n    \"\"\"Update multiple ports.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        port_refs = []\n\n        for port in ports:\n            LOG.debug('updating port {0}:'.format(\n                port.get('id')))\n            query = _port_get_query(context, session)\n            result = query.filter_by(id=port.get('id')\n                                     ).update(port)\n\n            if not result:\n                LOG.error(exception.PortNotFound(port.get(\n                    'id')))\n            else:\n                port_refs.append(result)\n\n    return port_refs\n\n\ndef ports_delete(context, ports_id_list):\n    \"\"\"Delete multiple ports.\"\"\"\n    session = get_session()\n    with session.begin():\n        for port_id in ports_id_list:\n            LOG.debug('deleting port {0}:'.format(port_id))\n            query = _port_get_query(context, session)\n            result = query.filter_by(id=port_id).delete()\n\n            if not result:\n                LOG.error(exception.PortNotFound(port_id))\n    return\n\n\ndef _port_get_query(context, session=None):\n    return model_query(context, models.Port, session=session)\n\n\ndef _port_get(context, port_id, session=None):\n    result = (_port_get_query(context, session=session)\n              .filter_by(id=port_id)\n              .first())\n\n    if not result:\n        raise exception.PortNotFound(port_id)\n\n    return result\n\n\ndef port_create(context, values):\n    \"\"\"Create a port from the values dictionary.\"\"\"\n    if not values.get('id'):\n        values['id'] = uuidutils.generate_uuid()\n\n    port_ref = models.Port()\n    port_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(port_ref)\n\n    return _port_get(context,\n                     port_ref['id'],\n                     session=session)\n\n\ndef port_update(context, port_id, values):\n    \"\"\"Update a port with the values dictionary.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        query = _port_get_query(context, session)\n        result = query.filter_by(id=port_id).update(values)\n\n        if not result:\n            raise exception.PortNotFound(port_id)\n\n    return result\n\n\ndef port_get(context, port_id):\n    \"\"\"Get a port or raise an exception if it does not exist.\"\"\"\n    return _port_get(context, port_id)\n\n\ndef port_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the ports of a storage device.\"\"\"\n    _port_get_query(context).filter_by(storage_id=storage_id).delete()\n
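\n\n# Example (illustrative): exact vs. inexact filtering. A trailing '~' in a\n# filter key requests a substring LIKE match (see apply_like_filters);\n# ``ctxt`` and the filter values are hypothetical.\n#\n#     ports = port_get_all(ctxt, filters={'storage_id': storage_id})\n#     ports = port_get_all(ctxt, filters={'name~': 'eth'})\n\n\ndef port_get_all(context, marker=None, limit=None, sort_keys=None,\n                 sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all ports.\"\"\"\n\n    session = 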
get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.Port,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset,\n                                         )\n        # No Port would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.Port)\ndef _process_port_info_filters(query, filters):\n    \"\"\"Common filter processing for ports queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.Port, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef disks_create(context, disks):\n    \"\"\"Create multiple disks.\"\"\"\n    session = get_session()\n    disks_refs = []\n    with session.begin():\n\n        for disk in disks:\n            LOG.debug('adding new disk for native_disk_id {0}:'\n                      .format(disk.get('native_disk_id')))\n            if not disk.get('id'):\n                disk['id'] = uuidutils.generate_uuid()\n\n            disk_ref = models.Disk()\n            disk_ref.update(disk)\n            disks_refs.append(disk_ref)\n\n        session.add_all(disks_refs)\n\n    return disks_refs\n\n\ndef disks_update(context, disks):\n    \"\"\"Update multiple disks.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        disk_refs = []\n\n        for disk in disks:\n            LOG.debug('updating disk {0}:'.format(\n                disk.get('id')))\n            query = _disk_get_query(context, session)\n            result = query.filter_by(id=disk.get('id')\n                                     ).update(disk)\n\n            if not result:\n                LOG.error(exception.DiskNotFound(disk.get(\n                    'id')))\n            else:\n                disk_refs.append(result)\n\n    return disk_refs\n\n\ndef disks_delete(context, disks_id_list):\n    \"\"\"Delete multiple disks.\"\"\"\n    session = get_session()\n    with session.begin():\n        for disk_id in disks_id_list:\n            LOG.debug('deleting disk {0}:'.format(disk_id))\n            query = _disk_get_query(context, session)\n            result = query.filter_by(id=disk_id).delete()\n\n            if not result:\n                LOG.error(exception.DiskNotFound(disk_id))\n\n    return\n\n\ndef _disk_get_query(context, session=None):\n    return model_query(context, models.Disk, session=session)\n\n\ndef _disk_get(context, disk_id, session=None):\n    result = (_disk_get_query(context, session=session)\n              .filter_by(id=disk_id)\n              .first())\n\n    if not result:\n        raise exception.DiskNotFound(disk_id)\n\n    return result\n\n\ndef disk_create(context, values):\n    \"\"\"Create a disk from the values dictionary.\"\"\"\n    if not values.get('id'):\n        values['id'] = uuidutils.generate_uuid()\n\n    disk_ref = models.Disk()\n    disk_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(disk_ref)\n\n    return _disk_get(context,\n                     disk_ref['id'],\n                     session=session)\n\n\ndef disk_update(context, disk_id, values):\n    \"\"\"Update a disk with the values dictionary.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        query = _disk_get_query(context, session)\n        result = query.filter_by(id=disk_id).update(values)\n\n   
     if not result:\n            raise exception.DiskNotFound(disk_id)\n\n    return result\n\n\ndef disk_get(context, disk_id):\n    \"\"\"Get a disk or raise an exception if it does not exist.\"\"\"\n    return _disk_get(context, disk_id)\n\n\ndef disk_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the disks of a storage device.\"\"\"\n    _disk_get_query(context).filter_by(storage_id=storage_id).delete()\n\n\ndef disk_get_all(context, marker=None, limit=None, sort_keys=None,\n                 sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all disks.\"\"\"\n\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.Disk,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset,\n                                         )\n        # No Disk would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.Disk)\ndef _process_disk_info_filters(query, filters):\n    \"\"\"Common filter processing for disks queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.Disk, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef filesystems_create(context, filesystems):\n    \"\"\"Create multiple filesystems.\"\"\"\n    session = get_session()\n    filesystems_refs = []\n    with session.begin():\n\n        for filesystem in filesystems:\n            LOG.debug('adding new filesystem for native_filesystem_id {0}:'\n                      .format(filesystem.get('native_filesystem_id')))\n            if not filesystem.get('id'):\n                filesystem['id'] = uuidutils.generate_uuid()\n\n            filesystem_ref = models.Filesystem()\n            filesystem_ref.update(filesystem)\n            filesystems_refs.append(filesystem_ref)\n\n        session.add_all(filesystems_refs)\n\n    return filesystems_refs\n\n\ndef filesystems_update(context, filesystems):\n    \"\"\"Update multiple filesystems.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        filesystem_refs = []\n\n        for filesystem in filesystems:\n            LOG.debug('updating filesystem {0}:'.format(\n                filesystem.get('id')))\n            query = _filesystem_get_query(context, session)\n            result = query.filter_by(id=filesystem.get('id')\n                                     ).update(filesystem)\n\n            if not result:\n                LOG.error(exception.FilesystemNotFound(filesystem.get(\n                    'id')))\n            else:\n                filesystem_refs.append(result)\n\n    return filesystem_refs\n\n\ndef filesystems_delete(context, filesystems_id_list):\n    \"\"\"Delete multiple filesystems.\"\"\"\n    session = get_session()\n    with session.begin():\n        for filesystem_id in filesystems_id_list:\n            LOG.debug('deleting filesystem {0}:'.format(filesystem_id))\n            query = _filesystem_get_query(context, session)\n            result = query.filter_by(id=filesystem_id).delete()\n\n            if not result:\n                LOG.error(exception.FilesystemNotFound(filesystem_id))\n    return\n\n\ndef _filesystem_get_query(context, session=None):\n    return model_query(context, models.Filesystem, session=session)\n\n\ndef _filesystem_get(context, filesystem_id, session=None):\n   
 result = (_filesystem_get_query(context, session=session)\n              .filter_by(id=filesystem_id)\n              .first())\n\n    if not result:\n        raise exception.FilesystemNotFound(filesystem_id)\n\n    return result\n\n\ndef filesystem_create(context, values):\n    \"\"\"Create a filesystem from the values dictionary.\"\"\"\n    if not values.get('id'):\n        values['id'] = uuidutils.generate_uuid()\n\n    filesystem_ref = models.Filesystem()\n    filesystem_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(filesystem_ref)\n\n    return _filesystem_get(context,\n                           filesystem_ref['id'],\n                           session=session)\n\n\ndef filesystem_update(context, filesystem_id, values):\n    \"\"\"Update a filesystem with the values dictionary.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        query = _filesystem_get_query(context, session)\n        result = query.filter_by(id=filesystem_id).update(values)\n\n        if not result:\n            raise exception.FilesystemNotFound(filesystem_id)\n\n    return result\n\n\ndef filesystem_get(context, filesystem_id):\n    \"\"\"Get a filesystem or raise an exception if it does not exist.\"\"\"\n    return _filesystem_get(context, filesystem_id)\n\n\ndef filesystem_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the filesystems of a storage device.\"\"\"\n    _filesystem_get_query(context).filter_by(storage_id=storage_id).delete()\n\n\ndef filesystem_get_all(context, marker=None, limit=None, sort_keys=None,\n                       sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all filesystems.\"\"\"\n\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.Filesystem,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset,\n                                         )\n        # No Filesystem would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.Filesystem)\ndef _process_filesystem_info_filters(query, filters):\n    \"\"\"Common filter processing for filesystems queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.Filesystem, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef quotas_create(context, quotas):\n    \"\"\"Create multiple quotas.\"\"\"\n    session = get_session()\n    quotas_refs = []\n    with session.begin():\n\n        for quota in quotas:\n            LOG.debug('adding new quota for native_quota_id {0}:'\n                      .format(quota.get('native_quota_id')))\n            if not quota.get('id'):\n                quota['id'] = uuidutils.generate_uuid()\n\n            quota_ref = models.Quota()\n            quota_ref.update(quota)\n            quotas_refs.append(quota_ref)\n\n        session.add_all(quotas_refs)\n\n    return quotas_refs\n\n\ndef quotas_update(context, quotas):\n    \"\"\"Update multiple quotas.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        quota_refs = []\n\n        for quota in quotas:\n            LOG.debug('updating quota {0}:'.format(\n                quota.get('id')))\n            query = _quota_get_query(context, session)\n            result = 
query.filter_by(id=quota.get('id')\n                                     ).update(quota)\n\n            if not result:\n                LOG.error(exception.QuotaNotFound(quota.get(\n                    'id')))\n            else:\n                quota_refs.append(result)\n\n    return quota_refs\n\n\ndef quotas_delete(context, quotas_id_list):\n    \"\"\"Delete multiple quotas.\"\"\"\n    session = get_session()\n    with session.begin():\n        for quota_id in quotas_id_list:\n            LOG.debug('deleting quota {0}:'.format(quota_id))\n            query = _quota_get_query(context, session)\n            result = query.filter_by(id=quota_id).delete()\n\n            if not result:\n                LOG.error(exception.QuotaNotFound(quota_id))\n    return\n\n\ndef _quota_get_query(context, session=None):\n    return model_query(context, models.Quota, session=session)\n\n\ndef _quota_get(context, quota_id, session=None):\n    result = (_quota_get_query(context, session=session)\n              .filter_by(id=quota_id)\n              .first())\n\n    if not result:\n        raise exception.QuotaNotFound(quota_id)\n\n    return result\n\n\ndef quota_create(context, values):\n    \"\"\"Create a quota from the values dictionary.\"\"\"\n    if not values.get('id'):\n        values['id'] = uuidutils.generate_uuid()\n\n    quota_ref = models.Quota()\n    quota_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(quota_ref)\n\n    return _quota_get(context,\n                      quota_ref['id'],\n                      session=session)\n\n\ndef quota_update(context, quota_id, values):\n    \"\"\"Update a quota with the values dictionary.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        query = _quota_get_query(context, session)\n        result = query.filter_by(id=quota_id).update(values)\n\n        if not result:\n            raise exception.QuotaNotFound(quota_id)\n\n    return result\n\n\ndef quota_get(context, quota_id):\n    \"\"\"Get a quota or raise an exception if it does not exist.\"\"\"\n    return _quota_get(context, quota_id)\n\n\ndef quota_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the quotas of a storage device.\"\"\"\n    _quota_get_query(context).filter_by(storage_id=storage_id).delete()\n\n\ndef quota_get_all(context, marker=None, limit=None, sort_keys=None,\n                  sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all quotas.\"\"\"\n\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.Quota,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset,\n                                         )\n        # No Quota would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.Quota)\ndef _process_quota_info_filters(query, filters):\n    \"\"\"Common filter processing for quotas queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.Quota, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef qtrees_create(context, qtrees):\n    \"\"\"Create multiple qtrees.\"\"\"\n    session = get_session()\n    qtrees_refs = []\n    with session.begin():\n\n        for qtree in qtrees:\n            LOG.debug('adding new qtree for 
native_qtree_id {0}:'\n                      .format(qtree.get('native_qtree_id')))\n            if not qtree.get('id'):\n                qtree['id'] = uuidutils.generate_uuid()\n\n            qtree_ref = models.Qtree()\n            qtree_ref.update(qtree)\n            qtrees_refs.append(qtree_ref)\n\n        session.add_all(qtrees_refs)\n\n    return qtrees_refs\n\n\ndef qtrees_update(context, qtrees):\n    \"\"\"Update multiple qtrees.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        qtree_refs = []\n\n        for qtree in qtrees:\n            LOG.debug('updating qtree {0}:'.format(\n                qtree.get('id')))\n            query = _qtree_get_query(context, session)\n            result = query.filter_by(id=qtree.get('id')\n                                     ).update(qtree)\n\n            if not result:\n                LOG.error(exception.QtreeNotFound(qtree.get(\n                    'id')))\n            else:\n                qtree_refs.append(result)\n\n    return qtree_refs\n\n\ndef qtrees_delete(context, qtrees_id_list):\n    \"\"\"Delete multiple qtrees.\"\"\"\n    session = get_session()\n    with session.begin():\n        for qtree_id in qtrees_id_list:\n            LOG.debug('deleting qtree {0}:'.format(qtree_id))\n            query = _qtree_get_query(context, session)\n            result = query.filter_by(id=qtree_id).delete()\n\n            if not result:\n                LOG.error(exception.QtreeNotFound(qtree_id))\n    return\n\n\ndef _qtree_get_query(context, session=None):\n    return model_query(context, models.Qtree, session=session)\n\n\ndef _qtree_get(context, qtree_id, session=None):\n    result = (_qtree_get_query(context, session=session)\n              .filter_by(id=qtree_id)\n              .first())\n\n    if not result:\n        raise exception.QtreeNotFound(qtree_id)\n\n    return result\n\n\ndef qtree_create(context, values):\n    \"\"\"Create a qtree from the values dictionary.\"\"\"\n    if not values.get('id'):\n        values['id'] = uuidutils.generate_uuid()\n\n    qtree_ref = models.Qtree()\n    qtree_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(qtree_ref)\n\n    return _qtree_get(context,\n                      qtree_ref['id'],\n                      session=session)\n\n\ndef qtree_update(context, qtree_id, values):\n    \"\"\"Update a qtree with the values dictionary.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        query = _qtree_get_query(context, session)\n        result = query.filter_by(id=qtree_id).update(values)\n\n        if not result:\n            raise exception.QtreeNotFound(qtree_id)\n\n    return result\n\n\ndef qtree_get(context, qtree_id):\n    \"\"\"Get a qtree or raise an exception if it does not exist.\"\"\"\n    return _qtree_get(context, qtree_id)\n\n\ndef qtree_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the qtrees of a storage device.\"\"\"\n    _qtree_get_query(context).filter_by(storage_id=storage_id).delete()\n\n\ndef qtree_get_all(context, marker=None, limit=None, sort_keys=None,\n                  sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all qtrees.\"\"\"\n\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.Qtree,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset,\n               
                          )\n        # No Qtree would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.Qtree)\ndef _process_qtree_info_filters(query, filters):\n    \"\"\"Common filter processing for qtrees queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.Qtree, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef shares_create(context, shares):\n    \"\"\"Create multiple shares.\"\"\"\n    session = get_session()\n    shares_refs = []\n    with session.begin():\n\n        for share in shares:\n            LOG.debug('adding new share for native_share_id {0}:'\n                      .format(share.get('native_share_id')))\n            if not share.get('id'):\n                share['id'] = uuidutils.generate_uuid()\n\n            share_ref = models.Share()\n            share_ref.update(share)\n            shares_refs.append(share_ref)\n\n        session.add_all(shares_refs)\n\n    return shares_refs\n\n\ndef shares_update(context, shares):\n    \"\"\"Update multiple shares.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        share_refs = []\n\n        for share in shares:\n            LOG.debug('updating share {0}:'.format(\n                share.get('id')))\n            query = _share_get_query(context, session)\n            result = query.filter_by(id=share.get('id')\n                                     ).update(share)\n\n            if not result:\n                LOG.error(exception.ShareNotFound(share.get(\n                    'id')))\n            else:\n                share_refs.append(result)\n\n    return share_refs\n\n\ndef shares_delete(context, shares_id_list):\n    \"\"\"Delete multiple shares.\"\"\"\n    session = get_session()\n    with session.begin():\n        for share_id in shares_id_list:\n            LOG.debug('deleting share {0}:'.format(share_id))\n            query = _share_get_query(context, session)\n            result = query.filter_by(id=share_id).delete()\n\n            if not result:\n                LOG.error(exception.ShareNotFound(share_id))\n    return\n\n\ndef _share_get_query(context, session=None):\n    return model_query(context, models.Share, session=session)\n\n\ndef _share_get(context, share_id, session=None):\n    result = (_share_get_query(context, session=session)\n              .filter_by(id=share_id)\n              .first())\n\n    if not result:\n        raise exception.ShareNotFound(share_id)\n\n    return result\n\n\ndef share_create(context, values):\n    \"\"\"Create a share from the values dictionary.\"\"\"\n    if not values.get('id'):\n        values['id'] = uuidutils.generate_uuid()\n\n    share_ref = models.Share()\n    share_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(share_ref)\n\n    return _share_get(context,\n                      share_ref['id'],\n                      session=session)\n\n\ndef share_update(context, share_id, values):\n    \"\"\"Update a share with the values dictionary.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        query = _share_get_query(context, session)\n        result = query.filter_by(id=share_id).update(values)\n\n        if not result:\n            raise exception.ShareNotFound(share_id)\n\n    return result\n\n\ndef share_get(context, share_id):\n    \"\"\"Get a share or raise an exception if it does not exist.\"\"\"\n    return 
_share_get(context, share_id)\n\n\ndef share_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the shares of a storage device.\"\"\"\n    _share_get_query(context).filter_by(storage_id=storage_id).delete()\n\n\ndef share_get_all(context, marker=None, limit=None, sort_keys=None,\n                  sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all shares.\"\"\"\n\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.Share,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset,\n                                         )\n        # No Share would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.Share)\ndef _process_share_info_filters(query, filters):\n    \"\"\"Common filter processing for shares queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.Share, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef is_orm_value(obj):\n    \"\"\"Check if object is an ORM field or expression.\"\"\"\n    return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute,\n                            sqlalchemy.sql.expression.ColumnElement))\n\n\ndef model_query(context, model, *args, **kwargs):\n    \"\"\"Query helper for SQLAlchemy models.\n\n    :param context: context to query under\n    :param model: model to query. Must be a subclass of ModelBase.\n    :param session: if present, the session to use\n    \"\"\"\n    session = kwargs.pop('session', None) or get_session()\n    return db_utils.model_query(\n        model=model, session=session, args=args, **kwargs)\n\n\ndef alert_source_get(context, storage_id):\n    \"\"\"Get an alert source or raise an exception if it does not exist.\"\"\"\n    return _alert_source_get(context, storage_id)\n\n\ndef _alert_source_get(context, storage_id, session=None):\n    result = (_alert_source_get_query(context, session=session)\n              .filter_by(storage_id=storage_id)\n              .first())\n\n    if not result:\n        raise exception.AlertSourceNotFound(storage_id)\n\n    return result\n\n\ndef _alert_source_get_query(context, session=None):\n    return model_query(context, models.AlertSource, session=session)\n\n\n@apply_like_filters(model=models.AlertSource)\ndef _process_alert_source_filters(query, filters):\n    \"\"\"Common filter processing for alert source queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.AlertSource, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef alert_source_create(context, values):\n    \"\"\"Add an alert source configuration.\"\"\"\n    alert_source_ref = models.AlertSource()\n    alert_source_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(alert_source_ref)\n\n    return _alert_source_get(context,\n                             alert_source_ref['storage_id'],\n                             session=session)\n\n\ndef alert_source_update(context, storage_id, values):\n    \"\"\"Update an alert source configuration.\"\"\"\n    session = get_session()\n    with session.begin():\n        _alert_source_get(context, storage_id, session).update(values)\n        return _alert_source_get(context, storage_id, 
session)\n\n\ndef alert_source_delete(context, storage_id):\n    session = get_session()\n    with session.begin():\n        query = _alert_source_get_query(context, session)\n        result = query.filter_by(storage_id=storage_id).delete()\n        if not result:\n            LOG.error(\"Cannot delete non-existent alert source \"\n                      \"[storage_id=%s].\" % storage_id)\n            raise exception.AlertSourceNotFound(storage_id)\n        else:\n            LOG.info(\"Deleted alert source [storage_id=%s] successfully.\" %\n                     storage_id)\n\n\ndef alert_source_get_all(context, marker=None, limit=None, sort_keys=None,\n                         sort_dirs=None, filters=None, offset=None):\n    session = get_session()\n    with session.begin():\n        query = _generate_paginate_query(context, session, models.AlertSource,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset)\n        if query is None:\n            return []\n        return query.all()\n\n\ndef task_create(context, values):\n    \"\"\"Add task configuration.\"\"\"\n    tasks_ref = models.Task()\n    tasks_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(tasks_ref)\n\n    return _task_get(context, tasks_ref['id'], session=session)\n\n\ndef task_update(context, tasks_id, values):\n    \"\"\"Update a task's attributes with the values dictionary.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        query = _task_get_query(context, session)\n        result = query.filter_by(id=tasks_id).update(values)\n\n        if not result:\n            raise exception.TaskNotFound(tasks_id)\n\n    return result\n\n\ndef _task_get(context, task_id, session=None):\n    result = (_task_get_query(context, session=session)\n              .filter_by(id=task_id)\n              .first())\n\n    if not result:\n        raise exception.TaskNotFound(task_id)\n\n    return result\n\n\ndef _task_get_query(context, session=None):\n    return model_query(context, models.Task, session=session)\n\n\ndef task_get(context, tasks_id):\n    \"\"\"Get a task or raise an exception if it does not exist.\"\"\"\n    return _task_get(context, tasks_id)\n\n\ndef task_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the tasks of a storage device\"\"\"\n    delete_info = {'deleted': True, 'deleted_at': timeutils.utcnow()}\n    _task_get_query(context).filter_by(\n        storage_id=storage_id).update(delete_info)\n\n\ndef task_delete(context, tasks_id):\n    \"\"\"Delete a given task\"\"\"\n    _task_get_query(context).filter_by(id=tasks_id).delete()\n\n\ndef task_get_all(context, marker=None, limit=None, sort_keys=None,\n                 sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all tasks of a storage.\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.Task,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset,\n                                         )\n        # No task entry would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.Task)\ndef _process_tasks_info_filters(query, filters):\n    \"\"\"Common filter processing for task table queries.\"\"\"\n    if filters:\n        if not 
is_valid_model_filters(models.Task, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef failed_task_create(context, values):\n    \"\"\"Add failed task configuration.\"\"\"\n    failed_task_ref = models.FailedTask()\n    failed_task_ref.update(values)\n\n    session = get_session()\n    with session.begin():\n        session.add(failed_task_ref)\n\n    return _failed_tasks_get(context, failed_task_ref['id'], session=session)\n\n\ndef failed_task_update(context, failed_task_id, values):\n    \"\"\"Update a failed task with the values dictionary.\"\"\"\n    session = get_session()\n\n    with session.begin():\n        query = _failed_tasks_get_query(context, session)\n        result = query.filter_by(id=failed_task_id).update(values)\n\n        if not result:\n            raise exception.FailedTaskNotFound(failed_task_id)\n\n    return result\n\n\ndef _failed_tasks_get(context, failed_task_id, session=None):\n    result = (_failed_tasks_get_query(context, session=session)\n              .filter_by(id=failed_task_id)\n              .first())\n\n    if not result:\n        raise exception.FailedTaskNotFound(failed_task_id)\n\n    return result\n\n\ndef _failed_tasks_get_query(context, session=None):\n    return model_query(context, models.FailedTask, session=session)\n\n\ndef failed_task_get(context, failed_task_id):\n    \"\"\"Get a failed task or raise an exception if it does not exist.\"\"\"\n    return _failed_tasks_get(context, failed_task_id)\n\n\ndef failed_task_delete_by_task_id(context, task_id):\n    \"\"\"Delete all the failed tasks of a given task id\"\"\"\n    _failed_tasks_get_query(context).filter_by(\n        task_id=task_id).delete()\n\n\ndef failed_task_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the failed tasks of a storage device\"\"\"\n    delete_info = {'deleted': True, 'deleted_at': timeutils.utcnow()}\n    _failed_tasks_get_query(context).filter_by(\n        storage_id=storage_id).update(delete_info)\n\n\ndef failed_task_delete(context, failed_task_id):\n    \"\"\"Delete a given failed task\"\"\"\n    _failed_tasks_get_query(context).filter_by(id=failed_task_id).delete()\n\n\ndef failed_task_get_all(context, marker=None, limit=None, sort_keys=None,\n                        sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all failed tasks.\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.FailedTask,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset,\n                                         )\n        # No failed task would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.FailedTask)\ndef _process_failed_tasks_info_filters(query, filters):\n    \"\"\"Common filter processing for failed task queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.FailedTask, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef _storage_host_initiators_get_query(context, session=None):\n    return model_query(context, models.StorageHostInitiator, session=session)\n\n\ndef _storage_host_initiators_get(context, storage_host_initiator_id,\n                                 session=None):\n    result = (_storage_host_initiators_get_query(context, 
session=session)\n              .filter_by(id=storage_host_initiator_id)\n              .first())\n\n    if not result:\n        raise exception.StorageHostInitiatorNotFound(storage_host_initiator_id)\n\n    return result\n\n\ndef storage_host_initiators_create(context, storage_host_initiators):\n    \"\"\"Create multiple storage host initiators.\"\"\"\n    session = get_session()\n    initiator_refs = []\n    with session.begin():\n\n        for initiator in storage_host_initiators:\n            LOG.debug('Adding new storage host initiator for '\n                      'native_storage_host_initiator_id {0}:'\n                      .format(initiator\n                              .get('native_storage_host_initiator_id')))\n            if not initiator.get('id'):\n                initiator['id'] = uuidutils.generate_uuid()\n\n            initiator_ref = models.StorageHostInitiator()\n            initiator_ref.update(initiator)\n            initiator_refs.append(initiator_ref)\n\n        session.add_all(initiator_refs)\n\n    return initiator_refs\n\n\ndef storage_host_initiators_delete(context, storage_host_initiators_id_list):\n    \"\"\"Delete multiple storage host initiators.\"\"\"\n    session = get_session()\n    with session.begin():\n        for initiator_id in storage_host_initiators_id_list:\n            LOG.debug('Deleting storage host initiator {0}:'\n                      .format(initiator_id))\n            query = _storage_host_initiators_get_query(context, session)\n            result = query.filter_by(id=initiator_id).delete()\n\n            if not result:\n                LOG.error(exception.StorageHostInitiatorNotFound(initiator_id))\n    return\n\n\ndef storage_host_initiators_update(context, storage_host_initiators):\n    \"\"\"Update multiple storage host initiators.\"\"\"\n    session = get_session()\n    with session.begin():\n        for initiator in storage_host_initiators:\n            LOG.debug('Updating storage host initiator {0}:'\n                      .format(initiator.get('id')))\n            query = _storage_host_initiators_get_query(context, session)\n            result = query.filter_by(id=initiator.get('id')\n                                     ).update(initiator)\n\n            if not result:\n                LOG.error(exception.StorageHostInitiatorNotFound(initiator\n                                                                 .get('id')))\n\n\ndef storage_host_initiators_get(context, storage_host_initiator_id):\n    \"\"\"Get a storage host initiator or raise an exception if it does not\n    exist.\n    \"\"\"\n    return _storage_host_initiators_get(context, storage_host_initiator_id)\n\n\ndef storage_host_initiators_get_all(context, marker=None, limit=None,\n                                    sort_keys=None, sort_dirs=None,\n                                    filters=None,\n                                    offset=None):\n    \"\"\"Retrieves all storage host initiators\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session,\n                                         models.StorageHostInitiator, marker,\n                                         limit, sort_keys, sort_dirs,\n                                         filters, offset)\n        # No storage host initiator would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.StorageHostInitiator)\ndef 
_process_storage_host_initiators_info_filters(query, filters):\n    \"\"\"Common filter processing for storage host initiators queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.StorageHostInitiator, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef storage_host_initiators_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the storage host initiators of a device\"\"\"\n    _storage_host_initiators_get_query(context)\\\n        .filter_by(storage_id=storage_id).delete()\n\n\ndef _storage_hosts_get_query(context, session=None):\n    return model_query(context, models.StorageHost, session=session)\n\n\ndef _storage_hosts_get(context, storage_host_id, session=None):\n    result = (_storage_hosts_get_query(context, session=session)\n              .filter_by(id=storage_host_id)\n              .first())\n\n    if not result:\n        raise exception.StorageHostNotFound(storage_host_id)\n\n    return result\n\n\ndef storage_hosts_create(context, storage_hosts):\n    \"\"\"Create multiple storage hosts.\"\"\"\n    session = get_session()\n    host_refs = []\n    with session.begin():\n\n        for host in storage_hosts:\n            LOG.debug('Adding new storage host for '\n                      'native_host_id {0}:'\n                      .format(host.get('native_host_id')))\n            if not host.get('id'):\n                host['id'] = uuidutils.generate_uuid()\n\n            host_ref = models.StorageHost()\n            host_ref.update(host)\n            host_refs.append(host_ref)\n\n        session.add_all(host_refs)\n\n    return host_refs\n\n\ndef storage_hosts_delete(context, storage_hosts_id_list):\n    \"\"\"Delete multiple storage hosts.\"\"\"\n    session = get_session()\n    with session.begin():\n        for host_id in storage_hosts_id_list:\n            LOG.debug('Deleting storage host {0}:'.format(host_id))\n            query = _storage_hosts_get_query(context, session)\n            result = query.filter_by(id=host_id).delete()\n\n            if not result:\n                LOG.error(exception.StorageHostNotFound(host_id))\n    return\n\n\ndef storage_hosts_update(context, storage_hosts):\n    \"\"\"Update multiple storage hosts.\"\"\"\n    session = get_session()\n    with session.begin():\n        for host in storage_hosts:\n            LOG.debug('Updating storage hosts {0}:'.format(host.get('id')))\n            query = _storage_hosts_get_query(context, session)\n            result = query.filter_by(id=host.get('id')\n                                     ).update(host)\n\n            if not result:\n                LOG.error(exception.StorageHostNotFound(host\n                                                        .get('id')))\n\n\ndef storage_hosts_get(context, storage_host_id):\n    \"\"\"Get a storage host or raise an exception if it does not exist.\"\"\"\n    return _storage_hosts_get(context, storage_host_id)\n\n\ndef storage_hosts_get_all(context, marker=None, limit=None, sort_keys=None,\n                          sort_dirs=None, filters=None, offset=None):\n    \"\"\"Retrieves all storage hosts\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session,\n                                         models.StorageHost, marker, limit,\n                                         sort_keys, sort_dirs, filters, offset)\n        # No storage host would match, return empty list\n        if query is None:\n            
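# _process_storage_hosts_info_filters returned None, meaning the\n            # filters are not valid model attributes; short-circuit to empty.\n            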
return []\n        return query.all()\n\n\n@apply_like_filters(model=models.StorageHost)\ndef _process_storage_hosts_info_filters(query, filters):\n    \"\"\"Common filter processing for storage hosts queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.StorageHost, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef storage_hosts_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the storage hosts of a device\"\"\"\n    _storage_hosts_get_query(context).filter_by(storage_id=storage_id) \\\n        .delete()\n\n\ndef _storage_host_groups_get_query(context, session=None):\n    return model_query(context, models.StorageHostGroup, session=session)\n\n\ndef _storage_host_groups_get(context, storage_host_grp_id, session=None):\n    result = (_storage_host_groups_get_query(context, session=session)\n              .filter_by(id=storage_host_grp_id)\n              .first())\n\n    if not result:\n        raise exception.StorageHostGroupNotFound(storage_host_grp_id)\n\n    return result\n\n\ndef storage_host_groups_create(context, storage_host_groups):\n    \"\"\"Create multiple storage host groups.\"\"\"\n    session = get_session()\n    host_groups_refs = []\n    with session.begin():\n\n        for host_group in storage_host_groups:\n            LOG.debug('Adding new storage host group for '\n                      'native_storage_host_group_id {0}:'\n                      .format(host_group.get('native_storage_host_group_id')))\n            if not host_group.get('id'):\n                host_group['id'] = uuidutils.generate_uuid()\n\n            host_group_ref = models.StorageHostGroup()\n            host_group_ref.update(host_group)\n            host_groups_refs.append(host_group_ref)\n\n        session.add_all(host_groups_refs)\n\n    return host_groups_refs\n\n\ndef storage_host_groups_delete(context, storage_host_groups_id_list):\n    \"\"\"Delete multiple storage host groups.\"\"\"\n    session = get_session()\n    with session.begin():\n        for host_group_id in storage_host_groups_id_list:\n            LOG.debug('Deleting storage host group {0}:'.format(host_group_id))\n            query = _storage_host_groups_get_query(context, session)\n            result = query.filter_by(id=host_group_id).delete()\n\n            if not result:\n                LOG.error(exception.StorageHostGroupNotFound(host_group_id))\n    return\n\n\ndef storage_host_groups_update(context, storage_host_groups):\n    \"\"\"Update multiple storage host groups.\"\"\"\n    session = get_session()\n    with session.begin():\n        for host_group in storage_host_groups:\n            LOG.debug('Updating storage host groups {0}:'\n                      .format(host_group.get('id')))\n            query = _storage_host_groups_get_query(context, session)\n            result = query.filter_by(id=host_group.get('id')\n                                     ).update(host_group)\n\n            if not result:\n                LOG.error(exception.StorageHostGroupNotFound(host_group\n                                                             .get('id')))\n\n\ndef storage_host_groups_get(context, storage_host_group_id):\n    \"\"\"Get a storage host group or raise an exception if it does not exist.\"\"\"\n    return _storage_host_groups_get(context, storage_host_group_id)\n\n\ndef storage_host_groups_get_all(context, marker=None, limit=None,\n                                sort_keys=None, sort_dirs=None,\n                                
filters=None, offset=None):\n    \"\"\"Retrieves all storage host groups\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session,\n                                         models.StorageHostGroup, marker,\n                                         limit, sort_keys, sort_dirs,\n                                         filters, offset)\n        # No storage host group would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.StorageHostGroup)\ndef _process_storage_host_groups_info_filters(query, filters):\n    \"\"\"Common filter processing for storage host groups queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.StorageHostGroup, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef storage_host_groups_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the storage host groups of a device\"\"\"\n    _storage_host_groups_get_query(context).filter_by(storage_id=storage_id)\\\n        .delete()\n\n\ndef _port_groups_get_query(context, session=None):\n    return model_query(context, models.PortGroup, session=session)\n\n\ndef _port_groups_get(context, port_grp_id, session=None):\n    result = (_port_groups_get_query(context, session=session)\n              .filter_by(id=port_grp_id)\n              .first())\n\n    if not result:\n        raise exception.PortGroupNotFound(port_grp_id)\n\n    return result\n\n\ndef port_groups_create(context, port_groups):\n    \"\"\"Create multiple port groups.\"\"\"\n    session = get_session()\n    port_groups_refs = []\n    with session.begin():\n\n        for port_group in port_groups:\n            LOG.debug('Adding new port group for '\n                      'native_port_group_id {0}:'\n                      .format(port_group.get('native_port_group_id')))\n            if not port_group.get('id'):\n                port_group['id'] = uuidutils.generate_uuid()\n\n            port_group_ref = models.PortGroup()\n            port_group_ref.update(port_group)\n            port_groups_refs.append(port_group_ref)\n\n        session.add_all(port_groups_refs)\n\n    return port_groups_refs\n\n\ndef port_groups_delete(context, port_groups_id_list):\n    \"\"\"Delete multiple port groups.\"\"\"\n    session = get_session()\n    with session.begin():\n        for port_group_id in port_groups_id_list:\n            LOG.debug('Deleting port group {0}:'.format(port_group_id))\n            query = _port_groups_get_query(context, session)\n            result = query.filter_by(id=port_group_id).delete()\n\n            if not result:\n                LOG.error(exception.PortGroupNotFound(port_group_id))\n    return\n\n\ndef port_groups_update(context, port_groups):\n    \"\"\"Update multiple port groups.\"\"\"\n    session = get_session()\n    with session.begin():\n        for port_group in port_groups:\n            LOG.debug('Updating port groups {0}:'\n                      .format(port_group.get('id')))\n            query = _port_groups_get_query(context, session)\n            result = query.filter_by(id=port_group.get('id')\n                                     ).update(port_group)\n\n            if not result:\n                LOG.error(exception.PortGroupNotFound(port_group\n                                                      .get('id')))\n\n\ndef port_groups_get(context, port_group_id):\n    \"\"\"Get 
a port group or raise an exception if it does not exist.\"\"\"\n    return _port_groups_get(context, port_group_id)\n\n\ndef port_groups_get_all(context, marker=None, limit=None,\n                        sort_keys=None, sort_dirs=None,\n                        filters=None, offset=None):\n    \"\"\"Retrieves all port groups\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session,\n                                         models.PortGroup, marker,\n                                         limit, sort_keys, sort_dirs,\n                                         filters, offset)\n        # No port group would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.PortGroup)\ndef _process_port_groups_info_filters(query, filters):\n    \"\"\"Common filter processing for port groups queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.PortGroup, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef port_groups_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the port groups of a device\"\"\"\n    _port_groups_get_query(context).filter_by(storage_id=storage_id).delete()\n\n\ndef _volume_groups_get_query(context, session=None):\n    return model_query(context, models.VolumeGroup, session=session)\n\n\ndef _volume_groups_get(context, volume_grp_id, session=None):\n    result = (_volume_groups_get_query(context, session=session)\n              .filter_by(id=volume_grp_id)\n              .first())\n\n    if not result:\n        raise exception.VolumeGroupNotFound(volume_grp_id)\n\n    return result\n\n\ndef volume_groups_create(context, volume_groups):\n    \"\"\"Create multiple volume groups.\"\"\"\n    session = get_session()\n    volume_groups_refs = []\n    with session.begin():\n\n        for volume_group in volume_groups:\n            LOG.debug('Adding new volume group for '\n                      'native_volume_group_id {0}:'\n                      .format(volume_group.get('native_volume_group_id')))\n            if not volume_group.get('id'):\n                volume_group['id'] = uuidutils.generate_uuid()\n\n            volume_group_ref = models.VolumeGroup()\n            volume_group_ref.update(volume_group)\n            volume_groups_refs.append(volume_group_ref)\n\n        session.add_all(volume_groups_refs)\n\n    return volume_groups_refs\n\n\ndef volume_groups_delete(context, volume_groups_id_list):\n    \"\"\"Delete multiple volume groups.\"\"\"\n    session = get_session()\n    with session.begin():\n        for volume_group_id in volume_groups_id_list:\n            LOG.debug('Deleting volume group {0}:'.format(volume_group_id))\n            query = _volume_groups_get_query(context, session)\n            result = query.filter_by(id=volume_group_id).delete()\n\n            if not result:\n                LOG.error(exception.VolumeGroupNotFound(volume_group_id))\n    return\n\n\ndef volume_groups_update(context, volume_groups):\n    \"\"\"Update multiple volume groups.\"\"\"\n    session = get_session()\n    with session.begin():\n        for volume_group in volume_groups:\n            LOG.debug('Updating volume groups {0}:'\n                      .format(volume_group.get('id')))\n            query = _volume_groups_get_query(context, session)\n            result = query.filter_by(id=volume_group.get('id')\n                    
                 ).update(volume_group)\n\n            if not result:\n                LOG.error(exception.VolumeGroupNotFound(volume_group\n                                                        .get('id')))\n\n\ndef volume_groups_get(context, volume_group_id):\n    \"\"\"Get a volume group or raise an exception if it does not exist.\"\"\"\n    return _volume_groups_get(context, volume_group_id)\n\n\ndef volume_groups_get_all(context, marker=None, limit=None,\n                          sort_keys=None, sort_dirs=None,\n                          filters=None, offset=None):\n    \"\"\"Retrieves all volume groups\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session,\n                                         models.VolumeGroup, marker,\n                                         limit, sort_keys, sort_dirs,\n                                         filters, offset)\n        # No volume group would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.VolumeGroup)\ndef _process_volume_groups_info_filters(query, filters):\n    \"\"\"Common filter processing for volume groups queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.VolumeGroup, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef volume_groups_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the volume groups of a device\"\"\"\n    _volume_groups_get_query(context).filter_by(storage_id=storage_id) \\\n        .delete()\n\n\ndef _masking_views_get_query(context, session=None):\n    return model_query(context, models.MaskingView, session=session)\n\n\ndef _masking_views_get(context, masking_view_id, session=None):\n    result = (_masking_views_get_query(context, session=session)\n              .filter_by(id=masking_view_id)\n              .first())\n\n    if not result:\n        raise exception.MaskingViewNotFound(masking_view_id)\n\n    return result\n\n\ndef masking_views_create(context, masking_views):\n    \"\"\"Create multiple masking views.\"\"\"\n    session = get_session()\n    masking_views_refs = []\n    with session.begin():\n\n        for masking_view in masking_views:\n            LOG.debug('Adding new masking view for '\n                      'native_masking_view_id {0}:'\n                      .format(masking_view.get('native_masking_view_id')))\n            if not masking_view.get('id'):\n                masking_view['id'] = uuidutils.generate_uuid()\n\n            masking_view_ref = models.MaskingView()\n            masking_view_ref.update(masking_view)\n            masking_views_refs.append(masking_view_ref)\n\n        session.add_all(masking_views_refs)\n\n    return masking_views_refs\n\n\ndef masking_views_delete(context, masking_views_id_list):\n    \"\"\"Delete multiple masking views.\"\"\"\n    session = get_session()\n    with session.begin():\n        for masking_view_id in masking_views_id_list:\n            LOG.debug('Deleting masking view {0}:'.format(masking_view_id))\n            query = _masking_views_get_query(context, session)\n            result = query.filter_by(id=masking_view_id).delete()\n\n            if not result:\n                LOG.error(exception.MaskingViewNotFound(masking_view_id))\n    return\n\n\ndef masking_views_update(context, masking_views):\n    \"\"\"Update multiple masking views.\"\"\"\n    session = 
get_session()\n    with session.begin():\n        for masking_view in masking_views:\n            LOG.debug('Updating masking views {0}:'\n                      .format(masking_view.get('id')))\n            query = _masking_views_get_query(context, session)\n            result = query.filter_by(id=masking_view.get('id')\n                                     ).update(masking_view)\n\n            if not result:\n                LOG.error(exception.MaskingViewNotFound(masking_view\n                                                        .get('id')))\n\n\ndef masking_views_get(context, masking_view_id):\n    \"\"\"Get a masking view or raise an exception if it does not exist.\"\"\"\n    return _masking_views_get(context, masking_view_id)\n\n\ndef masking_views_get_all(context, marker=None, limit=None,\n                          sort_keys=None, sort_dirs=None,\n                          filters=None, offset=None):\n    \"\"\"Retrieves all masking views\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session,\n                                         models.MaskingView, marker,\n                                         limit, sort_keys, sort_dirs,\n                                         filters, offset)\n        # No masking view would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.MaskingView)\ndef _process_masking_views_info_filters(query, filters):\n    \"\"\"Common filter processing for masking views queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.MaskingView, filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef masking_views_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the masking views of a device\"\"\"\n    _masking_views_get_query(context).filter_by(storage_id=storage_id)\\\n        .delete()\n\n\ndef _storage_host_grp_host_rels_get_query(context, session=None):\n    return model_query(context, models.StorageHostGrpHostRel,\n                       session=session)\n\n\ndef _storage_host_grp_host_rels_get(context, host_grp_host_relation_id,\n                                    session=None):\n    result = (\n        _storage_host_grp_host_rels_get_query(context, session=session)\n        .filter_by(id=host_grp_host_relation_id).first())\n\n    if not result:\n        raise exception.StorageHostGrpHostRelNotFound(\n            host_grp_host_relation_id)\n\n    return result\n\n\ndef storage_host_grp_host_rels_create(context,\n                                      host_grp_host_relations):\n    \"\"\"Create multiple storage host grp host relations.\"\"\"\n    session = get_session()\n    host_grp_host_relation_refs = []\n    with session.begin():\n\n        for host_grp_host_relation in host_grp_host_relations:\n            LOG.debug('Adding new storage host group host relation for '\n                      'native storage host group id {0}:'\n                      .format(host_grp_host_relation\n                              .get('native_storage_host_group_id')))\n            if not host_grp_host_relation.get('id'):\n                host_grp_host_relation['id'] = uuidutils.generate_uuid()\n\n            host_grp_host_relation_ref \\\n                = models.StorageHostGrpHostRel()\n            host_grp_host_relation_ref.update(host_grp_host_relation)\n            
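# Queue each new relation ref so the whole batch is written\n            # with a single session.add_all() call after the loop.\n            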
host_grp_host_relation_refs.append(host_grp_host_relation_ref)\n\n        session.add_all(host_grp_host_relation_refs)\n\n    return host_grp_host_relation_refs\n\n\ndef storage_host_grp_host_rels_delete(context,\n                                      host_grp_host_relations_list):\n    \"\"\"Delete multiple storage host grp host relations.\"\"\"\n    session = get_session()\n    with session.begin():\n        for host_grp_host_relation_id in host_grp_host_relations_list:\n            LOG.debug('deleting storage host grp host relation {0}:'.format(\n                host_grp_host_relation_id))\n            query = _storage_host_grp_host_rels_get_query(context,\n                                                          session)\n            result = query.filter_by(id=host_grp_host_relation_id).delete()\n\n            if not result:\n                LOG.error(exception.StorageHostGrpHostRelNotFound(\n                    host_grp_host_relation_id))\n    return\n\n\ndef storage_host_grp_host_rels_update(context,\n                                      host_grp_host_relations_list):\n    \"\"\"Update multiple storage host grp host relations.\"\"\"\n    session = get_session()\n    with session.begin():\n        for host_grp_host_relation in host_grp_host_relations_list:\n            LOG.debug('Updating storage host grp host relations {0}:'\n                      .format(host_grp_host_relation.get('id')))\n            query = _storage_host_grp_host_rels_get_query(context,\n                                                          session)\n            result = query.filter_by(id=host_grp_host_relation.get('id')\n                                     ).update(host_grp_host_relation)\n\n            if not result:\n                LOG.error(exception.StorageHostGrpHostRelNotFound(\n                    host_grp_host_relation.get('id')))\n\n\ndef storage_host_grp_host_rels_get(context, host_grp_host_relation_id):\n    \"\"\"Get a storage host grp host relation or raise an exception if it does\n    not exist.\n    \"\"\"\n    return _storage_host_grp_host_rels_get(context,\n                                           host_grp_host_relation_id)\n\n\ndef storage_host_grp_host_rels_get_all(context, marker=None, limit=None,\n                                       sort_keys=None, sort_dirs=None,\n                                       filters=None, offset=None):\n    \"\"\"Retrieves all storage host grp host relations\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models\n                                         .StorageHostGrpHostRel,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset)\n        # No storage host grp host relation would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.StorageHostGrpHostRel)\ndef _process_storage_host_grp_host_rels_info_filters(query, filters):\n    \"\"\"Common filter processing for storage host grp host relations queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.StorageHostGrpHostRel,\n                                      filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef storage_host_grp_host_rels_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the storage host grp host relations of a device\"\"\"\n    
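# This is a hard delete: matching relation rows are removed outright,\n    # unlike task_delete_by_storage, which only sets a 'deleted' flag.\n    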
_storage_host_grp_host_rels_get_query(context) \\\n        .filter_by(storage_id=storage_id).delete()\n\n\ndef _port_grp_port_rels_get_query(context, session=None):\n    return model_query(context, models.PortGrpPortRel,\n                       session=session)\n\n\ndef _port_grp_port_rels_get(context, port_grp_port_relation_id,\n                            session=None):\n    result = (_port_grp_port_rels_get_query(context, session=session)\n              .filter_by(id=port_grp_port_relation_id).first())\n\n    if not result:\n        raise exception.PortGrpPortRelNotFound(port_grp_port_relation_id)\n\n    return result\n\n\ndef port_grp_port_rels_create(context, port_grp_port_rels):\n    \"\"\"Create multiple port grp port relations.\"\"\"\n    session = get_session()\n    port_grp_port_relation_refs = []\n    with session.begin():\n\n        for port_grp_port_relation in port_grp_port_rels:\n            LOG.debug('adding new port group port relation for '\n                      'native port group id {0}:'\n                      .format(port_grp_port_relation\n                              .get('native_port_group_id')))\n            if not port_grp_port_relation.get('id'):\n                port_grp_port_relation['id'] = uuidutils.generate_uuid()\n\n            port_grp_port_relation_ref \\\n                = models.PortGrpPortRel()\n            port_grp_port_relation_ref.update(port_grp_port_relation)\n            port_grp_port_relation_refs.append(port_grp_port_relation_ref)\n\n        session.add_all(port_grp_port_relation_refs)\n\n    return port_grp_port_relation_refs\n\n\ndef port_grp_port_rels_delete(context,\n                              port_grp_port_rels_list):\n    \"\"\"Delete multiple port grp port relations.\"\"\"\n    session = get_session()\n    with session.begin():\n        for port_grp_port_relation_id in port_grp_port_rels_list:\n            LOG.debug('deleting port grp port relation {0}:'.format(\n                port_grp_port_relation_id))\n            query = _port_grp_port_rels_get_query(context, session)\n            result = query.filter_by(id=port_grp_port_relation_id).delete()\n\n            if not result:\n                LOG.error(exception.PortGrpPortRelNotFound(\n                    port_grp_port_relation_id))\n    return\n\n\ndef port_grp_port_rels_update(context,\n                              port_grp_port_rels_list):\n    \"\"\"Update multiple port grp port relations.\"\"\"\n    session = get_session()\n    with session.begin():\n        for port_grp_port_relation in port_grp_port_rels_list:\n            LOG.debug('Updating port grp port relations {0}:'\n                      .format(port_grp_port_relation.get('id')))\n            query = _port_grp_port_rels_get_query(context,\n                                                  session)\n            result = query.filter_by(id=port_grp_port_relation.get('id')\n                                     ).update(port_grp_port_relation)\n\n            if not result:\n                LOG.error(exception.PortGrpPortRelNotFound(\n                    port_grp_port_relation.get('id')))\n\n\ndef port_grp_port_rels_get(context, port_grp_port_relation_id):\n    \"\"\"Get a port grp port relation or raise an exception if it does\n    not exist.\n    \"\"\"\n    return _port_grp_port_rels_get(context,\n                                   port_grp_port_relation_id)\n\n\ndef port_grp_port_rels_get_all(context, marker=None, limit=None,\n                               sort_keys=None, sort_dirs=None,\n                          
     filters=None, offset=None):\n    \"\"\"Retrieves all port grp port relations\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.\n                                         PortGrpPortRel,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset)\n        # No port grp port relation would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.PortGrpPortRel)\ndef _process_port_grp_port_rels_info_filters(query, filters):\n    \"\"\"Common filter processing for port grp port relations queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.PortGrpPortRel,\n                                      filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef port_grp_port_rels_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the port grp port relations of a device\"\"\"\n    _port_grp_port_rels_get_query(context) \\\n        .filter_by(storage_id=storage_id).delete()\n\n\ndef _vol_grp_vol_rels_get_query(context, session=None):\n    return model_query(context, models.VolGrpVolRel,\n                       session=session)\n\n\ndef _vol_grp_vol_rels_get(context, volume_grp_volume_relation_id,\n                          session=None):\n    result = (_vol_grp_vol_rels_get_query(context, session=session)\n              .filter_by(id=volume_grp_volume_relation_id).first())\n\n    if not result:\n        raise exception.VolGrpVolRelNotFound(\n            volume_grp_volume_relation_id)\n\n    return result\n\n\ndef vol_grp_vol_rels_create(context, vol_grp_vol_rels):\n    \"\"\"Create multiple volume grp volume relations.\"\"\"\n    session = get_session()\n    volume_grp_volume_relation_refs = []\n    with session.begin():\n\n        for volume_grp_volume_relation in vol_grp_vol_rels:\n            LOG.debug('adding new volume group volume relation for '\n                      'native volume group id {0}:'\n                      .format(volume_grp_volume_relation\n                              .get('native_volume_group_id')))\n            if not volume_grp_volume_relation.get('id'):\n                volume_grp_volume_relation['id'] = uuidutils.generate_uuid()\n\n            volume_grp_volume_relation_ref \\\n                = models.VolGrpVolRel()\n            volume_grp_volume_relation_ref.update(volume_grp_volume_relation)\n            volume_grp_volume_relation_refs.append(\n                volume_grp_volume_relation_ref)\n\n        session.add_all(volume_grp_volume_relation_refs)\n\n    return volume_grp_volume_relation_refs\n\n\ndef vol_grp_vol_rels_delete(context,\n                            vol_grp_vol_rels_list):\n    \"\"\"Delete multiple volume grp volume relations.\"\"\"\n    session = get_session()\n    with session.begin():\n        for volume_grp_volume_relation_id in vol_grp_vol_rels_list:\n            LOG.debug('deleting volume grp volume relation {0}:'.format(\n                volume_grp_volume_relation_id))\n            query = _vol_grp_vol_rels_get_query(context, session)\n            result = query.filter_by(id=volume_grp_volume_relation_id).delete()\n\n            if not result:\n                LOG.error(exception.VolGrpVolRelNotFound(\n                    volume_grp_volume_relation_id))\n    return\n\n\ndef 
vol_grp_vol_rels_update(context,\n                            vol_grp_vol_rels_list):\n    \"\"\"Update multiple volume grp volume relations.\"\"\"\n    session = get_session()\n    with session.begin():\n        for volume_grp_volume_relation in vol_grp_vol_rels_list:\n            LOG.debug('Updating volume grp volume relations {0}:'\n                      .format(volume_grp_volume_relation.get('id')))\n            query = _vol_grp_vol_rels_get_query(context,\n                                                session)\n            result = query.filter_by(id=volume_grp_volume_relation.get('id')\n                                     ).update(volume_grp_volume_relation)\n\n            if not result:\n                LOG.error(exception.VolGrpVolRelNotFound(\n                    volume_grp_volume_relation.get('id')))\n\n\ndef vol_grp_vol_rels_get(context, volume_grp_volume_relation_id):\n    \"\"\"Get a volume grp volume relation or raise an exception if it does\n    not exist.\n    \"\"\"\n    return _vol_grp_vol_rels_get(context,\n                                 volume_grp_volume_relation_id)\n\n\ndef vol_grp_vol_rels_get_all(context, marker=None, limit=None,\n                             sort_keys=None, sort_dirs=None,\n                             filters=None, offset=None):\n    \"\"\"Retrieves all volume grp volume relations\"\"\"\n    session = get_session()\n    with session.begin():\n        # Generate the query\n        query = _generate_paginate_query(context, session, models.\n                                         VolGrpVolRel,\n                                         marker, limit, sort_keys, sort_dirs,\n                                         filters, offset)\n        # No volume grp volume relation would match, return empty list\n        if query is None:\n            return []\n        return query.all()\n\n\n@apply_like_filters(model=models.VolGrpVolRel)\ndef _process_vol_grp_vol_rels_info_filters(query, filters):\n    \"\"\"Common filter processing for volume grp volume relations queries.\"\"\"\n    if filters:\n        if not is_valid_model_filters(models.VolGrpVolRel,\n                                      filters):\n            return\n        query = query.filter_by(**filters)\n\n    return query\n\n\ndef vol_grp_vol_rels_delete_by_storage(context, storage_id):\n    \"\"\"Delete all the volume grp volume relations of a device\"\"\"\n    _vol_grp_vol_rels_get_query(context) \\\n        .filter_by(storage_id=storage_id).delete()\n\n\nPAGINATION_HELPERS = {\n    models.AccessInfo: (_access_info_get_query, _process_access_info_filters,\n                        _access_info_get),\n    models.StoragePool: (_storage_pool_get_query,\n                         _process_storage_pool_info_filters,\n                         _storage_pool_get),\n    models.Storage: (_storage_get_query, _process_storage_info_filters,\n                     _storage_get),\n    models.AlertSource: (_alert_source_get_query,\n                         _process_alert_source_filters,\n                         _alert_source_get),\n    models.Volume: (_volume_get_query, _process_volume_info_filters,\n                    _volume_get),\n    models.Controller: (_controller_get_query,\n                        _process_controller_info_filters,\n                        _controller_get),\n    models.Port: (_port_get_query, _process_port_info_filters, _port_get),\n    models.Disk: (_disk_get_query, _process_disk_info_filters,\n                  _disk_get),\n    models.Quota: (_quota_get_query,\n                
   _process_quota_info_filters, _quota_get),\n    models.Filesystem: (_filesystem_get_query,\n                        _process_filesystem_info_filters, _filesystem_get),\n    models.Qtree: (_qtree_get_query,\n                   _process_qtree_info_filters, _qtree_get),\n    models.Share: (_share_get_query,\n                   _process_share_info_filters, _share_get),\n    models.Task: (_task_get_query,\n                  _process_tasks_info_filters,\n                  _task_get),\n    models.FailedTask: (_failed_tasks_get_query,\n                        _process_failed_tasks_info_filters,\n                        _failed_tasks_get),\n    models.StorageHostInitiator: (\n        _storage_host_initiators_get_query,\n        _process_storage_host_initiators_info_filters,\n        _storage_host_initiators_get),\n    models.StorageHost: (_storage_hosts_get_query,\n                         _process_storage_hosts_info_filters,\n                         _storage_hosts_get),\n    models.StorageHostGroup: (_storage_host_groups_get_query,\n                              _process_storage_host_groups_info_filters,\n                              _storage_host_groups_get),\n    models.PortGroup: (_port_groups_get_query,\n                       _process_port_groups_info_filters,\n                       _port_groups_get),\n    models.VolumeGroup: (_volume_groups_get_query,\n                         _process_volume_groups_info_filters,\n                         _volume_groups_get),\n    models.MaskingView: (_masking_views_get_query,\n                         _process_masking_views_info_filters,\n                         _masking_views_get),\n    models.StorageHostGrpHostRel: (\n        _storage_host_grp_host_rels_get_query,\n        _process_storage_host_grp_host_rels_info_filters,\n        _storage_host_grp_host_rels_get),\n    models.PortGrpPortRel: (_port_grp_port_rels_get_query,\n                            _process_port_grp_port_rels_info_filters,\n                            _port_grp_port_rels_get),\n    models.VolGrpVolRel: (\n        _vol_grp_vol_rels_get_query,\n        _process_vol_grp_vol_rels_info_filters,\n        _vol_grp_vol_rels_get),\n}\n\n\ndef process_sort_params(sort_keys, sort_dirs, default_keys=None,\n                        default_dir='asc'):\n    \"\"\"Process the sort parameters to include default keys.\n\n    Creates a list of sort keys and a list of sort directions. 
Adds the default\n    keys to the end of the list if they are not already included.\n\n    When adding the default keys to the sort keys list, the associated\n    direction is:\n    1) The first element in the 'sort_dirs' list (if specified), else\n    2) 'default_dir' value (Note that 'asc' is the default value since this is\n    the default in sqlalchemy.utils.paginate_query)\n\n    :param sort_keys: List of sort keys to include in the processed list\n    :param sort_dirs: List of sort directions to include in the processed list\n    :param default_keys: List of sort keys that need to be included in the\n                         processed list, they are added at the end of the list\n                         if not already specified.\n    :param default_dir: Sort direction associated with each of the default\n                        keys that are not supplied, used when they are added\n                        to the processed list\n    :returns: list of sort keys, list of sort directions\n    :raise exception.InvalidInput: If more sort directions than sort keys\n                                   are specified or if an invalid sort\n                                   direction is specified\n    \"\"\"\n    if default_keys is None:\n        default_keys = ['created_at']\n\n    # Determine direction to use when adding default keys\n    if sort_dirs and len(sort_dirs):\n        default_dir_value = sort_dirs[0]\n    else:\n        default_dir_value = default_dir\n\n    # Create list of keys (do not modify the input list)\n    if sort_keys:\n        result_keys = list(sort_keys)\n    else:\n        result_keys = []\n\n    # If a list of directions is not provided, use the default sort direction\n    # for all provided keys.\n    if sort_dirs:\n        result_dirs = []\n        # Verify sort direction\n        for sort_dir in sort_dirs:\n            if sort_dir not in ('asc', 'desc'):\n                msg = _(\"Unknown sort direction, must be 'desc' or 'asc'.\")\n                raise exception.InvalidInput(msg)\n            result_dirs.append(sort_dir)\n    else:\n        result_dirs = [default_dir_value for _sort_key in result_keys]\n\n    # Ensure that the key and direction length match\n    while len(result_dirs) < len(result_keys):\n        result_dirs.append(default_dir_value)\n    # Unless more directions are specified, which is an error\n    if len(result_dirs) > len(result_keys):\n        msg = _(\"Sort direction array size exceeds sort key array size.\")\n        raise exception.InvalidInput(msg)\n\n    # Ensure defaults are included\n    for key in default_keys:\n        if key not in result_keys:\n            result_keys.append(key)\n            result_dirs.append(default_dir_value)\n\n    return result_keys, result_dirs\n\n\ndef _generate_paginate_query(context, session, paginate_type, marker,\n                             limit, sort_keys, sort_dirs, filters,\n                             offset=None\n                             ):\n    \"\"\"Generate the query to include the filters and the paginate options.\n\n    Returns a query with sorting / pagination criteria added or None\n    if the given filters will not yield any results.\n\n    :param context: context to query under\n    :param session: the session to use\n    :param marker: the last item of the previous page; we return the next\n                    results after this value.\n    :param limit: maximum number of items to return\n    :param sort_keys: list of attributes by which results should be sorted,\n           
           paired with corresponding item in sort_dirs\n    :param sort_dirs: list of directions in which results should be sorted,\n                      paired with corresponding item in sort_keys\n    :param filters: dictionary of filters; values that are in lists, tuples,\n                    or sets cause an 'IN' operation, while exact matching\n                    is used for other values, see _process_volume_info_filters\n                    function for more information\n    :param offset: number of items to skip\n    :param paginate_type: the model class to paginate\n    :returns: updated query or None\n    \"\"\"\n    get_query, process_filters, get = PAGINATION_HELPERS[paginate_type]\n\n    sort_keys, sort_dirs = process_sort_params(sort_keys,\n                                               sort_dirs,\n                                               default_dir='desc')\n    query = get_query(context, session=session)\n\n    if filters:\n        query = process_filters(query, filters)\n        if query is None:\n            return None\n\n    marker_object = None\n    if marker is not None:\n        marker_object = get(context, marker, session)\n\n    return sqlalchemyutils.paginate_query(query, paginate_type, limit,\n                                          sort_keys,\n                                          marker=marker_object,\n                                          sort_dirs=sort_dirs,\n                                          offset=offset)\n"
  },
  {
    "path": "delfin/db/sqlalchemy/models.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# Copyright 2011 Piston Cloud Computing, Inc.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\"\"\"\nSQLAlchemy models for Delfin  data.\n\"\"\"\n\nfrom oslo_config import cfg\nfrom oslo_db.sqlalchemy import models\nfrom oslo_db.sqlalchemy.types import JsonEncodedDict\nfrom sqlalchemy import Column, Integer, String, Boolean, BigInteger, \\\n    DateTime, BIGINT\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom delfin.common import constants\n\nCONF = cfg.CONF\nBASE = declarative_base()\n\n\nclass DelfinBase(models.ModelBase,\n                 models.TimestampMixin):\n    \"\"\"Base class for Delfin Models.\"\"\"\n    __table_args__ = {'mysql_engine': 'InnoDB'}\n    metadata = None\n\n    def to_dict(self):\n        model_dict = {}\n        for k, v in self.items():\n            if not issubclass(type(v), DelfinBase):\n                model_dict[k] = v\n        return model_dict\n\n\nclass AccessInfo(BASE, DelfinBase):\n    \"\"\"Represent access info required for storage accessing.\"\"\"\n    __tablename__ = \"access_info\"\n    storage_id = Column(String(36), primary_key=True)\n    storage_name = Column(String(255))\n    vendor = Column(String(255))\n    model = Column(String(255))\n    rest = Column(JsonEncodedDict)\n    ssh = Column(JsonEncodedDict)\n    cli = Column(JsonEncodedDict)\n    smis = Column(JsonEncodedDict)\n    extra_attributes = Column(JsonEncodedDict)\n\n\nclass Storage(BASE, DelfinBase):\n    \"\"\"Represents a storage object.\"\"\"\n\n    __tablename__ = 'storages'\n    id = Column(String(36), primary_key=True)\n    name = Column(String(255))\n    description = Column(String(255))\n    location = Column(String(255))\n    status = Column(String(255))\n    sync_status = Column(Integer, default=constants.SyncStatus.SYNCED)\n    vendor = Column(String(255))\n    model = Column(String(255))\n    serial_number = Column(String(255))\n    firmware_version = Column(String(255))\n    total_capacity = Column(BigInteger)\n    used_capacity = Column(BigInteger)\n    free_capacity = Column(BigInteger)\n    raw_capacity = Column(BigInteger)\n    subscribed_capacity = Column(BigInteger)\n    deleted_at = Column(DateTime)\n    deleted = Column(Boolean, default=False)\n\n\nclass Volume(BASE, DelfinBase):\n    \"\"\"Represents a volume object.\"\"\"\n    __tablename__ = 'volumes'\n    id = Column(String(36), primary_key=True)\n    native_volume_id = Column(String(255))\n    name = Column(String(255))\n    description = Column(String(255))\n    type = Column(String(255))\n    status = Column(String(255))\n    storage_id = Column(String(36))\n    native_storage_pool_id = Column(String(255))\n    wwn = Column(String(255))\n    total_capacity = Column(BigInteger)\n    
used_capacity = Column(BigInteger)\n    free_capacity = Column(BigInteger)\n    compressed = Column(Boolean)\n    deduplicated = Column(Boolean)\n\n\nclass StoragePool(BASE, DelfinBase):\n    \"\"\"Represents a storage_pool object.\"\"\"\n    __tablename__ = 'storage_pools'\n    id = Column(String(36), primary_key=True)\n    native_storage_pool_id = Column(String(255))\n    name = Column(String(255))\n    description = Column(String(255))\n    storage_type = Column(String(255))\n    status = Column(String(255))\n    storage_id = Column(String(36))\n    total_capacity = Column(BigInteger)\n    used_capacity = Column(BigInteger)\n    free_capacity = Column(BigInteger)\n    subscribed_capacity = Column(BigInteger)\n\n\nclass Disk(BASE, DelfinBase):\n    \"\"\"Represents a disk object.\"\"\"\n    __tablename__ = 'disks'\n    id = Column(String(36), primary_key=True)\n    native_disk_id = Column(String(255))\n    name = Column(String(255))\n    physical_type = Column(String(255))\n    logical_type = Column(String(255))\n    status = Column(String(255))\n    location = Column(String(255))\n    storage_id = Column(String(255))\n    native_disk_group_id = Column(String(255))\n    serial_number = Column(String(255))\n    manufacturer = Column(String(255))\n    model = Column(String(255))\n    firmware = Column(String(255))\n    speed = Column(Integer)\n    capacity = Column(BigInteger)\n    health_score = Column(Integer)\n\n\nclass Controller(BASE, DelfinBase):\n    \"\"\"Represents a controller object.\"\"\"\n    __tablename__ = 'controllers'\n    id = Column(String(36), primary_key=True)\n    native_controller_id = Column(String(255))\n    name = Column(String(255))\n    status = Column(String(255))\n    location = Column(String(255))\n    soft_version = Column(String(255))\n    cpu_info = Column(String(255))\n    cpu_count = Column(Integer)\n    memory_size = Column(BigInteger)\n    storage_id = Column(String(36))\n    mgmt_ip = Column(String(255))\n\n\nclass Port(BASE, DelfinBase):\n    \"\"\"Represents a port object.\"\"\"\n    __tablename__ = 'ports'\n    id = Column(String(36), primary_key=True)\n    native_port_id = Column(String(255))\n    name = Column(String(255))\n    location = Column(String(255))\n    type = Column(String(255))\n    logical_type = Column(String(255))\n    connection_status = Column(String(255))\n    health_status = Column(String(255))\n    storage_id = Column(String(36))\n    native_parent_id = Column(String(255))\n    speed = Column(Integer)\n    max_speed = Column(Integer)\n    wwn = Column(String(255))\n    mac_address = Column(String(255))\n    ipv4 = Column(String(255))\n    ipv4_mask = Column(String(255))\n    ipv6 = Column(String(255))\n    ipv6_mask = Column(String(255))\n\n\nclass Filesystem(BASE, DelfinBase):\n    \"\"\"Represents a filesystem object.\"\"\"\n    __tablename__ = 'filesystems'\n    id = Column(String(36), primary_key=True)\n    native_filesystem_id = Column(String(255))\n    name = Column(String(255))\n    type = Column(String(255))\n    status = Column(String(255))\n    storage_id = Column(String(36))\n    native_pool_id = Column(String(255))\n    security_mode = Column(String(255))\n    total_capacity = Column(BigInteger)\n    used_capacity = Column(BigInteger)\n    free_capacity = Column(BigInteger)\n    compressed = Column(Boolean)\n    deduplicated = Column(Boolean)\n    worm = Column(String(255))\n\n\nclass Qtree(BASE, DelfinBase):\n    \"\"\"Represents a qtree object.\"\"\"\n    __tablename__ = 'qtrees'\n    id = Column(String(36), 
primary_key=True)\n    native_qtree_id = Column(String(255))\n    name = Column(String(255))\n    path = Column(String(255))\n    storage_id = Column(String(36))\n    native_filesystem_id = Column(String(255))\n    security_mode = Column(String(255))\n\n\nclass Quota(BASE, DelfinBase):\n    \"\"\"Represents a quota object.\"\"\"\n    __tablename__ = 'quota'\n    id = Column(String(36), primary_key=True)\n    native_quota_id = Column(String(255))\n    type = Column(String(255))\n    storage_id = Column(String(36))\n    native_filesystem_id = Column(String(255))\n    native_qtree_id = Column(String(255))\n    capacity_hard_limit = Column(BigInteger)\n    capacity_soft_limit = Column(BigInteger)\n    file_hard_limit = Column(BigInteger)\n    file_soft_limit = Column(BigInteger)\n    file_count = Column(BigInteger)\n    used_capacity = Column(BigInteger)\n    user_group_name = Column(String(255))\n\n\nclass Share(BASE, DelfinBase):\n    \"\"\"Represents a share object.\"\"\"\n    __tablename__ = 'shares'\n    id = Column(String(36), primary_key=True)\n    native_share_id = Column(String(255))\n    name = Column(String(255))\n    path = Column(String(255))\n    storage_id = Column(String(36))\n    native_filesystem_id = Column(String(255))\n    native_qtree_id = Column(String(255))\n    protocol = Column(String(255))\n\n\nclass AlertSource(BASE, DelfinBase):\n    \"\"\"Represents an alert source configuration.\"\"\"\n    __tablename__ = 'alert_source'\n    storage_id = Column(String(36), primary_key=True)\n    host = Column(String(255))\n    version = Column(String(255))\n    community_string = Column(String(255))\n    username = Column(String(255))\n    security_level = Column(String(255))\n    auth_key = Column(String(255))\n    auth_protocol = Column(String(255))\n    privacy_protocol = Column(String(255))\n    privacy_key = Column(String(255))\n    engine_id = Column(String(255))\n    port = Column(Integer)\n    context_name = Column(String(255))\n    retry_num = Column(Integer)\n    expiration = Column(Integer)\n\n\nclass Task(BASE, DelfinBase):\n    \"\"\"Represents task attributes.\"\"\"\n    __tablename__ = 'tasks'\n    id = Column(Integer, primary_key=True, autoincrement=True)\n    storage_id = Column(String(36))\n    interval = Column(Integer)\n    method = Column(String(255))\n    args = Column(JsonEncodedDict)\n    last_run_time = Column(Integer)\n    job_id = Column(String(36))\n    executor = Column(String(255))\n    deleted_at = Column(DateTime)\n    deleted = Column(Boolean, default=False)\n\n\nclass FailedTask(BASE, DelfinBase):\n    \"\"\"Represents failed task attributes.\"\"\"\n    __tablename__ = 'failed_tasks'\n    id = Column(Integer, primary_key=True, autoincrement=True)\n    storage_id = Column(String(36))\n    task_id = Column(Integer)\n    interval = Column(Integer)\n    start_time = Column(BIGINT)\n    end_time = Column(BIGINT)\n    retry_count = Column(Integer)\n    method = Column(String(255))\n    result = Column(String(255))\n    job_id = Column(String(36))\n    executor = Column(String(255))\n    deleted_at = Column(DateTime)\n    deleted = Column(Boolean, default=False)\n\n\nclass StorageHostInitiator(BASE, DelfinBase):\n    \"\"\"Represents the storage host initiator attributes.\"\"\"\n    __tablename__ = 'storage_host_initiators'\n    id = Column(String(36), primary_key=True)\n    storage_id = Column(String(36))\n    name = Column(String(255))\n    description = Column(String(255))\n    alias = Column(String(255))\n    wwn = Column(String(255))\n    
status = Column(String(255))\n    type = Column(String(255))\n    native_storage_host_id = Column(String(255))\n    native_storage_host_initiator_id = Column(String(255))\n\n\nclass StorageHost(BASE, DelfinBase):\n    \"\"\"Represents the storage host attributes.\"\"\"\n    __tablename__ = 'storage_hosts'\n    id = Column(String(36), primary_key=True)\n    storage_id = Column(String(36))\n    name = Column(String(255))\n    description = Column(String(255))\n    os_type = Column(String(255))\n    ip_address = Column(String(255))\n    status = Column(String(255))\n    native_storage_host_id = Column(String(255))\n\n\nclass StorageHostGroup(BASE, DelfinBase):\n    \"\"\"Represents the storage host group attributes.\"\"\"\n    __tablename__ = 'storage_host_groups'\n    id = Column(String(36), primary_key=True)\n    storage_id = Column(String(36))\n    name = Column(String(255))\n    description = Column(String(255))\n    native_storage_host_group_id = Column(String(255))\n\n\nclass PortGroup(BASE, DelfinBase):\n    \"\"\"Represents the port group attributes.\"\"\"\n    __tablename__ = 'port_groups'\n    id = Column(String(36), primary_key=True)\n    storage_id = Column(String(36))\n    name = Column(String(255))\n    description = Column(String(255))\n    native_port_group_id = Column(String(255))\n\n\nclass VolumeGroup(BASE, DelfinBase):\n    \"\"\"Represents the volume group attributes.\"\"\"\n    __tablename__ = 'volume_groups'\n    id = Column(String(36), primary_key=True)\n    storage_id = Column(String(36))\n    name = Column(String(255))\n    description = Column(String(255))\n    native_volume_group_id = Column(String(255))\n\n\nclass MaskingView(BASE, DelfinBase):\n    \"\"\"Represents the masking view attributes.\"\"\"\n    __tablename__ = 'masking_views'\n    id = Column(String(36), primary_key=True)\n    storage_id = Column(String(36))\n    name = Column(String(255))\n    description = Column(String(255))\n    native_storage_host_group_id = Column(String(255))\n    native_volume_group_id = Column(String(255))\n    native_port_group_id = Column(String(255))\n    native_storage_host_id = Column(String(255))\n    native_volume_id = Column(String(255))\n    native_masking_view_id = Column(String(255))\n\n\nclass StorageHostGrpHostRel(BASE, DelfinBase):\n    \"\"\"Represents the storage host group and storage host relation\n    attributes.\n    \"\"\"\n    __tablename__ = 'storage_host_grp_host_rels'\n    id = Column(String(36), primary_key=True)\n    storage_id = Column(String(36))\n    name = Column(String(255))\n    description = Column(String(255))\n    native_storage_host_group_id = Column(String(255))\n    native_storage_host_id = Column(String(255))\n\n\nclass PortGrpPortRel(BASE, DelfinBase):\n    \"\"\"Represents port group and port relation attributes.\"\"\"\n    __tablename__ = 'port_grp_port_rels'\n    id = Column(String(36), primary_key=True)\n    storage_id = Column(String(36))\n    name = Column(String(255))\n    description = Column(String(255))\n    native_port_group_id = Column(String(255))\n    native_port_id = Column(String(255))\n\n\nclass VolGrpVolRel(BASE, DelfinBase):\n    \"\"\"Represents the volume group and volume relation attributes.\"\"\"\n    __tablename__ = 'vol_grp_vol_rels'\n    id = Column(String(36), primary_key=True)\n    storage_id = Column(String(36))\n    name = Column(String(255))\n    description = Column(String(255))\n    native_volume_group_id = Column(String(255))\n    native_volume_id = Column(String(255))\n"
  },
  {
    "path": "delfin/drivers/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/api.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport six\n\nfrom oslo_log import log\nfrom oslo_utils import uuidutils\n\nfrom delfin import db\nfrom delfin.drivers import helper\nfrom delfin.drivers import manager\n\nLOG = log.getLogger(__name__)\n\n\nclass API(object):\n    def __init__(self):\n        self.driver_manager = manager.DriverManager()\n\n    def discover_storage(self, context, access_info):\n        \"\"\"Discover a storage system with access information.\"\"\"\n        helper.encrypt_password(context, access_info)\n        if 'storage_id' not in access_info:\n            access_info['storage_id'] = six.text_type(\n                uuidutils.generate_uuid())\n\n        driver = self.driver_manager.get_driver(context,\n                                                cache_on_load=False,\n                                                **access_info)\n        storage = driver.get_storage(context)\n\n        # Need to validate storage response from driver\n        helper.check_storage_repetition(context, storage)\n        access_info = db.access_info_create(context, access_info)\n        storage['id'] = access_info['storage_id']\n        storage = db.storage_create(context, storage)\n\n        LOG.info(\"Storage found successfully.\")\n        return storage\n\n    def update_access_info(self, context, access_info):\n        \"\"\"Validate and update access information.\"\"\"\n        helper.encrypt_password(context, access_info)\n        driver = self.driver_manager.get_driver(context,\n                                                cache_on_load=False,\n                                                **access_info)\n        storage_new = driver.get_storage(context)\n\n        # Need to validate storage response from driver\n        storage_id = access_info['storage_id']\n        helper.check_storage_consistency(context, storage_id, storage_new)\n        access_info = db.access_info_update(context, storage_id, access_info)\n        db.storage_update(context, storage_id, storage_new)\n\n        LOG.info(\"Access information updated successfully.\")\n        return access_info\n\n    def remove_storage(self, context, storage_id):\n        \"\"\"Clear driver instance from driver factory.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        driver.delete_storage(context)\n        self.driver_manager.remove_driver(storage_id)\n\n    def get_storage(self, context, storage_id):\n        \"\"\"Get storage device information from storage system\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.get_storage(context)\n\n    def list_storage_pools(self, context, storage_id):\n        \"\"\"List all storage pools from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_storage_pools(context)\n\n    def list_volumes(self, context, storage_id):\n        
\"\"\"List all storage volumes from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_volumes(context)\n\n    def list_controllers(self, context, storage_id):\n        \"\"\"List all storage controllers from storage system.\"\"\"\n\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_controllers(context)\n\n    def list_ports(self, context, storage_id):\n        \"\"\"List all ports from storage system.\"\"\"\n\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_ports(context)\n\n    def list_disks(self, context, storage_id):\n        \"\"\"List all disks from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_disks(context)\n\n    def list_quotas(self, context, storage_id):\n        \"\"\"List all quotas from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_quotas(context)\n\n    def list_filesystems(self, context, storage_id):\n        \"\"\"List all filesystems from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_filesystems(context)\n\n    def list_qtrees(self, context, storage_id):\n        \"\"\"List all qtrees from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_qtrees(context)\n\n    def list_shares(self, context, storage_id):\n        \"\"\"List all shares from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_shares(context)\n\n    def add_trap_config(self, context, storage_id, trap_config):\n        \"\"\"Config the trap receiver in storage system.\"\"\"\n        pass\n\n    def remove_trap_config(self, context, storage_id, trap_config):\n        \"\"\"Remove trap receiver configuration from storage system.\"\"\"\n        pass\n\n    def parse_alert(self, context, storage_id, alert):\n        \"\"\"Parse alert data got from snmp trap server.\"\"\"\n        access_info = db.access_info_get(context, storage_id)\n        driver = self.driver_manager.get_driver(context,\n                                                invoke_on_load=False,\n                                                **access_info)\n        return driver.parse_alert(context, alert)\n\n    def clear_alert(self, context, storage_id, sequence_number):\n        \"\"\"Clear alert from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        driver.clear_alert(context, sequence_number)\n\n    def list_alerts(self, context, storage_id, query_para=None):\n        \"\"\"List alert from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_alerts(context, query_para)\n\n    def collect_perf_metrics(self, context, storage_id,\n                             resource_metrics, start_time, end_time):\n\n        \"\"\"Collect performance metrics\"\"\"\n        driver = self.driver_manager.get_driver(context,\n                                                storage_id=storage_id)\n        return driver.collect_perf_metrics(context, storage_id,\n                                           resource_metrics, 
                                           start_time,\n                                           end_time)\n\n    def get_capabilities(self, context, storage_id, filters=None):\n        \"\"\"Get capabilities from supported driver\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.get_capabilities(context, filters)\n\n    def list_storage_host_initiators(self, context, storage_id):\n        \"\"\"List all storage host initiators from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_storage_host_initiators(context)\n\n    def list_storage_hosts(self, context, storage_id):\n        \"\"\"List all storage hosts from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_storage_hosts(context)\n\n    def list_storage_host_groups(self, context, storage_id):\n        \"\"\"List all storage host groups from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_storage_host_groups(context)\n\n    def list_port_groups(self, context, storage_id):\n        \"\"\"List all port groups from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_port_groups(context)\n\n    def list_volume_groups(self, context, storage_id):\n        \"\"\"List all volume groups from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_volume_groups(context)\n\n    def list_masking_views(self, context, storage_id):\n        \"\"\"List all masking views from storage system.\"\"\"\n        driver = self.driver_manager.get_driver(context, storage_id=storage_id)\n        return driver.list_masking_views(context)\n\n    def get_alert_sources(self, context, storage_id):\n        access_info = db.access_info_get(context, storage_id)\n        driver = self.driver_manager.get_driver(context,\n                                                cache_on_load=False,\n                                                **access_info)\n        return driver.get_alert_sources(context)\n
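\n\n# A minimal usage sketch (editorial illustration, not part of delfin; 'ctx'\n# is assumed to be a delfin RequestContext and 'access_info' a dict in the\n# shape the target driver expects):\n#\n#     api = API()\n#     storage = api.discover_storage(ctx, access_info)\n#     pools = api.list_storage_pools(ctx, storage['id'])\n#     volumes = api.list_volumes(ctx, storage['id'])\n"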
  },
  {
    "path": "delfin/drivers/dell_emc/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/dell_emc/power_store/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/dell_emc/power_store/consts.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.common import constants\n\n\nclass StatusCode(object):\n    SUCCESS = 200\n    SUCCESS_CREATE_RESPONSE = 201\n    SUCCESS_NO_CONTENT = 204\n    PARTIAL_CONTENT = 206\n    UNAUTHORIZED = 401\n    FORBIDDEN = 403\n\n\nclass DigitalConstant(object):\n    ZERO = 0\n    ONE = 1\n    MINUS_ONE = -1\n    TWO = 2\n    THREE = 3\n    FIVE = 5\n    SIX = 6\n    SIXTY = 60\n\n\nSTORAGE_STATUS_MAP = {\n    'Unconfigured': constants.StorageStatus.NORMAL,\n    'Unconfigured_Faulted': constants.StorageStatus.ABNORMAL,\n    'Configuring': constants.StorageStatus.NORMAL,\n    'Core_Initialization': constants.StorageStatus.NORMAL,\n    'Configured': constants.StorageStatus.NORMAL,\n    'Expanding': constants.StorageStatus.NORMAL,\n    'Removing': constants.StorageStatus.NORMAL,\n    'Clustering_Failed': constants.StorageStatus.ABNORMAL,\n    'Core_Initialization_Failed': constants.StorageStatus.ABNORMAL,\n    'Removed': constants.StorageStatus.OFFLINE,\n    'Post_Core_Initialization': constants.StorageStatus.NORMAL,\n    'Unknown': constants.StorageStatus.UNKNOWN\n}\n\nVOLUME_STATUS_MAP = {\n    'Ready': constants.StorageStatus.NORMAL,\n    'Initializing': constants.StorageStatus.NORMAL,\n    'Offline': constants.StorageStatus.OFFLINE,\n    'Destroying': constants.StorageStatus.NORMAL\n}\n\nVIRTUAL_VOLUME_STATUS_MAP = {\n    'Ready': constants.StorageStatus.NORMAL,\n    'Not_Ready': constants.StorageStatus.ABNORMAL,\n    'Write_Disabled': constants.StorageStatus.ABNORMAL,\n    'Mixed': constants.StorageStatus.ABNORMAL,\n    'Not_Applicable': constants.StorageStatus.ABNORMAL\n}\n\nVOLUME_TYPE_MAP = {\n    'Primary': constants.VolumeType.THIN,\n    'Clone': constants.VolumeType.THIN\n}\n\nDISK_PHYSICAL_TYPE = {\n    'SAS_SSD': constants.DiskPhysicalType.SSD,\n    'NVMe_SCM': constants.DiskPhysicalType.UNKNOWN,\n    'NVMe_SSD': constants.DiskPhysicalType.NVME_SSD,\n    'Unknown': constants.DiskPhysicalType.UNKNOWN\n}\n\nDISK_STATUS_MAP = {\n    'Uninitialized': constants.DiskStatus.NORMAL,\n    'Healthy': constants.DiskStatus.NORMAL,\n    'Initializing': constants.DiskStatus.NORMAL,\n    'Failed': constants.DiskStatus.ABNORMAL,\n    'Disconnected': constants.DiskStatus.OFFLINE,\n    'Prepare_Failed': constants.DiskStatus.NORMAL,\n    'Trigger_Update': constants.DiskStatus.NORMAL\n}\n\nCONTROLLER_STATUS_MAP = {\n    'Uninitialized': constants.ControllerStatus.NORMAL,\n    'Healthy': constants.ControllerStatus.NORMAL,\n    'Initializing': constants.ControllerStatus.NORMAL,\n    'Failed': constants.ControllerStatus.FAULT,\n    'Disconnected': constants.ControllerStatus.OFFLINE,\n    'Prepare_Failed': constants.ControllerStatus.NORMAL,\n    'Trigger_Update': constants.ControllerStatus.NORMAL\n}\n\nPORT_CONNECTION_STATUS_MAP = {\n    'true': constants.PortConnectionStatus.CONNECTED,\n    True: constants.PortConnectionStatus.CONNECTED,\n    'false': constants.PortConnectionStatus.DISCONNECTED,\n    
False: constants.PortConnectionStatus.DISCONNECTED\n}\n\nPORT_HEALTH_STATUS_MAP = {\n    'Uninitialized': constants.PortHealthStatus.NORMAL,\n    'Healthy': constants.PortHealthStatus.NORMAL,\n    'Initializing': constants.PortHealthStatus.NORMAL,\n    'Failed': constants.PortHealthStatus.ABNORMAL,\n    'Disconnected': constants.PortHealthStatus.NORMAL,\n    'Prepare_Failed': constants.PortHealthStatus.NORMAL,\n    'Trigger_Update': constants.PortHealthStatus.NORMAL,\n    'Empty': constants.PortHealthStatus.UNKNOWN\n}\n\nALERT_SEVERITY_MAP = {\n    'Critical': constants.Severity.CRITICAL,\n    'Major': constants.Severity.MAJOR,\n    'Minor': constants.Severity.MINOR,\n    'Info': constants.Severity.INFORMATIONAL,\n    'None': constants.Severity.NOT_SPECIFIED,\n}\n\nHOST_OS_TYPES_MAP = {\n    'Windows': constants.HostOSTypes.WINDOWS,\n    'Linux': constants.HostOSTypes.LINUX,\n    'ESXi': constants.HostOSTypes.VMWARE_ESX,\n    'AIX': constants.HostOSTypes.AIX,\n    'HP-UX': constants.HostOSTypes.HP_UX,\n    'Solaris': constants.HostOSTypes.SOLARIS\n}\n\nINITIATOR_TYPE_MAP = {\n    'iSCSI': constants.InitiatorType.ISCSI,\n    'FC': constants.InitiatorType.FC,\n    'NVMe': constants.InitiatorType.NVME_OVER_FABRIC,\n    'NVMe_vVol': constants.InitiatorType.NVME_OVER_FABRIC\n}\n\n\nclass DiskType(object):\n    NVME_NVRAM = 'NVMe_NVRAM'\n    NVME_SCM = 'NVMe_SCM'\n\n    ALL = (NVME_SCM, NVME_NVRAM)\n\n\n#  /metrics/generate\nSPACE_METRICS_BY_APPLIANCE = 'space_metrics_by_appliance'\nSPACE_METRICS_BY_VOLUME = 'space_metrics_by_volume'\nPERFORMANCE_METRICS_BY_CLUSTER = 'performance_metrics_by_cluster'\nPERFORMANCE_METRICS_BY_APPLIANCE = 'performance_metrics_by_appliance'\nPERFORMANCE_METRICS_BY_VOLUME = 'performance_metrics_by_volume'\nPERFORMANCE_METRICS_BY_NODE = 'performance_metrics_by_node'\nPERFORMANCE_METRICS_BY_FE_FC_PORT = 'performance_metrics_by_fe_fc_port'\nPERFORMANCE_METRICS_INTERVAL = 'Twenty_Sec'\nPERF_INTERVAL = 20\n\n# character\nCHARACTER_DRIVE = 'Drive'\nCHARACTER_NODE = 'Node'\nCHARACTER_SNAPSHOT = 'Snapshot'\nCHARACTER_EMPTY = 'Empty'\nMGMT_NODE_COREOS = 'Mgmt_Node_CoreOS'\nLIMIT_COUNT = 2000\nDEFAULT_TIMEOUT = 10\n\nUTC_FORMAT = '%Y-%m-%dT%H:%M:%S.%f+00:00'\nSYSTEM_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'\nPERF_TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'\n\nPARSE_ALERT_DESCRIPTION = '1.3.6.1.4.1.1139.205.1.1.2'\nPARSE_ALERT_CODE = '1.3.6.1.4.1.1139.205.1.1.1'\nPARSE_ALERT_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'\nPARSE_ALERT_TIME = '1.3.6.1.2.1.1.3.0'\nPARSE_ALERT_TIME_UTC = '1.3.6.1.4.1.1139.205.1.1.10'\nPARSE_ALERT_UPDATE_TIME_UTC = '1.3.6.1.4.1.1139.205.1.1.9'\nPARSE_ALERT_RESOURCE_TYPE = '1.3.6.1.4.1.1139.205.1.1.4'\nPARSE_ALERT_RESOURCE_ID = '1.3.6.1.4.1.1139.205.1.1.5'\nPARSE_ALERT_RESOURCE_NAME = '1.3.6.1.4.1.1139.205.1.1.6'\nPARSE_ALERT_STATE = '1.3.6.1.4.1.1139.205.1.1.7'\nPARSE_ALERT_APPLIANCE = '1.3.6.1.4.1.1139.205.1.1.8'\n\nSNMP_SEVERITY_MAP = {\n    '1.3.6.1.4.1.1139.205.1.2.1': constants.Severity.CRITICAL,\n    '1.3.6.1.4.1.1139.205.1.2.2': constants.Severity.MAJOR,\n    '1.3.6.1.4.1.1139.205.1.2.3': constants.Severity.MINOR,\n    '1.3.6.1.4.1.1139.205.1.2.4': constants.Severity.INFORMATIONAL\n}\n\nSTORAGE_CAP = {\n    constants.StorageMetric.IOPS.name: {\n        \"unit\": constants.StorageMetric.IOPS.unit,\n        \"description\": constants.StorageMetric.IOPS.description\n    },\n    constants.StorageMetric.READ_IOPS.name: {\n        \"unit\": constants.StorageMetric.READ_IOPS.unit,\n        \"description\": constants.StorageMetric.READ_IOPS.description\n    },\n    
constants.StorageMetric.WRITE_IOPS.name: {\n        \"unit\": constants.StorageMetric.WRITE_IOPS.unit,\n        \"description\": constants.StorageMetric.WRITE_IOPS.description\n    },\n    constants.StorageMetric.THROUGHPUT.name: {\n        \"unit\": constants.StorageMetric.THROUGHPUT.unit,\n        \"description\": constants.StorageMetric.THROUGHPUT.description\n    },\n    constants.StorageMetric.READ_THROUGHPUT.name: {\n        \"unit\": constants.StorageMetric.READ_THROUGHPUT.unit,\n        \"description\": constants.StorageMetric.READ_THROUGHPUT.description\n    },\n    constants.StorageMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": constants.StorageMetric.WRITE_THROUGHPUT.unit,\n        \"description\": constants.StorageMetric.WRITE_THROUGHPUT.description\n    },\n    constants.StorageMetric.RESPONSE_TIME.name: {\n        \"unit\": constants.StorageMetric.RESPONSE_TIME.unit,\n        \"description\": constants.StorageMetric.RESPONSE_TIME.description\n    },\n    constants.StorageMetric.READ_RESPONSE_TIME.name: {\n        \"unit\": constants.StorageMetric.READ_RESPONSE_TIME.unit,\n        \"description\": constants.StorageMetric.READ_RESPONSE_TIME.description\n    },\n    constants.StorageMetric.WRITE_RESPONSE_TIME.name: {\n        \"unit\": constants.StorageMetric.WRITE_RESPONSE_TIME.unit,\n        \"description\": constants.StorageMetric.WRITE_RESPONSE_TIME.description\n    },\n    constants.StorageMetric.IO_SIZE.name: {\n        \"unit\": constants.StorageMetric.IO_SIZE.unit,\n        \"description\": constants.StorageMetric.IO_SIZE.description\n    },\n    constants.StorageMetric.READ_IO_SIZE.name: {\n        \"unit\": constants.StorageMetric.READ_IO_SIZE.unit,\n        \"description\": constants.StorageMetric.READ_IO_SIZE.description\n    },\n    constants.StorageMetric.WRITE_IO_SIZE.name: {\n        \"unit\": constants.StorageMetric.WRITE_IO_SIZE.unit,\n        \"description\": constants.StorageMetric.WRITE_IO_SIZE.description\n    }\n}\n\nSTORAGE_POOL_CAP = {\n    constants.StoragePoolMetric.IOPS.name: {\n        \"unit\": constants.StoragePoolMetric.IOPS.unit,\n        \"description\": constants.StoragePoolMetric.IOPS.description\n    },\n    constants.StoragePoolMetric.READ_IOPS.name: {\n        \"unit\": constants.StoragePoolMetric.READ_IOPS.unit,\n        \"description\": constants.StoragePoolMetric.READ_IOPS.description\n    },\n    constants.StoragePoolMetric.WRITE_IOPS.name: {\n        \"unit\": constants.StoragePoolMetric.WRITE_IOPS.unit,\n        \"description\": constants.StoragePoolMetric.WRITE_IOPS.description\n    },\n    constants.StoragePoolMetric.THROUGHPUT.name: {\n        \"unit\": constants.StoragePoolMetric.THROUGHPUT.unit,\n        \"description\": constants.StoragePoolMetric.THROUGHPUT.description\n    },\n    constants.StoragePoolMetric.READ_THROUGHPUT.name: {\n        \"unit\": constants.StoragePoolMetric.READ_THROUGHPUT.unit,\n        \"description\": constants.StoragePoolMetric.READ_THROUGHPUT.description\n    },\n    constants.StoragePoolMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": constants.StoragePoolMetric.WRITE_THROUGHPUT.unit,\n        \"description\": constants.StoragePoolMetric.WRITE_THROUGHPUT.description\n    },\n    constants.StoragePoolMetric.RESPONSE_TIME.name: {\n        \"unit\": constants.StoragePoolMetric.RESPONSE_TIME.unit,\n        \"description\": constants.StoragePoolMetric.RESPONSE_TIME.description\n    },\n    constants.StoragePoolMetric.READ_RESPONSE_TIME.name: {\n        \"unit\": 
constants.StoragePoolMetric.READ_RESPONSE_TIME.unit,\n        \"description\":\n            constants.StoragePoolMetric.READ_RESPONSE_TIME.description\n    },\n    constants.StoragePoolMetric.WRITE_RESPONSE_TIME.name: {\n        \"unit\": constants.StoragePoolMetric.WRITE_RESPONSE_TIME.unit,\n        \"description\":\n            constants.StoragePoolMetric.WRITE_RESPONSE_TIME.description\n    },\n    constants.StoragePoolMetric.IO_SIZE.name: {\n        \"unit\": constants.StoragePoolMetric.IO_SIZE.unit,\n        \"description\": constants.StoragePoolMetric.IO_SIZE.description\n    },\n    constants.StoragePoolMetric.READ_IO_SIZE.name: {\n        \"unit\": constants.StoragePoolMetric.READ_IO_SIZE.unit,\n        \"description\": constants.StoragePoolMetric.READ_IO_SIZE.description\n    },\n    constants.StoragePoolMetric.WRITE_IO_SIZE.name: {\n        \"unit\": constants.StoragePoolMetric.WRITE_IO_SIZE.unit,\n        \"description\": constants.StoragePoolMetric.WRITE_IO_SIZE.description\n    }\n}\n\nVOLUME_CAP = {\n    constants.VolumeMetric.IOPS.name: {\n        \"unit\": constants.VolumeMetric.IOPS.unit,\n        \"description\": constants.VolumeMetric.IOPS.description\n    },\n    constants.VolumeMetric.READ_IOPS.name: {\n        \"unit\": constants.VolumeMetric.READ_IOPS.unit,\n        \"description\": constants.VolumeMetric.READ_IOPS.description\n    },\n    constants.VolumeMetric.WRITE_IOPS.name: {\n        \"unit\": constants.VolumeMetric.WRITE_IOPS.unit,\n        \"description\": constants.VolumeMetric.WRITE_IOPS.description\n    },\n    constants.VolumeMetric.THROUGHPUT.name: {\n        \"unit\": constants.VolumeMetric.THROUGHPUT.unit,\n        \"description\": constants.VolumeMetric.THROUGHPUT.description\n    },\n    constants.VolumeMetric.READ_THROUGHPUT.name: {\n        \"unit\": constants.VolumeMetric.READ_THROUGHPUT.unit,\n        \"description\": constants.VolumeMetric.READ_THROUGHPUT.description\n    },\n    constants.VolumeMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": constants.VolumeMetric.WRITE_THROUGHPUT.unit,\n        \"description\": constants.VolumeMetric.WRITE_THROUGHPUT.description\n    },\n    constants.VolumeMetric.RESPONSE_TIME.name: {\n        \"unit\": constants.VolumeMetric.RESPONSE_TIME.unit,\n        \"description\": constants.VolumeMetric.RESPONSE_TIME.description\n    },\n    constants.VolumeMetric.READ_RESPONSE_TIME.name: {\n        \"unit\": constants.VolumeMetric.READ_RESPONSE_TIME.unit,\n        \"description\": constants.VolumeMetric.READ_RESPONSE_TIME.description\n    },\n    constants.VolumeMetric.WRITE_RESPONSE_TIME.name: {\n        \"unit\": constants.VolumeMetric.WRITE_RESPONSE_TIME.unit,\n        \"description\": constants.VolumeMetric.WRITE_RESPONSE_TIME.description\n    },\n    constants.VolumeMetric.IO_SIZE.name: {\n        \"unit\": constants.VolumeMetric.IO_SIZE.unit,\n        \"description\": constants.VolumeMetric.IO_SIZE.description\n    },\n    constants.VolumeMetric.READ_IO_SIZE.name: {\n        \"unit\": constants.VolumeMetric.READ_IO_SIZE.unit,\n        \"description\": constants.VolumeMetric.READ_IO_SIZE.description\n    },\n    constants.VolumeMetric.WRITE_IO_SIZE.name: {\n        \"unit\": constants.VolumeMetric.WRITE_IO_SIZE.unit,\n        \"description\": constants.VolumeMetric.WRITE_IO_SIZE.description\n    }\n}\n\nCONTROLLER_CAP = {\n    constants.ControllerMetric.IOPS.name: {\n        \"unit\": constants.ControllerMetric.IOPS.unit,\n        \"description\": constants.ControllerMetric.IOPS.description\n    },\n    
constants.ControllerMetric.READ_IOPS.name: {\n        \"unit\": constants.ControllerMetric.READ_IOPS.unit,\n        \"description\": constants.ControllerMetric.READ_IOPS.description\n    },\n    constants.ControllerMetric.WRITE_IOPS.name: {\n        \"unit\": constants.ControllerMetric.WRITE_IOPS.unit,\n        \"description\": constants.ControllerMetric.WRITE_IOPS.description\n    },\n    constants.ControllerMetric.THROUGHPUT.name: {\n        \"unit\": constants.ControllerMetric.THROUGHPUT.unit,\n        \"description\": constants.ControllerMetric.THROUGHPUT.description\n    },\n    constants.ControllerMetric.READ_THROUGHPUT.name: {\n        \"unit\": constants.ControllerMetric.READ_THROUGHPUT.unit,\n        \"description\": constants.ControllerMetric.READ_THROUGHPUT.description\n    },\n    constants.ControllerMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": constants.ControllerMetric.WRITE_THROUGHPUT.unit,\n        \"description\": constants.ControllerMetric.WRITE_THROUGHPUT.description\n    },\n    constants.ControllerMetric.RESPONSE_TIME.name: {\n        \"unit\": constants.ControllerMetric.RESPONSE_TIME.unit,\n        \"description\": constants.ControllerMetric.RESPONSE_TIME.description\n    },\n    constants.ControllerMetric.READ_RESPONSE_TIME.name: {\n        \"unit\": constants.ControllerMetric.READ_RESPONSE_TIME.unit,\n        \"description\":\n            constants.ControllerMetric.READ_RESPONSE_TIME.description\n    },\n    constants.ControllerMetric.WRITE_RESPONSE_TIME.name: {\n        \"unit\": constants.ControllerMetric.WRITE_RESPONSE_TIME.unit,\n        \"description\":\n            constants.ControllerMetric.WRITE_RESPONSE_TIME.description\n    },\n    constants.ControllerMetric.IO_SIZE.name: {\n        \"unit\": constants.ControllerMetric.IO_SIZE.unit,\n        \"description\": constants.ControllerMetric.IO_SIZE.description\n    },\n    constants.ControllerMetric.READ_IO_SIZE.name: {\n        \"unit\": constants.ControllerMetric.READ_IO_SIZE.unit,\n        \"description\": constants.ControllerMetric.READ_IO_SIZE.description\n    },\n    constants.ControllerMetric.WRITE_IO_SIZE.name: {\n        \"unit\": constants.ControllerMetric.WRITE_IO_SIZE.unit,\n        \"description\": constants.ControllerMetric.WRITE_IO_SIZE.description\n    },\n    constants.ControllerMetric.CPU_USAGE.name: {\n        \"unit\": constants.ControllerMetric.CPU_USAGE.unit,\n        \"description\": constants.ControllerMetric.CPU_USAGE.description\n    }\n}\n\nPORT_CAP = {\n    constants.PortMetric.IOPS.name: {\n        \"unit\": constants.PortMetric.IOPS.unit,\n        \"description\": constants.PortMetric.IOPS.description\n    },\n    constants.PortMetric.READ_IOPS.name: {\n        \"unit\": constants.PortMetric.READ_IOPS.unit,\n        \"description\": constants.PortMetric.READ_IOPS.description\n    },\n    constants.PortMetric.WRITE_IOPS.name: {\n        \"unit\": constants.PortMetric.WRITE_IOPS.unit,\n        \"description\": constants.PortMetric.WRITE_IOPS.description\n    },\n    constants.PortMetric.THROUGHPUT.name: {\n        \"unit\": constants.PortMetric.THROUGHPUT.unit,\n        \"description\": constants.PortMetric.THROUGHPUT.description\n    },\n    constants.PortMetric.READ_THROUGHPUT.name: {\n        \"unit\": constants.PortMetric.READ_THROUGHPUT.unit,\n        \"description\": constants.PortMetric.READ_THROUGHPUT.description\n    },\n    constants.PortMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": constants.PortMetric.WRITE_THROUGHPUT.unit,\n        \"description\": 
constants.PortMetric.WRITE_THROUGHPUT.description\n    },\n    constants.PortMetric.RESPONSE_TIME.name: {\n        \"unit\": constants.PortMetric.RESPONSE_TIME.unit,\n        \"description\": constants.PortMetric.RESPONSE_TIME.description\n    },\n    constants.PortMetric.READ_RESPONSE_TIME.name: {\n        \"unit\": constants.PortMetric.READ_RESPONSE_TIME.unit,\n        \"description\": constants.PortMetric.READ_RESPONSE_TIME.description\n    },\n    constants.PortMetric.WRITE_RESPONSE_TIME.name: {\n        \"unit\": constants.PortMetric.WRITE_RESPONSE_TIME.unit,\n        \"description\": constants.PortMetric.WRITE_RESPONSE_TIME.description\n    },\n    constants.PortMetric.IO_SIZE.name: {\n        \"unit\": constants.PortMetric.IO_SIZE.unit,\n        \"description\": constants.PortMetric.IO_SIZE.description\n    },\n    constants.PortMetric.READ_IO_SIZE.name: {\n        \"unit\": constants.PortMetric.READ_IO_SIZE.unit,\n        \"description\": constants.PortMetric.READ_IO_SIZE.description\n    },\n    constants.PortMetric.WRITE_IO_SIZE.name: {\n        \"unit\": constants.PortMetric.WRITE_IO_SIZE.unit,\n        \"description\": constants.PortMetric.WRITE_IO_SIZE.description\n    }\n}\n"
  },
  {
    "path": "delfin/drivers/dell_emc/power_store/power_store.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom oslo_log import log\n\nfrom delfin.drivers.dell_emc.power_store import rest_handler, consts\n\nLOG = log.getLogger(__name__)\n\n\nclass PowerStoreDriver(driver.StorageDriver):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.rest_handler = rest_handler.RestHandler(**kwargs)\n        self.rest_handler.login()\n\n    def get_storage(self, context):\n        return self.rest_handler.get_storage(self.storage_id)\n\n    def list_storage_pools(self, context):\n        return self.rest_handler.get_storage_pools(self.storage_id)\n\n    def list_volumes(self, context):\n        return self.rest_handler.get_volumes(self.storage_id)\n\n    def list_alerts(self, context, query_para=None):\n        return self.rest_handler.list_alerts(query_para)\n\n    def clear_alert(self, context, alert):\n        \"\"\"\n        PowerStore doesn't support clear alerts through API.\n        :param context:\n        :param alert:\n        :return:\n        \"\"\"\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return rest_handler.RestHandler.get_parse_alerts(alert)\n\n    def get_alert_sources(self, context):\n        return self.rest_handler.get_alert_sources(self.storage_id)\n\n    def list_controllers(self, context):\n        return self.rest_handler.get_controllers(self.storage_id)\n\n    def list_disks(self, context):\n        return self.rest_handler.get_disks(self.storage_id)\n\n    def list_ports(self, context):\n        hardware_d = self.rest_handler.get_port_hardware()\n        appliance_name_dict = self.rest_handler.get_appliance_name()\n        ports = self.rest_handler.get_fc_ports(\n            self.storage_id, hardware_d, appliance_name_dict)\n        ports.extend(\n            self.rest_handler.get_eth_ports(\n                self.storage_id, hardware_d, appliance_name_dict))\n        ports.extend(\n            self.rest_handler.get_sas_ports(\n                self.storage_id, hardware_d, appliance_name_dict))\n        return ports\n\n    def reset_connection(self, context, **kwargs):\n        self.rest_handler.logout()\n        self.rest_handler.login()\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}:{port}'\n\n    def collect_perf_metrics(self, context, storage_id, resource_metrics,\n                             start_time, end_time):\n        LOG.info('The system(storage_id: %s) starts to collect powerstore '\n                 'performance, start_time: %s, end_time: %s',\n                 storage_id, start_time, end_time)\n        metrics = []\n        if resource_metrics.get(constants.ResourceType.STORAGE):\n            storage_metrics = self.rest_handler.get_storage_metrics(\n      
          storage_id,\n                resource_metrics.get(constants.ResourceType.STORAGE),\n                start_time, end_time)\n            metrics.extend(storage_metrics)\n            LOG.info('The system(storage_id: %s) stop to collect storage'\n                     ' performance, The length is: %s',\n                     storage_id, len(storage_metrics))\n        if resource_metrics.get(constants.ResourceType.STORAGE_POOL):\n            pool_metrics = self.rest_handler.get_pool_metrics(\n                storage_id,\n                resource_metrics.get(constants.ResourceType.STORAGE_POOL),\n                start_time, end_time)\n            metrics.extend(pool_metrics)\n            LOG.info('The system(storage_id: %s) stop to collect pool'\n                     ' performance, The length is: %s',\n                     storage_id, len(pool_metrics))\n        if resource_metrics.get(constants.ResourceType.VOLUME):\n            volume_metrics = self.rest_handler.get_volume_metrics(\n                storage_id,\n                resource_metrics.get(constants.ResourceType.VOLUME),\n                start_time, end_time)\n            metrics.extend(volume_metrics)\n            LOG.info('The system(storage_id: %s) stop to collect volume'\n                     ' performance, The length is: %s',\n                     storage_id, len(volume_metrics))\n        if resource_metrics.get(constants.ResourceType.CONTROLLER):\n            controller_metrics = self.rest_handler.get_controllers_metrics(\n                storage_id,\n                resource_metrics.get(constants.ResourceType.CONTROLLER),\n                start_time, end_time)\n            metrics.extend(controller_metrics)\n            LOG.info('The system(storage_id: %s) stop to collect controller'\n                     ' performance, The length is: %s',\n                     storage_id, len(controller_metrics))\n        if resource_metrics.get(constants.ResourceType.PORT):\n            fc_port_metrics = self.rest_handler.get_fc_port_metrics(\n                storage_id,\n                resource_metrics.get(constants.ResourceType.PORT),\n                start_time, end_time)\n            metrics.extend(fc_port_metrics)\n            LOG.info('The system(storage_id: %s) stop to collect port'\n                     ' performance, The length is: %s',\n                     storage_id, len(fc_port_metrics))\n        return metrics\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        return {\n            'is_historic': True,\n            'resource_metrics': {\n                constants.ResourceType.STORAGE: consts.STORAGE_CAP,\n                constants.ResourceType.STORAGE_POOL: consts.STORAGE_POOL_CAP,\n                constants.ResourceType.VOLUME: consts.VOLUME_CAP,\n                constants.ResourceType.CONTROLLER: consts.CONTROLLER_CAP,\n                constants.ResourceType.PORT: consts.PORT_CAP\n            }\n        }\n\n    def get_latest_perf_timestamp(self, context):\n        return self.rest_handler.get_system_time()\n\n    def list_storage_host_initiators(self, context):\n        return self.rest_handler.list_storage_host_initiators(self.storage_id)\n\n    def list_storage_hosts(self, context):\n        return self.rest_handler.list_storage_hosts(self.storage_id)\n\n    def list_storage_host_groups(self, context):\n        return self.rest_handler.list_storage_host_groups(self.storage_id)\n\n    def list_volume_groups(self, context):\n        return 
self.rest_handler.list_volume_groups(self.storage_id)\n\n    def list_masking_views(self, context):\n        return self.rest_handler.list_masking_views(self.storage_id)\n"
  },
  {
    "path": "delfin/drivers/dell_emc/power_store/rest_handler.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport datetime\nimport hashlib\nimport threading\nimport time\nfrom decimal import Decimal\n\nimport requests\nimport six\nfrom oslo_log import log as logging\nfrom oslo_utils import units\n\nfrom delfin import exception, utils, cryptor\nfrom delfin.common import constants\nfrom delfin.drivers.dell_emc.power_store import consts\nfrom delfin.drivers.utils.rest_client import RestClient\nfrom delfin.i18n import _\n\nLOG = logging.getLogger(__name__)\n\n\nclass RestHandler(RestClient):\n    REST_LOGIN_SESSION_URL = '/api/rest/login_session'\n    REST_LOGOUT_URL = '/api/rest/logout'\n    REST_CLUSTER_URL = \\\n        '/api/rest/cluster?select=name,compatibility_level,global_id,state,' \\\n        'primary_appliance_id,id,system_time&limit=2000&offset={}'\n    REST_APPLIANCE_URL = '/api/rest/appliance?select=id,name,model' \\\n                         '&limit=2000&offset={}'\n    REST_SOFTWARE_INSTALLED_URL = \\\n        '/api/rest/software_installed?select=id,release_version,' \\\n        'build_version,appliance&limit=2000&offset={}'\n    REST_VOLUME_URL = '/api/rest/volume?select=id,name,description,state,' \\\n                      'type,wwn,size,appliance_id&limit=2000&offset={}'\n    REST_GENERATE_URL = '/api/rest/metrics/generate'\n    REST_FC_PORT_URL = \\\n        '/api/rest/fc_port?select=appliance_id,current_speed,id,is_link_up,' \\\n        'name,partner_id,supported_speeds,wwn,node_id,sfp_id' \\\n        '&limit=2000&offset={}'\n    REST_ETH_PORT_URL = \\\n        '/api/rest/eth_port?select=appliance_id,current_speed,id,is_link_up,' \\\n        'name,partner_id,supported_speeds,mac_address,node_id,sfp_id' \\\n        '&limit=2000&offset={}'\n    REST_SAS_PORT_URL = \\\n        '/api/rest/sas_port?select=appliance_id,current_speed,id,' \\\n        'is_link_up,name,node_id,speed,sfp_id&limit=2000&offset={}'\n    REST_HARDWARE_URL = \\\n        '/api/rest/hardware?select=name,extra_details,id,lifecycle_state,' \\\n        'serial_number,slot,type,appliance_id,status_led_state' \\\n        '&limit=2000&offset={}'\n    REST_NODE_URL = \\\n        '/api/rest/node?select=appliance_id,id,slot&limit=2000&offset={}'\n    REST_ALERT_URL = \\\n        '/api/rest/alert?select=id,description_l10n,severity,resource_name,' \\\n        'resource_type,raised_timestamp,state,event_code,resource_id' \\\n        '&limit=2000&offset={}'\n    REST_SNMP_ALERT_URL = \\\n        '/api/rest/alert?select=id,description_l10n,severity,resource_name,' \\\n        'resource_type,raised_timestamp,state&limit=2000&offset=0' \\\n        '&description_l10n=in.({})&snmp_sent_timestamp=not.is.null' \\\n        '&order=snmp_sent_timestamp'\n    REST_INITIATOR_URL = '/api/rest/initiator?select=id,port_name,port_type,' \\\n                         'host_id&limit=2000&offset={}'\n    REST_HOST_URL = '/api/rest/host?select=id,name,host_initiators,os_type,' \\\n                    'description&limit=2000&offset={}'\n    
    REST_HOST_GROUP_URL = '/api/rest/host_group?select=id,name,hosts,' \\\n                          'description&limit=2000&offset={}'\n    REST_VOLUME_GROUP_URL = '/api/rest/volume_group?select=description,name,' \\\n                            'id,volumes&limit=2000&offset={}'\n    REST_HOST_VOLUME_MAPPING_URL = \\\n        '/api/rest/host_volume_mapping?select=host_group_id,host_id,id,' \\\n        'volume_id&limit=2000&offset={}'\n    REST_IP_POOL_ADDRESS_URL = \\\n        '/api/rest/ip_pool_address?select=id,name,address,appliance_id,' \\\n        'node_id,purposes&limit=2000&offset={}'\n    REST_METRICS_ARCHIVE_URL = '/api/rest/metrics_archive'\n    REST_FILE_SYSTEM_URL = '/api/rest/file_system'\n    REST_FILE_TREE_QUOTA_URL = '/api/rest/file_tree_quota'\n    REST_SMB_SHARE_URL = '/api/rest/smb_share'\n    REST_NFS_SERVER_URL = '/api/rest/nfs_server'\n    REST_FILE_USER_QUOTA_URL = '/api/rest/file_user_quota'\n    AUTH_KEY = 'DELL-EMC-TOKEN'\n\n    def __init__(self, **kwargs):\n        super(RestHandler, self).__init__(**kwargs)\n        rest_access = kwargs.get('rest')\n        self.username = rest_access.get('username')\n        self.session_lock = threading.Lock()\n\n    def login(self):\n        try:\n            with self.session_lock:\n                if self.session is None:\n                    self.init_http_head()\n                self.session.auth = requests.auth.HTTPBasicAuth(\n                    self.rest_username, cryptor.decode(self.rest_password))\n                res = self.call_with_token(RestHandler.REST_LOGIN_SESSION_URL)\n                if res.status_code == 200 or res.status_code == 206:\n                    self.session.headers[RestHandler.AUTH_KEY] = \\\n                        cryptor.encode(res.headers[RestHandler.AUTH_KEY])\n                else:\n                    LOG.error(\"Login error. URL: %s, Reason: %s.\",\n                              RestHandler.REST_LOGIN_SESSION_URL, res.text)\n                    if 'Unauthorized' in res.text:\n                        raise exception.InvalidUsernameOrPassword()\n                    elif 'Forbidden' in res.text:\n                        raise exception.InvalidIpOrPort()\n                    else:\n                        raise exception.StorageBackendException(\n                            six.text_type(res.text))\n        except Exception as e:\n            LOG.error(\"Login error: %s\", six.text_type(e))\n            raise e\n\n    def call_with_token(self, url, data=None, method='GET',\n                        calltimeout=consts.DEFAULT_TIMEOUT):\n        auth_key = None\n        if self.session:\n            auth_key = self.session.headers.get(RestHandler.AUTH_KEY, None)\n            if auth_key:\n                self.session.headers[RestHandler.AUTH_KEY] \\\n                    = cryptor.decode(auth_key)\n        res = self.do_call(url, data, method, calltimeout)\n        if auth_key:\n            self.session.headers[RestHandler.AUTH_KEY] = auth_key\n        return res\n\n    def logout(self):\n        res = self.call_with_token(RestHandler.REST_LOGOUT_URL, None, 'POST')\n        if res.status_code != consts.StatusCode.SUCCESS_NO_CONTENT and \\\n                res.status_code != consts.StatusCode.SUCCESS_CREATE_RESPONSE:\n            LOG.error(\"Logout error. URL: %s, Reason: %s.\",\n                      RestHandler.REST_LOGOUT_URL, res.text)\n            raise exception.StorageBackendException(six.text_type(res.text))\n\n    def rest_call(self, url, data=None, method='GET', offset=0, result=None,\n        
          count=0):\n        if result is None:\n            result = []\n        if '{}' in url:\n            res = self.call_with_token(url.format(offset), data, method)\n        else:\n            res = self.call_with_token(url, data, method)\n        if res.status_code == consts.StatusCode.SUCCESS:\n            result.extend(res.json())\n        elif res.status_code == consts.StatusCode.PARTIAL_CONTENT:\n            result.extend(res.json())\n            if len(res.json()) == consts.LIMIT_COUNT:\n                offset += consts.LIMIT_COUNT\n                self.rest_call(url, data, method, offset, result, count)\n        elif res.status_code == consts.StatusCode.UNAUTHORIZED or \\\n                res.status_code == consts.StatusCode.FORBIDDEN:\n            if count < consts.DigitalConstant.THREE:\n                self.login()\n                count = count + consts.DigitalConstant.ONE\n                self.rest_call(url, data, method, offset, result, count)\n        return result\n\n    def get_storage(self, storage_id):\n        clusters = self.rest_call(self.REST_CLUSTER_URL)\n        if not clusters:\n            LOG.error('The cluster data is empty')\n            raise exception.StorageBackendException(\n                'The cluster data is empty')\n        cluster = clusters[consts.DigitalConstant.ZERO]\n        appliance_id = cluster.get('primary_appliance_id')\n        appliances = self.rest_call(self.REST_APPLIANCE_URL)\n        model = ''\n        for appliance in appliances:\n            if appliance_id == appliance.get('id'):\n                model = appliance.get('model')\n        pools = self.get_storage_pools(storage_id)\n        total_capacity = consts.DigitalConstant.ZERO\n        used_capacity = consts.DigitalConstant.ZERO\n        for pool in pools:\n            total_capacity += pool.get('total_capacity')\n            used_capacity += pool.get('used_capacity')\n        disks = self.get_disks(storage_id)\n        storage_result = {\n            'model': model,\n            'total_capacity': total_capacity,\n            'raw_capacity': sum(disk.get('capacity') for disk in disks),\n            'used_capacity': used_capacity,\n            'free_capacity': total_capacity - used_capacity,\n            'vendor': 'DELL EMC',\n            'name': cluster.get('name'),\n            'serial_number': cluster.get('global_id'),\n            'firmware_version': self.get_firmware_version(appliance_id),\n            'status': consts.STORAGE_STATUS_MAP.get(\n                cluster.get('state'), constants.StorageStatus.UNKNOWN)\n        }\n        return storage_result\n\n    def get_firmware_version(self, appliance_id):\n        software_s = self.rest_call(RestHandler.REST_SOFTWARE_INSTALLED_URL)\n        for software in software_s:\n            appliance_d = software.get('appliance')\n            if not appliance_d:\n                continue\n            software_appliance_id = appliance_d.get('id')\n            if appliance_id == software_appliance_id:\n                return software.get('release_version')\n\n    def get_storage_pools(self, storage_id):\n        list_pool = []\n        appliances = self.rest_call(RestHandler.REST_APPLIANCE_URL)\n        for appliance in appliances:\n            appliance_id = appliance.get('id')\n            data = {'entity': consts.SPACE_METRICS_BY_APPLIANCE,\n                    'entity_id': appliance_id}\n            appliance_spaces = self.rest_call(self.REST_GENERATE_URL,\n                                              data, 'POST')\n         
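   # NOTE: the generate endpoint returns a space-metrics time series;\n            # the last sample is read below as the most recent reading.\n         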
   if not appliance_spaces:\n                LOG.error('The pools space data is empty')\n                raise exception.StorageBackendException(\n                    'The pools space data is empty')\n            appliance_space = \\\n                appliance_spaces[consts.DigitalConstant.MINUS_ONE]\n            total_capacity = appliance_space.get('physical_total')\n            used_capacity = appliance_space.get('physical_used')\n            pool_result = {\n                'name': appliance.get('name'),\n                'storage_id': storage_id,\n                'native_storage_pool_id': appliance_id,\n                'status': constants.StoragePoolStatus.NORMAL,\n                'storage_type': constants.StorageType.BLOCK,\n                'total_capacity': total_capacity,\n                'used_capacity': used_capacity,\n                'free_capacity': total_capacity - used_capacity\n            }\n            list_pool.append(pool_result)\n        return list_pool\n\n    def get_volumes(self, storage_id):\n        list_volume = []\n        volumes = self.rest_call(self.REST_VOLUME_URL)\n        for volume in volumes:\n            snapshot_type = volume.get('type')\n            if consts.CHARACTER_SNAPSHOT == snapshot_type:\n                continue\n            volume_type = consts.VOLUME_TYPE_MAP.get(\n                snapshot_type, constants.VolumeType.THIN)\n            status = consts.VOLUME_STATUS_MAP.get(\n                volume.get('state'), constants.StorageStatus.UNKNOWN)\n            volume_id = volume.get('id')\n            total_capacity = volume.get('size')\n            used_capacity = self.get_volume_used_capacity(\n                volume_id, volume_type, total_capacity)\n            volume_result = {\n                'name': volume.get('name'),\n                'storage_id': storage_id,\n                'description': volume.get('description'),\n                'status': status,\n                'native_volume_id': volume_id,\n                'native_storage_pool_id': volume.get('appliance_id'),\n                'wwn': volume.get('wwn') if volume.get('wwn') else '',\n                'type': volume_type,\n                'total_capacity': total_capacity,\n                'used_capacity': used_capacity,\n                'free_capacity': total_capacity - used_capacity\n            }\n            list_volume.append(volume_result)\n        return list_volume\n\n    def get_volume_used_capacity(self, volume_id, volume_type, used_capacity):\n        if volume_type == constants.VolumeType.THICK:\n            return used_capacity\n        data = {'entity': consts.SPACE_METRICS_BY_VOLUME,\n                'entity_id': volume_id}\n        volumes_spaces = self.rest_call(self.REST_GENERATE_URL, data, 'POST')\n        if volumes_spaces:\n            volumes_space = \\\n                volumes_spaces[consts.DigitalConstant.MINUS_ONE]\n            used_capacity = volumes_space.get('logical_used')\n        return used_capacity\n\n    def get_disks(self, storage_id):\n        disk_list = []\n        hardware_list = self.rest_call(self.REST_HARDWARE_URL)\n        for hardware in hardware_list:\n            lifecycle_state = hardware.get('lifecycle_state')\n            if consts.CHARACTER_DRIVE != hardware.get('type') or \\\n                    lifecycle_state == consts.CHARACTER_EMPTY:\n                continue\n            extra_details = hardware.get('extra_details')\n            capacity = None\n            firmware = ''\n            physical_type = 
constants.DiskPhysicalType.UNKNOWN\n            if extra_details:\n                firmware = extra_details.get('firmware_version')\n                drive_type = extra_details.get('drive_type')\n                if drive_type in consts.DiskType.ALL:\n                    continue\n                physical_type = consts.DISK_PHYSICAL_TYPE.get(\n                    drive_type, constants.DiskPhysicalType.UNKNOWN)\n                capacity = extra_details.get('size')\n            hardware_name = hardware.get('name')\n            if not capacity:\n                LOG.warning(\"disk capacity is null: %s\", hardware_name)\n                continue\n            disk_result = {\n                'name': hardware_name,\n                'storage_id': storage_id,\n                'native_disk_id': hardware.get('id'),\n                'serial_number': hardware.get('serial_number'),\n                'manufacturer': 'DELL EMC',\n                'firmware': firmware,\n                'capacity': capacity,\n                'status': consts.DISK_STATUS_MAP.get(\n                    lifecycle_state, constants.DiskStatus.NORMAL),\n                'physical_type': physical_type,\n                'logical_type': constants.DiskLogicalType.UNKNOWN,\n                'location': str(hardware.get('slot'))\n            }\n            disk_list.append(disk_result)\n        return disk_list\n\n    def get_controllers(self, storage_id):\n        list_controllers = []\n        nodes = self.get_node()\n        ips = self.get_ip()\n        hardware_list = self.rest_call(self.REST_HARDWARE_URL)\n        for hardware in hardware_list:\n            lifecycle_state = hardware.get('lifecycle_state')\n            if consts.CHARACTER_NODE != hardware.get('type') or \\\n                    lifecycle_state == consts.CHARACTER_EMPTY:\n                continue\n            slot = hardware.get('slot')\n            appliance_id = hardware.get('appliance_id')\n            node_id = nodes.get(f'{appliance_id}{slot}')\n            address = ips.get(f'{appliance_id}{node_id}')\n            if not address:\n                LOG.warning('mgmt_ip is empty; exceptions may'\n                            ' occur in snmp trap processing')\n            extra_details = hardware.get('extra_details')\n            memory_size = ''\n            cpu_info = ''\n            if extra_details:\n                memory_size = extra_details.get(\n                    'physical_memory_size_gb', 0) * units.Gi\n                cpu_info = extra_details.get('cpu_model')\n            full_name = hardware.get('name')\n            if full_name:\n                name = full_name.split('-')[\n                    consts.DigitalConstant.MINUS_ONE]\n            else:\n                LOG.warning('The name of hardware is empty')\n                continue\n            controller_result = {\n                'name': name,\n                'storage_id': storage_id,\n                'native_controller_id': hardware.get('id'),\n                'status': consts.CONTROLLER_STATUS_MAP.get(\n                    lifecycle_state, constants.ControllerStatus.UNKNOWN),\n                'location': f'{name}:Slot-{slot}',\n                'cpu_info': cpu_info,\n                'cpu_count': consts.DigitalConstant.ONE,\n                'memory_size': memory_size,\n                'mgmt_ip': address\n            }\n            list_controllers.append(controller_result)\n        return list_controllers\n\n    def get_node(self):\n        node_dict = {}\n        nodes = self.rest_call(self.REST_NODE_URL)\n        
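# Key nodes by appliance_id + slot so get_controllers() can map a\n        # hardware slot to its node id (and from there to a mgmt IP).\n        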
for node in nodes:\n            appliance_id = node.get('appliance_id')\n            slot = node.get('slot')\n            node_id = node.get('id')\n            node_dict[f'{appliance_id}{slot}'] = node_id\n        return node_dict\n\n    def get_ip(self):\n        ip_dict = {}\n        ip_pool_address = self.rest_call(self.REST_IP_POOL_ADDRESS_URL)\n        for ip_address in ip_pool_address:\n            purposes_list = ip_address.get('purposes')\n            if consts.MGMT_NODE_COREOS not in purposes_list:\n                continue\n            address = ip_address.get('address')\n            appliance_id = ip_address.get('appliance_id')\n            node_id = ip_address.get('node_id')\n            ip_dict[f'{appliance_id}{node_id}'] = address\n        return ip_dict\n\n    def get_appliance_name(self):\n        appliance_name = {}\n        appliances = self.rest_call(self.REST_APPLIANCE_URL)\n        for appliance in appliances:\n            appliance_name[appliance.get('id')] = appliance.get('name')\n        return appliance_name\n\n    def get_port_hardware(self):\n        hardware_dict = {}\n        hardware_list = self.rest_call(self.REST_HARDWARE_URL)\n        for hardware in hardware_list:\n            hardware_dict[hardware.get('id')] = hardware\n        return hardware_dict\n\n    def get_fc_ports(self, storage_id, hardware_dict, appliance_name_dict):\n        list_fc_ports = []\n        fc_res = self.rest_call(self.REST_FC_PORT_URL)\n        for fc in fc_res:\n            appliance_id = fc.get('appliance_id')\n            name = fc.get('name')\n            is_link_up = fc.get('is_link_up')\n            connection_status = consts.PORT_CONNECTION_STATUS_MAP.get(\n                is_link_up, constants.PortConnectionStatus.UNKNOWN)\n            lifecycle_state = hardware_dict.get(\n                fc.get('sfp_id'), {}).get('lifecycle_state')\n            health_status = consts.PORT_HEALTH_STATUS_MAP.get(\n                lifecycle_state, constants.PortHealthStatus.UNKNOWN)\n            fc_port_result = {\n                'name': name,\n                'storage_id': storage_id,\n                'native_port_id': fc.get('id'),\n                'location': f'{appliance_name_dict.get(appliance_id)}:{name}',\n                'connection_status': connection_status,\n                'health_status': health_status,\n                'type': constants.PortType.FC,\n                'speed': self.convert_speed(fc.get('current_speed')),\n                'max_speed': self.convert_speed(fc.get('supported_speeds')),\n                'native_parent_id': fc.get('node_id'),\n                'wwn': fc.get('wwn')\n            }\n            list_fc_ports.append(fc_port_result)\n        return list_fc_ports\n\n    @staticmethod\n    def convert_speed(supported_speeds):\n        if not supported_speeds:\n            return\n        supported_speed = \\\n            supported_speeds[consts.DigitalConstant.MINUS_ONE]\\\n            if isinstance(supported_speeds, list) else supported_speeds\n        if '_Gbps' in supported_speed:\n            supported_speed = supported_speed.replace('_Gbps', '')\n            return int(supported_speed) * units.G\n        if '_Mbps' in supported_speed:\n            supported_speed = supported_speed.replace('_Mbps', '')\n            return int(supported_speed) * units.M\n        if '_Kbps' in supported_speed:\n            supported_speed = supported_speed.replace('_Kbps', '')\n            return int(supported_speed) * units.k\n\n    def get_eth_ports(self, storage_id, 
hardware_dict, appliance_name_dict):\n        list_eth_ports = []\n        eth_ports = self.rest_call(self.REST_ETH_PORT_URL)\n        for eth in eth_ports:\n            name = eth.get('name')\n            appliance_id = eth.get('appliance_id')\n            is_link_up = eth.get('is_link_up')\n            connection_status = consts.PORT_CONNECTION_STATUS_MAP.get(\n                is_link_up, constants.PortConnectionStatus.UNKNOWN)\n            lifecycle_state = hardware_dict.get(\n                eth.get('sfp_id'), {}).get('lifecycle_state')\n            health_status = consts.PORT_HEALTH_STATUS_MAP.get(\n                lifecycle_state, constants.PortHealthStatus.UNKNOWN)\n            eth_port_result = {\n                'name': name,\n                'storage_id': storage_id,\n                'native_port_id': eth.get('id'),\n                'location': f'{appliance_name_dict.get(appliance_id)}:{name}',\n                'connection_status': connection_status,\n                'health_status': health_status,\n                'type': constants.PortType.ETH,\n                'speed': self.convert_speed(eth.get('current_speed')),\n                'max_speed': self.convert_speed(eth.get('supported_speeds')),\n                'native_parent_id': eth.get('node_id'),\n                'mac_address': eth.get('mac_address')\n            }\n            list_eth_ports.append(eth_port_result)\n        return list_eth_ports\n\n    def get_sas_ports(self, storage_id, hardware_dict, appliance_name_dict):\n        list_sas_ports = []\n        sas_ports = self.rest_call(self.REST_SAS_PORT_URL)\n        for sas in sas_ports:\n            name = sas.get('name')\n            appliance_id = sas.get('appliance_id')\n            is_link_up = sas.get('is_link_up')\n            connection_status = consts.PORT_CONNECTION_STATUS_MAP.get(\n                is_link_up, constants.PortConnectionStatus.UNKNOWN)\n            lifecycle_state = hardware_dict.get(\n                sas.get('sfp_id'), {}).get('lifecycle_state')\n            health_status = consts.PORT_HEALTH_STATUS_MAP.get(\n                lifecycle_state, constants.PortHealthStatus.UNKNOWN)\n            sas_port_result = {\n                'name': name,\n                'storage_id': storage_id,\n                'native_port_id': sas.get('id'),\n                'location': f'{appliance_name_dict.get(appliance_id)}:{name}',\n                'connection_status': connection_status,\n                'health_status': health_status,\n                'type': constants.PortType.SAS,\n                'speed': self.convert_speed(sas.get('speed')),\n                'native_parent_id': sas.get('node_id')\n            }\n            list_sas_ports.append(sas_port_result)\n        return list_sas_ports\n\n    def list_alerts(self, query_para=None):\n        alerts = self.rest_call(self.REST_ALERT_URL)\n        alerts_list = []\n        for alert in alerts:\n            if 'CLEARED' == alert.get('state'):\n                continue\n            raised_timestamp = alert.get('raised_timestamp')\n            timestamp = None\n            if raised_timestamp:\n                time_difference = self.get_time_difference()\n                timestamp_s = datetime.datetime.strptime(\n                    raised_timestamp, consts.UTC_FORMAT).timestamp()\n                timestamp = int((timestamp_s + time_difference) * units.k)\n            if query_para:\n                try:\n                    if timestamp is None or timestamp \\\n                            < int(query_para.get('begin_time')) or \\\n             
               timestamp > int(query_para.get('end_time')):\n                        continue\n                except Exception as e:\n                    LOG.error(e)\n            alerts_model = self.set_alert_model(alert, timestamp)\n            alerts_list.append(alerts_model)\n        return alerts_list\n\n    @staticmethod\n    def get_time_difference():\n        time_difference = time.mktime(\n            time.localtime()) - time.mktime(time.gmtime())\n        return time_difference\n\n    @staticmethod\n    def get_parse_alerts(snmp_alert):\n        try:\n            if consts.PARSE_ALERT_DESCRIPTION in snmp_alert.keys():\n                description = snmp_alert.get(consts.PARSE_ALERT_DESCRIPTION)\n                raised_time = snmp_alert.get(consts.PARSE_ALERT_TIME_UTC)\n                timestamp = None\n                if raised_time:\n                    time_difference = RestHandler.get_time_difference()\n                    timestamp_s = datetime.datetime.strptime(\n                        raised_time, consts.SYSTEM_TIME_FORMAT).timestamp()\n                    timestamp = int((timestamp_s + time_difference) * units.k)\n                resource_type = snmp_alert.get(\n                    consts.PARSE_ALERT_RESOURCE_TYPE)\n                resource_name = snmp_alert.get(\n                    consts.PARSE_ALERT_RESOURCE_NAME)\n                location = f'{resource_type}:{resource_name}'\n                event_code = snmp_alert.get(consts.PARSE_ALERT_CODE)\n                resource_id = snmp_alert.get(consts.PARSE_ALERT_RESOURCE_ID)\n                match_key_str = f'{description}{timestamp}{resource_type}' \\\n                                f'{resource_name}{event_code}{resource_id}'\n                match_key = hashlib.md5(match_key_str.encode()).hexdigest()\n                alerts_model = {\n                    'alert_id': match_key,\n                    'occur_time': timestamp if\n                    timestamp else utils.utcnow_ms(),\n                    'severity': consts.SNMP_SEVERITY_MAP.get(\n                        snmp_alert.get(consts.PARSE_ALERT_SEVERITY),\n                        constants.Severity.NOT_SPECIFIED),\n                    'category': constants.Category.FAULT,\n                    'location': location if\n                    resource_type and resource_name else '',\n                    'type': constants.EventType.EQUIPMENT_ALARM,\n                    'resource_type': resource_type if resource_type else\n                    constants.DEFAULT_RESOURCE_TYPE,\n                    'alert_name': description,\n                    'match_key': match_key,\n                    'description': description\n                }\n                return alerts_model\n        except Exception as e:\n            LOG.error(e)\n            msg = _(\"Failed to build alert model as some \"\n                    \"attributes are missing\")\n            raise exception.InvalidResults(msg)\n\n    def get_alert_sources(self, storage_id):\n        sources_list = []\n        controllers = self.get_controllers(storage_id)\n        for controller in controllers:\n            mgmt_ip = controller.get('mgmt_ip')\n            mgmt_ip_t = {'host': mgmt_ip}\n            sources_list.append(mgmt_ip_t)\n        return sources_list\n\n    @staticmethod\n    def set_alert_model(alert, timestamp):\n        description = alert.get('description_l10n')\n        resource_type = alert.get('resource_type')\n        resource_name = alert.get('resource_name')\n        resource_id = alert.get('resource_id')\n        event_code = 
alert.get('event_code')\n        match_key_str = f'{description}{timestamp}{resource_type}' \\\n                        f'{resource_name}{event_code}{resource_id}'\n        alerts_model = {\n            'alert_id': alert.get('id'),\n            'occur_time': timestamp,\n            'severity': consts.ALERT_SEVERITY_MAP.get(\n                alert.get('severity'), constants.Severity.NOT_SPECIFIED),\n            'category': constants.Category.FAULT,\n            'location': f'{resource_type}:{resource_name}',\n            'type': constants.EventType.EQUIPMENT_ALARM,\n            'resource_type': resource_type,\n            'alert_name': description,\n            'match_key': hashlib.md5(match_key_str.encode()).hexdigest(),\n            'description': description\n        }\n        return alerts_model\n\n    def list_storage_host_initiators(self, storage_id):\n        list_initiators = self.get_initiators(storage_id)\n        if list_initiators:\n            return list_initiators\n        hosts = self.rest_call(self.REST_HOST_URL)\n        for host in hosts:\n            initiators = host.get('host_initiators')\n            for initiator in (initiators or []):\n                port_name = initiator.get('port_name')\n                initiator_dict = {\n                    'native_storage_host_initiator_id': port_name,\n                    'native_storage_host_id': host.get('id'),\n                    'name': port_name,\n                    'type': consts.INITIATOR_TYPE_MAP.get(\n                        initiator.get('port_type'),\n                        constants.InitiatorType.UNKNOWN),\n                    'status': constants.InitiatorStatus.UNKNOWN,\n                    'wwn': port_name,\n                    'storage_id': storage_id\n                }\n                list_initiators.append(initiator_dict)\n        return list_initiators\n\n    def get_initiators(self, storage_id):\n        list_initiators = []\n        try:\n            initiators = self.rest_call(self.REST_INITIATOR_URL)\n            for initiator in initiators:\n                port_name = initiator.get('port_name')\n                initiator_dict = {\n                    'native_storage_host_initiator_id': initiator.get('id'),\n                    'native_storage_host_id': initiator.get('host_id'),\n                    'name': port_name,\n                    'type': consts.INITIATOR_TYPE_MAP.get(\n                        initiator.get('port_type'),\n                        constants.InitiatorType.UNKNOWN),\n                    'status': constants.InitiatorStatus.UNKNOWN,\n                    'wwn': port_name,\n                    'storage_id': storage_id\n                }\n                list_initiators.append(initiator_dict)\n        except Exception as e:\n            LOG.error(\"get initiators error: %s\", six.text_type(e))\n        return list_initiators\n\n    def list_storage_hosts(self, storage_id):\n        host_list = []\n        hosts = self.rest_call(self.REST_HOST_URL)\n        for host in hosts:\n            h = {\n                \"name\": host.get('name'),\n                \"storage_id\": storage_id,\n                \"native_storage_host_id\": host.get('id'),\n                'description': host.get('description')\n                if host.get('description') else '',\n                \"os_type\": consts.HOST_OS_TYPES_MAP.get(\n                    host.get('os_type'), constants.HostOSTypes.UNKNOWN),\n                \"status\": constants.HostStatus.NORMAL\n            }\n            
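# NOTE: the host endpoint exposes no health field, so every\n            # discovered host is reported with an assumed NORMAL status.\n            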
host_list.append(h)\n        return host_list\n\n    def list_storage_host_groups(self, storage_id):\n        host_groups = self.rest_call(self.REST_HOST_GROUP_URL)\n        host_group_list = []\n        storage_host_grp_relation_list = []\n        for hgroup in (host_groups or []):\n            hgroup_id = hgroup.get('id')\n            hg = {\n                'native_storage_host_group_id': hgroup_id,\n                'name': hgroup.get('name'),\n                'description': hgroup.get('description') if\n                hgroup.get('description') else '',\n                'storage_id': storage_id\n            }\n            host_group_list.append(hg)\n            for host in (hgroup.get('hosts') or []):\n                host_relation = {\n                    'native_storage_host_group_id': hgroup_id,\n                    'storage_id': storage_id,\n                    'native_storage_host_id': host.get('id')\n                }\n                storage_host_grp_relation_list.append(host_relation)\n        result = {\n            'storage_host_groups': host_group_list,\n            'storage_host_grp_host_rels': storage_host_grp_relation_list\n        }\n        return result\n\n    def list_volume_groups(self, storage_id):\n        volume_groups = self.rest_call(self.REST_VOLUME_GROUP_URL)\n        vol_group_list = []\n        vol_grp_vol_relation_list = []\n        for volume_group in volume_groups:\n            volume_group_id = volume_group.get('id')\n            vol_g = {\n                'name': volume_group.get('name'),\n                'storage_id': storage_id,\n                'native_volume_group_id': volume_group_id,\n                'description': volume_group.get('description')\n                if volume_group.get('description') else ''\n            }\n            vol_group_list.append(vol_g)\n            for volumes in (volume_group.get('volumes') or []):\n                volume_group_relation = {\n                    'storage_id': storage_id,\n                    'native_volume_group_id': volume_group_id,\n                    'native_volume_id': volumes.get('id')\n                }\n                vol_grp_vol_relation_list.append(volume_group_relation)\n        result = {\n            'volume_groups': vol_group_list,\n            'vol_grp_vol_rels': vol_grp_vol_relation_list\n        }\n        return result\n\n    def list_masking_views(self, storage_id):\n        list_masking_views = []\n        volume_mapping = self.rest_call(self.REST_HOST_VOLUME_MAPPING_URL)\n        for mapping in volume_mapping:\n            native_masking_view_id = mapping.get('id')\n            host_group_id = mapping.get('host_group_id')\n            host_id = mapping.get('host_id')\n            view = {\n                'native_masking_view_id': native_masking_view_id,\n                'name': native_masking_view_id,\n                'native_volume_id': mapping.get('volume_id'),\n                'storage_id': storage_id\n            }\n            if host_group_id:\n                view['native_storage_host_group_id'] = host_group_id\n            if host_id:\n                view['native_storage_host_id'] = host_id\n            list_masking_views.append(view)\n        return list_masking_views\n\n    def get_storage_metrics(self, storage_id, resource_metrics, start_time,\n                            end_time):\n        storage_metrics = []\n        clusters = self.rest_call(self.REST_CLUSTER_URL)\n        if not clusters:\n            return storage_metrics\n        cluster = 
clusters[consts.DigitalConstant.ZERO]\n        cluster_id = cluster.get('id')\n        cluster_name = cluster.get('name')\n        if not cluster_id or not cluster_name:\n            return storage_metrics\n        data = {'entity': consts.PERFORMANCE_METRICS_BY_CLUSTER,\n                'entity_id': cluster_id,\n                'interval': consts.PERFORMANCE_METRICS_INTERVAL}\n        packaging_data = self.package_data(data, end_time, start_time)\n        storage_metrics = self.set_metrics_data(\n            cluster.get('global_id'), cluster_name, packaging_data,\n            resource_metrics, constants.ResourceType.STORAGE, storage_id)\n        return storage_metrics\n\n    def get_pool_metrics(self, storage_id, resource_metrics, start_time,\n                         end_time):\n        pool_metrics_list = []\n        appliances = self.rest_call(self.REST_APPLIANCE_URL)\n        for appliance in appliances:\n            pool_id = appliance.get('id')\n            pool_name = appliance.get('name')\n            if not pool_id or not pool_name:\n                continue\n            data = {'entity': consts.PERFORMANCE_METRICS_BY_APPLIANCE,\n                    'entity_id': pool_id,\n                    'interval': consts.PERFORMANCE_METRICS_INTERVAL}\n            packaging_data = self.package_data(data, end_time, start_time)\n            pool_metrics = self.set_metrics_data(\n                pool_id, pool_name, packaging_data, resource_metrics,\n                constants.ResourceType.STORAGE_POOL, storage_id)\n            pool_metrics_list.extend(pool_metrics)\n        return pool_metrics_list\n\n    def get_volume_metrics(self, storage_id, resource_metrics, start_time,\n                           end_time):\n        volume_metrics_list = []\n        volumes = self.rest_call(self.REST_VOLUME_URL)\n        for volume in volumes:\n            volume_id = volume.get('id')\n            volume_name = volume.get('name')\n            if not volume_id or not volume_name:\n                continue\n            data = {'entity': consts.PERFORMANCE_METRICS_BY_VOLUME,\n                    'entity_id': volume_id,\n                    'interval': consts.PERFORMANCE_METRICS_INTERVAL}\n            packaging_data = self.package_data(data, end_time, start_time)\n            volume_metrics = self.set_metrics_data(\n                volume_id, volume_name, packaging_data, resource_metrics,\n                constants.ResourceType.VOLUME, storage_id)\n            volume_metrics_list.extend(volume_metrics)\n        return volume_metrics_list\n\n    def get_controllers_metrics(self, storage_id, resource_metrics, start_time,\n                                end_time):\n        controllers_metrics_list = []\n        controller_dict = self.get_node_hardware()\n        controllers = self.rest_call(self.REST_NODE_URL)\n        for controller in controllers:\n            controller_id = controller.get('id')\n            if not controller_id:\n                continue\n            hardware_id, hardware_name = self.get_resource(controller,\n                                                           controller_dict)\n            if not hardware_id:\n                LOG.info('controllers performance: Unexpected data')\n                continue\n            data = {'entity': consts.PERFORMANCE_METRICS_BY_NODE,\n                    'entity_id': controller_id,\n                    'interval': consts.PERFORMANCE_METRICS_INTERVAL}\n            packaging_data = self.package_data(data, end_time, start_time)\n            
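# Metrics are labelled with the hardware (controller) id and name\n            # resolved above, not the internal node id.\n            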
controllers_metrics = self.set_metrics_data(\n                hardware_id, hardware_name, packaging_data, resource_metrics,\n                constants.ResourceType.CONTROLLER, storage_id)\n            controllers_metrics_list.extend(controllers_metrics)\n        return controllers_metrics_list\n\n    @staticmethod\n    def get_resource(controller, controller_dict):\n        appliance_id = controller.get('appliance_id')\n        slot = controller.get('slot')\n        hardware = controller_dict.get(f'{appliance_id}{slot}', {})\n        hardware_id = hardware.get('id')\n        full_name = hardware.get('name')\n        if full_name:\n            hardware_name = full_name.split('-')[\n                consts.DigitalConstant.MINUS_ONE]\n        else:\n            hardware_name = hardware_id\n        return hardware_id, hardware_name\n\n    def get_node_hardware(self):\n        hardware_dict = {}\n        hardware_list = self.rest_call(self.REST_HARDWARE_URL)\n        for hardware in hardware_list:\n            lifecycle_state = hardware.get('lifecycle_state')\n            if consts.CHARACTER_NODE != hardware.get('type') or \\\n                    lifecycle_state == consts.CHARACTER_EMPTY:\n                continue\n            slot = hardware.get('slot')\n            appliance_id = hardware.get('appliance_id')\n            key = f'{appliance_id}{slot}'\n            hardware_dict[key] = hardware\n        return hardware_dict\n\n    def get_fc_port_metrics(self, storage_id, resource_metrics, start_time,\n                            end_time):\n        fc_port_metrics_list = []\n        fc_ports = self.rest_call(self.REST_FC_PORT_URL)\n        for fc_port in fc_ports:\n            fc_port_id = fc_port.get('id')\n            fc_port_name = fc_port.get('name')\n            if not fc_port_id or not fc_port_name:\n                continue\n            data = {'entity': consts.PERFORMANCE_METRICS_BY_FE_FC_PORT,\n                    'entity_id': fc_port_id,\n                    'interval': consts.PERFORMANCE_METRICS_INTERVAL}\n            packaging_data = self.package_data(data, end_time, start_time)\n            fc_port_metrics = self.set_metrics_data(\n                fc_port_id, fc_port_name, packaging_data, resource_metrics,\n                constants.ResourceType.PORT, storage_id)\n            fc_port_metrics_list.extend(fc_port_metrics)\n        return fc_port_metrics_list\n\n    @staticmethod\n    def set_metrics_data(resource_id, resource_name, packaging_data,\n                         resource_metrics, resource_type, storage_id):\n        metrics_list = []\n        for resource_key in resource_metrics.keys():\n            labels = {\n                'storage_id': storage_id,\n                'resource_type': resource_type,\n                'resource_id': resource_id,\n                'resource_name': resource_name,\n                'type': 'RAW',\n                'unit': resource_metrics[resource_key]['unit']\n            }\n            resource_value = {}\n            for about_timestamp in packaging_data.keys():\n                metrics_data = packaging_data.get(about_timestamp)\n                resource_value[about_timestamp] = \\\n                    metrics_data.get(resource_key)\n            if resource_value:\n                metrics_res = constants.metric_struct(\n                    name=resource_key, labels=labels, values=resource_value)\n                metrics_list.append(metrics_res)\n        return metrics_list\n\n    def package_data(self, data, end_time, start_time):\n        
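# Expand each sample's repeat_count into per-interval points,\n        # align them to minute boundaries, and drop duplicates and\n        # points outside [start_time, end_time).\n        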
perf_data = self.rest_call(self.REST_GENERATE_URL, data, 'POST')\n        packaging_data = {}\n        duplicate = set()\n        for perf in perf_data:\n            timestamp = perf.get('timestamp')\n            time_difference = self.get_time_difference()\n            timestamp_s = int(\n                datetime.datetime.strptime(timestamp, consts.PERF_TIME_FORMAT)\n                .timestamp() + time_difference)\n            repeat_count = perf.get('repeat_count')\n            if repeat_count > consts.DigitalConstant.ONE:\n                repeat_timestamp_s =\\\n                    (repeat_count - consts.DigitalConstant.ONE)\\\n                    * consts.PERF_INTERVAL\n                count_timestamp_s = timestamp_s + repeat_timestamp_s\n                count_timestamp_ms = count_timestamp_s * units.k\n                if start_time > count_timestamp_ms:\n                    continue\n            for count in range(consts.DigitalConstant.ZERO, repeat_count):\n                count_timestamp_s = timestamp_s + count * consts.PERF_INTERVAL\n                count_timestamp_ms = count_timestamp_s * units.k\n                about_timestamp = \\\n                    int(count_timestamp_s / consts.DigitalConstant.SIXTY) \\\n                    * consts.DigitalConstant.SIXTY * units.k\n                if count_timestamp_ms < start_time or \\\n                        count_timestamp_ms >= end_time \\\n                        or about_timestamp in duplicate:\n                    continue\n                duplicate.add(about_timestamp)\n                cpu_utilization = perf.get('io_workload_cpu_utilization')\n                metrics_d = {\n                    'iops': Decimal(str(perf.get('total_iops'))).quantize(\n                        Decimal('0'), rounding=\"ROUND_HALF_UP\"),\n                    \"readIops\": Decimal(str(perf.get('read_iops'))).quantize(\n                        Decimal('0'), rounding=\"ROUND_HALF_UP\"),\n                    \"writeIops\": Decimal(str(perf.get('write_iops'))).quantize(\n                        Decimal('0'), rounding=\"ROUND_HALF_UP\"),\n                    \"throughput\": round(\n                        perf.get('total_bandwidth') / units.Mi, 3),\n                    \"readThroughput\": round(\n                        perf.get('read_bandwidth') / units.Mi, 3),\n                    \"writeThroughput\": round(\n                        perf.get('write_bandwidth') / units.Mi, 3),\n                    \"responseTime\": round(\n                        perf.get('avg_latency') / units.k, 3),\n                    \"readResponseTime\": round(\n                        perf.get('avg_read_latency') / units.k, 3),\n                    \"writeResponseTime\": round(\n                        perf.get('avg_write_latency') / units.k, 3),\n                    \"ioSize\": round(perf.get('avg_io_size') / units.Ki, 3),\n                    \"readIoSize\": round(\n                        perf.get('avg_read_size') / units.Ki, 3),\n                    \"writeIoSize\": round(\n                        perf.get('avg_write_size') / units.Ki, 3),\n                    \"cpuUsage\": Decimal(str(cpu_utilization)).quantize(\n                        Decimal('0.000'), rounding=\"ROUND_HALF_UP\")\n                    if cpu_utilization else '',\n                    'time': about_timestamp\n                }\n                packaging_data[about_timestamp] = metrics_d\n        return packaging_data\n\n    def get_system_time(self):\n        clusters = self.rest_call(self.REST_CLUSTER_URL)\n       
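 # Convert the cluster's local system_time to epoch milliseconds.\n       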
 if clusters:\n            cluster = clusters[consts.DigitalConstant.ZERO]\n            system_time = cluster.get('system_time')\n            time_difference = self.get_time_difference()\n            timestamp_s = datetime.datetime.strptime(\n                system_time, consts.SYSTEM_TIME_FORMAT).timestamp()\n            timestamp = int((timestamp_s + time_difference) * units.k)\\\n                if system_time else None\n            return timestamp\n"
  },
  {
    "path": "delfin/drivers/dell_emc/scaleio/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/dell_emc/scaleio/alert_consts.py",
    "content": "# Copyright 2022 The SODA Authors.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nALERT_MAP = {\n    'DEVICE_FAILED': 'Device failed',\n    'SDC_DISCONNECTED': 'SDC disconnected',\n    'MDM_NOT_CLUSTERED': 'MDM is not clustered',\n    'SDS_DISCONNECTED ': 'SDS is disconnected',\n    'SDS_DISCONNECTS_FREQUENTLY ': 'SDS disconnects frequently ',\n    'SDS_RMCACHE_MEMORY_ALLOCATION_FAILED':\n        'Memory allocation for RAM ReadCache failed on SDS',\n    'STORAGE_POOL_HAS_CAPACITY_ERRORS': 'Storage Pool has capacity errors',\n    'STORAGE_POOL_HAS_FAILED_CAPACITY': 'Storage Pool has failed capacity',\n    'STORAGE_POOL_HAS_DEGRADED_CAPACITY': 'Storage Pool has degraded capacity',\n    'STORAGE_POOL_HAS_UNREACHABLE_CAPACITY':\n        'Storage Pool has decreased capacity',\n    'STORAGE_POOL_HAS_UNAVAILABLE_UNUSED_CAPACITY':\n        'Storage Pool has unavailable-unused capacity',\n    'STORAGE_POOL_UNBALANCED': 'Storage Pool is unbalanced ',\n    'CAPACITY_UTILIZATION_ABOVE_CRITICAL_THRESHOLD':\n        'Capacity utilization above critical threshold',\n    'CAPACITY_UTILIZATION_ABOVE_HIGH_THRESHOLD':\n        'Capacity utilization above high threshold',\n    'CONFIGURED_SPARE_CAPACITY_SMALLER_THAN_LARGEST_FAULT_UNIT':\n    'Configured spare capacity is smaller than largest fault unit',\n    'SPARE_CAPACITY_AND_FREE_CAPACITY_SMALLER_THAN_LARGEST_FAULT_UNIT':\n        'Spare capacity and free capacity are smaller '\n        'than the largest fault unit',\n    'SPARE_CAPACITY_BELOW_THRESHOLD ': 'Spare capacity is below threshold',\n    'LICENSE_EXPIRED': 'License expired',\n    'LICENSE_ABOUT_TO_EXPIRE ': 'License will expire in %d days',\n    'FWD_REBUILD_STUCK ': 'Forward rebuild cannot proceed ',\n    'BKWD_REBUILD_STUCK': 'Backward rebuild cannot proceed',\n    'REBALANCE_STUCK ': 'Rebalance cannot proceed ',\n    'MDM_FAILS_OVER_FREQUENTLY': 'MDM fails over frequently',\n    'FAILURE_RECOVERY_CAPACITY_BELOW_THRESHOLD':\n        'Failure recovery capacity is below the threshold',\n    'DEVICE_PENDING_ACTIVATION':\n        'Device test is done and device is pending activation',\n    'PD_INACTIVE ': 'Inactive Protection Domain',\n    'DRL_MODE_NON_VOLATILE': 'DRL mode: Hardened ',\n    'NOT_ENOUGH_FAULT_UNITS_IN_SP ':\n        'Storage Pool does not meet the minimum requirement of 3 fault units',\n    'SDC_MAX_COUNT': 'No more SDCs can be defined on this system; '\n                     'the maximum has been reached',\n    'FIXED_READ_ERROR_COUNT_ABOVE_THRESHOLD': 'Device has fixed read errors ',\n    'SCANNER_COMPARE_ERROR':\n        'Background device scanning has found data conflicts',\n    'STORAGE_POOL_EXTREMELY_UNBALANCED':\n        'The Storage Pool relies too heavily(over 50%)on capacity from a '\n        'single SDS or Fault SetBalance capacity over other SDSs or Fault Sets'\n\n}\n"
  },
  {
    "path": "delfin/drivers/dell_emc/scaleio/consts.py",
    "content": "# Copyright 2022 The SODA Authors.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nfrom delfin.common import constants\n\nStorageVendor = 'DELL EMC'\nDEFAULT_TIMEOUT = 10\nREST_AUTH_LOGIN = '/api/login'\nREST_AUTH_LOGOUT = '/api/logout'\nREST_SCALEIO_SYSTEM = '/api/types/System/instances'\nREST_SCALEIO_STORAGE_POOL = '/api/types/StoragePool/instances'\nREST_SCALEIO_VOLUMES = '/api/types/Volume/instances'\nREST_SCALEIO_DISKS = '/api/types/Device/instances'\nREST_SCALIO_HOSTS = '/api/types/Sdc/instances'\nREST_SCALIO_INITIIATORS = '/api/types/Sds/instances'\nREST_SCALEIO_ALERT = '/api/types/Alert/instances'\nDEFAULT_ALERTS_TIME_CONVERSION = 1000\nDEFAULT_VOLUME_USERD_CAPACITY = 0\nDATETIME_UTC_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'\nOID_SEVERITY = '1139.101.1.1'\nOID_EVENT_TYPE = '1139.101.1.2'\nOID_ERR_ID = '1139.101.1.3'\nOID_EVENT_ID = '1139.101.1.4'\n\nTRAP_ALERT_MAP = {\n    '5': constants.Severity.CRITICAL,\n    '2': constants.Severity.WARNING,\n}\n"
  },
  {
    "path": "delfin/drivers/dell_emc/scaleio/rest_handler.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport hashlib\nimport six\nimport json\n\nimport requests\nimport datetime\nimport time\nfrom oslo_log import log\nfrom oslo_utils import units\nfrom delfin import exception\nfrom delfin import cryptor\nfrom delfin.common import alert_util\nfrom delfin.drivers.utils.rest_client import RestClient\nfrom delfin.drivers.dell_emc.scaleio import consts\nfrom delfin.drivers.dell_emc.scaleio import alert_consts\nfrom delfin.common import constants\n\nLOG = log.getLogger(__name__)\n\n\nclass RestHandler(RestClient):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.init_http_head()\n        self.login()\n\n    def login(self):\n        try:\n            res = self.get_rest_info(consts.REST_AUTH_LOGIN, 'login', 'GET')\n            if res:\n                self.rest_auth_token = res\n            else:\n                LOG.error(\"Login error. URL: %(url)s\\n\"\n                          \"Reason: %(reason)s.\",\n                          {\"url\": consts.REST_AUTH_LOGIN, \"reason\": res.text})\n                if 'User authentication failed' in res.text:\n                    raise exception.InvalidUsernameOrPassword()\n                else:\n                    raise exception.StorageBackendException(\n                        six.text_type(res.text))\n        except Exception as e:\n            LOG.error(\"Login error: %s\", six.text_type(e))\n            raise exception.InvalidResults(e)\n\n    def logout(self):\n        try:\n            if self.session:\n                self.session.close()\n        except Exception as e:\n            err_msg = \"Logout error: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(e)\n\n    def get_storage(self, storage_id):\n        try:\n            storage_json = self.get_rest_info(consts.REST_SCALEIO_SYSTEM)\n            for system_json in (storage_json or []):\n                system_id = system_json.get('id')\n                system_links = json.loads(json.dumps(\n                    system_json.get('links')))\n                total_capacity = 0\n                used_capacity = 0\n                raw_capacity = 0\n                if not system_links:\n                    continue\n                storage_disk_list = self.list_disks(storage_id)\n                for storage_disk in storage_disk_list:\n                    raw_capacity += storage_disk.get('capacity')\n                mdm_cluster = json.loads(json.dumps(\n                    system_json.get('mdmCluster')))\n                version_info = json.dumps(\n                    system_json.get('systemVersionName'))\n                version_detail = version_info.split(' Version: ')\n                version_id = version_detail[1].replace('\\\"', '')\n                model = version_detail[0].replace('\\\"', '')\n                cluster_state = mdm_cluster.get('clusterState')\n                status = 
constants.StorageStatus.OFFLINE\n                if 'Degraded' in cluster_state:\n                    status = constants.StorageStatus.DEGRADED\n                elif 'Normal' in cluster_state:\n                    status = constants.StorageStatus.NORMAL\n                for system_link in system_links:\n                    if 'Statistics' in system_link.get('href'):\n                        storage_detail = self.get_rest_info(\n                            system_link.get('href'))\n                        total_capacity = storage_detail. \\\n                            get('maxCapacityInKb')\n                        used_capacity = storage_detail. \\\n                            get('capacityInUseInKb')\n                storage_map = {\n                    'name': 'ScaleIO',\n                    'vendor': consts.StorageVendor,\n                    'model': model,\n                    'status': status,\n                    'serial_number': system_id,\n                    'firmware_version': version_id,\n                    'raw_capacity': raw_capacity,\n                    'total_capacity': int(total_capacity) * units.Ki,\n                    'used_capacity': int(used_capacity) * units.Ki,\n                    'free_capacity': int(total_capacity\n                                         - used_capacity) * units.Ki\n                }\n                return storage_map\n        except exception.DelfinException as err:\n            err_msg = \"Get Storage System error: %s\" % err.msg\n            LOG.error(err_msg)\n            raise err\n        except Exception as e:\n            LOG.error(\"Get Storage System error: %s\", six.text_type(e))\n            raise exception.InvalidResults(e)\n\n    def list_storage_pools(self, storage_id):\n        storage_pool_list = []\n        try:\n            storage_pool_json = self.get_rest_info(\n                consts.REST_SCALEIO_STORAGE_POOL)\n            for pool_json in (storage_pool_json or []):\n                pool_name = pool_json.get('name')\n                native_storage_pool_id = pool_json.get('id')\n                pool_links = pool_json.get('links')\n                used_capacity = 0\n                total_size = 0\n                for pool_link in pool_links:\n                    if 'Statistics' in pool_link.get('rel'):\n                        storage_pool_statics = self.get_rest_info(\n                            pool_link.get('href'))\n                        json.dumps(storage_pool_statics)\n                        used_capacity = storage_pool_statics.\\\n                            get('capacityInUseInKb')\n                        total_size = storage_pool_statics.\\\n                            get('maxCapacityInKb')\n                pool_map = {\n                    'name': pool_name,\n                    'storage_id': storage_id,\n                    'native_storage_pool_id': native_storage_pool_id,\n                    'status': constants.StorageStatus.NORMAL,\n                    'storage_type': constants.StorageType.BLOCK,\n                    'total_capacity': int(total_size) * units.Ki,\n                    'used_capacity': int(used_capacity) * units.Ki,\n                    'free_capacity': int(total_size -\n                                         used_capacity) * units.Ki\n                }\n                storage_pool_list.append(pool_map)\n            return storage_pool_list\n        except exception.DelfinException as err:\n            err_msg = \"Get Storage pool error: %s\" % err.msg\n            LOG.error(err_msg)\n 
           raise err\n        except Exception as e:\n            LOG.error(\"Get Storage pool error: %s\", six.text_type(e))\n            raise exception.InvalidResults(e)\n\n    def list_volumes(self, storage_id):\n        list_volumes = []\n        try:\n            storage_volume_json = self.get_rest_info(\n                consts.REST_SCALEIO_VOLUMES)\n            for json_volume in (storage_volume_json or []):\n                volume_name = json_volume.get('name')\n                native_storage_pool_id = json_volume.get('storagePoolId')\n                native_volume_id = json_volume.get('id')\n                total_size = json_volume.get('sizeInKb')\n                volume_type = constants.VolumeType.THIN\n                if 'Thick' in json_volume.get('volumeType'):\n                    volume_type = constants.VolumeType.THICK\n                volume_map = {\n                    'name': volume_name,\n                    'storage_id': storage_id,\n                    'description': volume_name,\n                    'status': 'normal',\n                    'native_volume_id': native_volume_id,\n                    'native_storage_pool_id': native_storage_pool_id,\n                    'wwn': native_volume_id,\n                    'type': volume_type,\n                    'total_capacity': int(total_size) * units.Ki,\n                    'free_capacity': consts.DEFAULT_VOLUME_USERD_CAPACITY,\n                    'used_capacity': consts.DEFAULT_VOLUME_USERD_CAPACITY,\n                    'compressed': True,\n                    'deduplicated': True\n                }\n                list_volumes.append(volume_map)\n            return list_volumes\n        except exception.DelfinException as err:\n            err_msg = \"Get Storage volume error: %s\" % err.msg\n            LOG.error(err_msg)\n            raise err\n        except Exception as e:\n            LOG.error(\"Get Storage volume error: %s\", six.text_type(e))\n            raise exception.InvalidResults(e)\n\n    def list_disks(self, storage_id):\n        disks_list = []\n        try:\n            storage_disks_json = self.get_rest_info(consts.REST_SCALEIO_DISKS)\n            for json_disk in (storage_disks_json or []):\n                device_status = json_disk.get('deviceState')\n                capacity = json_disk.get('maxCapacityInKb')\n                status = constants.DiskStatus.NORMAL\n                if device_status != 'Normal':\n                    status = constants.DiskStatus.OFFLINE\n                disk_map = {\n                    'native_disk_id': json_disk.get('id'),\n                    'name': json_disk.get('name'),\n                    'status': status,\n                    'storage_id': storage_id,\n                    'native_disk_group_id': json_disk.get('sdsId'),\n                    'serial_number': json_disk.get('id'),\n                    'capacity': int(capacity) * units.Ki,\n                    'health_score': status\n                }\n                disks_list.append(disk_map)\n            return disks_list\n        except exception.DelfinException as err:\n            err_msg = \"Get Storage disk error: %s\" % err.msg\n            LOG.error(err_msg)\n            raise err\n        except Exception as e:\n            LOG.error(\"Get Storage disk error: %s\", six.text_type(e))\n            raise exception.InvalidResults(e)\n\n    def list_alerts(self, query_para=None):\n        alert_list = []\n        try:\n            storage_alert = self.get_rest_info(consts.REST_SCALEIO_ALERT)\n            
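# Unknown alertType codes fall back to a lower-cased,\n            # de-underscored form of the raw code string.\n            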
alert_description_map = alert_consts.ALERT_MAP\n            for json_alert in (storage_alert or []):\n                match_key = json_alert.get('id') + json_alert.get('name')\n                occur_time = json_alert.get('startTime')\n                datetime_obj = datetime.datetime.strptime(\n                    occur_time, consts.DATETIME_UTC_FORMAT)\n                alert_time = int(time.mktime(datetime_obj.timetuple()) *\n                                 consts.DEFAULT_ALERTS_TIME_CONVERSION\n                                 + datetime_obj.microsecond /\n                                 consts.DEFAULT_ALERTS_TIME_CONVERSION)\n                alert_type_desc = json_alert.get('alertType')\n                alert_type_desc = alert_type_desc.lower().replace('_', ' ')\n                if not alert_util.is_alert_in_time_range(query_para,\n                                                         alert_time):\n                    continue\n                alert_severity = json_alert.get('severity')\n                if 'LOW' in alert_severity:\n                    alert_severity = constants.Severity.MINOR\n                elif 'MEDIUM' in alert_severity:\n                    alert_severity = constants.Severity.CRITICAL\n                elif 'HIGH' in alert_severity:\n                    alert_severity = constants.Severity.FATAL\n                alert_type = json_alert.get('alertType')\n                alert_model = {\n                    'alert_id': json_alert.get('id'),\n                    'alert_name': alert_type + json_alert.get('name'),\n                    'severity': alert_severity,\n                    'category': constants.Category.FAULT,\n                    'type': alert_type,\n                    'sequence_number': json_alert.get('uuid'),\n                    'description': alert_description_map.get(\n                        json_alert.get('alertType'), alert_type_desc),\n                    'occur_time': alert_time,\n                    'match_key': hashlib.md5(\n                        match_key.encode()).hexdigest()\n                }\n                alert_list.append(alert_model)\n            return alert_list\n        except exception.DelfinException as err:\n            err_msg = \"Get Storage alerts error: %s\" % err.msg\n            LOG.error(err_msg)\n            raise err\n        except Exception as e:\n            LOG.error(\"Get Storage alerts error: %s\", six.text_type(e))\n            raise exception.InvalidResults(e)\n\n    def list_storage_host_initiators(self, storage_id):\n        initiators_list = []\n        try:\n            storage_initiators = self.get_rest_info(\n                consts.REST_SCALIO_INITIIATORS)\n            list_host = self.list_storage_hosts(storage_id)\n            for initiators_json in (storage_initiators or []):\n                status = initiators_json.get('sdsState')\n                initiators_id = initiators_json.get('id')\n                initiators_type = constants.InitiatorType.UNKNOWN\n                if 'iscsi' in initiators_json.get('perfProfile'):\n                    initiators_type = constants.InitiatorType.ISCSI\n                if 'Normal' == status:\n                    status = constants.InitiatorStatus.ONLINE\n                elif 'Disconnected' == status:\n                    status = constants.InitiatorStatus.OFFLINE\n                ip_list = initiators_json.get('ipList')\n                native_storage_host_id = None\n                for ip_data in ip_list:\n                    sds_ip = ip_data.get('ip')\n           
         for host_json in list_host:\n                        ip_address = host_json.get('ip_address')\n                        if sds_ip == ip_address:\n                            native_storage_host_id = \\\n                                host_json.get('native_storage_host_id')\n                initiators_dict = {\n                    \"name\": initiators_json.get('name'),\n                    \"storage_id\": storage_id,\n                    \"native_storage_host_initiator_id\": initiators_id,\n                    \"wwn\": initiators_id,\n                    \"type\": initiators_type,\n                    \"status\": status,\n                    \"native_storage_host_id\": native_storage_host_id,\n                }\n                initiators_list.append(initiators_dict)\n            return initiators_list\n        except exception.DelfinException as err:\n            err_msg = \"Get Storage initiators error: %s\" % err.msg\n            LOG.error(err_msg)\n            raise err\n        except Exception as e:\n            LOG.error(\"Get Storage initiators error: %s\", six.text_type(e))\n            raise exception.InvalidResults(e)\n\n    def list_storage_hosts(self, storage_id):\n        host_list = []\n        try:\n            storage_hosts = self.get_rest_info(consts.REST_SCALIO_HOSTS)\n            for host_json in (storage_hosts or []):\n                status = host_json.get('mdmConnectionState')\n                if 'Connected' == status:\n                    status = constants.HostStatus.NORMAL\n                elif 'Disconnected' == status:\n                    status = constants.HostStatus.OFFLINE\n                ip_address = host_json.get('sdcIp')\n                soft_version = host_json.get('softwareVersionInfo')\n                host_dict = {\n                    \"name\": host_json.get('sdcGuid'),\n                    \"description\": ip_address + soft_version,\n                    \"storage_id\": storage_id,\n                    \"native_storage_host_id\":\n                        host_json.get('id'),\n                    \"os_type\": host_json.get('osType'),\n                    \"status\": status,\n                    \"ip_address\": ip_address\n                }\n                host_list.append(host_dict)\n            return host_list\n        except exception.DelfinException as err:\n            err_msg = \"Get Storage hosts error: %s\" % err.msg\n            LOG.error(err_msg)\n            raise err\n        except Exception as e:\n            LOG.error(\"Get Storage hosts error: %s\", six.text_type(e))\n            raise exception.InvalidResults(e)\n\n    def list_masking_views(self, storage_id):\n        list_masking_views_list = []\n        try:\n            storage_view = self.get_rest_info(consts.REST_SCALEIO_VOLUMES)\n            for map_json in (storage_view or []):\n                view_name = map_json.get('name')\n                volume_id = map_json.get('id')\n                map_sdc_list = map_json.get('mappedSdcInfo')\n                if map_sdc_list:\n                    for map_sdc in map_sdc_list:\n                        sdc_id = map_sdc.get('sdcId')\n                        view_map = {\n                            \"name\": view_name + sdc_id + volume_id,\n                            \"description\": view_name,\n                            \"storage_id\": storage_id,\n                            \"native_masking_view_id\":\n                                view_name + sdc_id + volume_id,\n                            'native_volume_id': 
volume_id,\n                            'native_storage_host_id': sdc_id\n                        }\n                        list_masking_views_list.append(view_map)\n            return list_masking_views_list\n        except exception.DelfinException as err:\n            err_msg = \"Get Storage Views Error: %s\" % err.msg\n            LOG.error(err_msg)\n            raise err\n        except Exception as e:\n            LOG.error(\"Get Storage Views Error: %s\", six.text_type(e))\n            raise exception.InvalidResults(e)\n\n    @staticmethod\n    def parse_alert(alert):\n        alert_model = dict()\n        try:\n            alert_dict = alert.split(' ')\n            for alert_json in alert_dict:\n                alert_detail = alert_json.split('=')[1]\n                if consts.OID_SEVERITY in alert_json:\n                    severity = consts.TRAP_ALERT_MAP.get(\n                        alert_detail, constants.Severity.INFORMATIONAL)\n                    alert_model['severity'] = severity\n                elif consts.OID_EVENT_ID in alert_json:\n                    alert_model['alert_name'] = alert_detail.replace('\\\"', '')\n                elif consts.OID_EVENT_TYPE in alert_json:\n                    alert_desc = alert_detail.split('.')[2].lower().replace(\n                        '_', ' ')\n                    alert_model['description'] = alert_desc\n                    alert_model['location'] = alert_desc\n                elif consts.OID_ERR_ID in alert_json:\n                    alert_model['alert_id'] = str(\n                        alert_detail.replace('\\\"', ''))\n            # Category, type and timestamp are the same for every trap,\n            # so set them once instead of on every parsed token.\n            alert_model['category'] = constants.Category.FAULT\n            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n            now = time.time()\n            alert_model['occur_time'] = \\\n                int(round(now * consts.DEFAULT_ALERTS_TIME_CONVERSION))\n            return alert_model\n        except Exception as e:\n            LOG.error(e)\n            msg = \"Failed to build alert model: %s.\" % (six.text_type(e))\n            raise exception.InvalidResults(msg)\n\n    def get_rest_info(self, url, data=None, method='GET'):\n        if 'login' == data:\n            self.session.auth = requests.auth.HTTPBasicAuth(\n                self.rest_username, cryptor.decode(self.rest_password))\n        else:\n            self.login()\n            self.session.auth = requests.auth.HTTPBasicAuth(\n                self.rest_username, self.rest_auth_token)\n        res = self.do_call(url, data, method)\n        try:\n            if res.status_code == 200:\n                result_json = json.loads(res.text)\n            elif res.status_code == 500:\n                LOG.error('Connect Timeout error')\n                raise exception.ConnectTimeout()\n            elif res.status_code == 401:\n                LOG.error('User authentication failed')\n                raise exception.InvalidUsernameOrPassword()\n            else:\n                raise exception.BadResponse()\n        except Exception as err:\n            LOG.exception('Get RestHandler.call failed: %(url)s.'\n                          ' Error: %(err)s', {'url': url, 'err': err})\n            raise exception.InvalidResults(err)\n        return result_json\n"
  },
  {
    "path": "delfin/drivers/dell_emc/scaleio/scaleio_stor.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#\n# http:#www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom oslo_log import log\nfrom delfin.drivers import driver\nfrom delfin.drivers.dell_emc.scaleio import rest_handler\nfrom delfin.drivers.dell_emc.scaleio.rest_handler import RestHandler\n\nLOG = log.getLogger(__name__)\n\n\nclass ScaleioStorageDriver(driver.StorageDriver):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.rest_handler = rest_handler.RestHandler(**kwargs)\n        self.rest_handler.logout()\n        self.rest_handler.verify = kwargs.get('verify', False)\n        self.rest_handler.login()\n\n    def reset_connection(self, context, **kwargs):\n        self.rest_handler.logout()\n        self.rest_handler.verify = kwargs.get('verify', False)\n        return self.rest_handler.login()\n\n    def get_storage(self, context):\n        return self.rest_handler.get_storage(self.storage_id)\n\n    def list_storage_pools(self, context):\n        return self.rest_handler.list_storage_pools(self.storage_id)\n\n    def list_volumes(self, context):\n        return self.rest_handler.list_volumes(self.storage_id)\n\n    def list_disks(self, context):\n        return self.rest_handler.list_disks(self.storage_id)\n\n    def list_alerts(self, context, query_para=None):\n        return self.rest_handler.list_alerts(query_para)\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return RestHandler.parse_alert(alert)\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def clear_alert(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    def list_storage_host_initiators(self, context):\n        return self.rest_handler.list_storage_host_initiators(self.storage_id)\n\n    def list_storage_hosts(self, context):\n        return self.rest_handler.list_storage_hosts(self.storage_id)\n\n    def list_masking_views(self, context):\n        return self.rest_handler.list_masking_views(self.storage_id)\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}'\n"
  },
  {
    "path": "delfin/drivers/dell_emc/unity/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/dell_emc/unity/alert_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport hashlib\nimport time\n\nimport six\nfrom oslo_log import log\n\nfrom delfin import exception, utils\nfrom delfin.common import alert_util\nfrom delfin.common import constants\nfrom delfin.drivers.dell_emc.unity import consts\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\nclass AlertHandler(object):\n\n    OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'\n    OID_NODE = '1.3.6.1.4.1.1139.103.1.18.1.1'\n    OID_COMPONENT = '1.3.6.1.4.1.1139.103.1.18.1.2'\n    OID_SYMPTOMID = '1.3.6.1.4.1.1139.103.1.18.1.3'\n    OID_SYMPTOMTEXT = '1.3.6.1.4.1.1139.103.1.18.1.4'\n    ALERT_LEVEL_MAP = {0: constants.Severity.CRITICAL,\n                       1: constants.Severity.CRITICAL,\n                       2: constants.Severity.CRITICAL,\n                       3: constants.Severity.MAJOR,\n                       4: constants.Severity.WARNING,\n                       5: constants.Severity.FATAL,\n                       6: constants.Severity.INFORMATIONAL,\n                       7: constants.Severity.NOT_SPECIFIED\n                       }\n    TRAP_LEVEL_MAP = {'1.3.6.1.4.1.1139.103.1.18.2.0':\n                      constants.Severity.CRITICAL,\n                      '1.3.6.1.4.1.1139.103.1.18.2.1':\n                      constants.Severity.CRITICAL,\n                      '1.3.6.1.4.1.1139.103.1.18.2.2':\n                      constants.Severity.CRITICAL,\n                      '1.3.6.1.4.1.1139.103.1.18.2.3':\n                      constants.Severity.MAJOR,\n                      '1.3.6.1.4.1.1139.103.1.18.2.4':\n                      constants.Severity.WARNING,\n                      '1.3.6.1.4.1.1139.103.1.18.2.5':\n                      constants.Severity.FATAL,\n                      '1.3.6.1.4.1.1139.103.1.18.2.6':\n                      constants.Severity.INFORMATIONAL,\n                      '1.3.6.1.4.1.1139.103.1.18.2.7':\n                      constants.Severity.NOT_SPECIFIED\n                      }\n    SECONDS_TO_MS = 1000\n    SECONDS_PER_HOUR = 60 * 60\n    STATE_SOLVED = 2\n    TIME_PATTERN = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\n    @staticmethod\n    def parse_alert(context, alert):\n        try:\n            alert_model = dict()\n            alert_model['alert_id'] = alert.get(AlertHandler.OID_SYMPTOMID)\n            trap_map_desc = consts.TRAP_DESC.get(\n                alert.get(AlertHandler.OID_SYMPTOMID))\n            if trap_map_desc:\n                alert_desc = trap_map_desc[2]\n            else:\n                alert_desc = alert.get(AlertHandler.OID_SYMPTOMTEXT)\n            alert_model['alert_name'] = alert.get(AlertHandler.OID_SYMPTOMTEXT)\n            alert_model['severity'] = AlertHandler.TRAP_LEVEL_MAP.get(\n                alert.get(AlertHandler.OID_SEVERITY),\n                constants.Severity.INFORMATIONAL)\n            alert_model['category'] = constants.Category.FAULT\n            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n            
occur_time = utils.utcnow_ms()\n            alert_model['occur_time'] = occur_time\n            alert_model['description'] = alert_desc\n            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n            alert_model['location'] = alert.get(AlertHandler.OID_NODE)\n            alert_model['match_key'] = hashlib.md5(alert.get(\n                AlertHandler.OID_SYMPTOMTEXT).encode()).hexdigest()\n\n            return alert_model\n        except Exception as e:\n            LOG.error(e)\n            msg = _(\"Failed to build alert model as some attributes \"\n                    \"are missing\")\n            raise exception.InvalidResults(msg)\n\n    def parse_queried_alerts(self, alert_model_list, alert_dict, query_para):\n        alerts = alert_dict.get('entries')\n        for alert in alerts:\n            try:\n                content = alert.get('content', {})\n                if content.get('state') == AlertHandler.STATE_SOLVED:\n                    continue\n                occur_time = int(time.mktime(time.strptime(\n                    content.get('timestamp'),\n                    self.TIME_PATTERN)))\n                hour_offset = (time.mktime(time.localtime()) - time.mktime(\n                    time.gmtime())) / AlertHandler.SECONDS_PER_HOUR\n                occur_time = occur_time + (int(hour_offset) *\n                                           AlertHandler.SECONDS_PER_HOUR)\n                if not alert_util.is_alert_in_time_range(\n                        query_para, int(occur_time *\n                                        AlertHandler.SECONDS_TO_MS)):\n                    continue\n                alert_model = {}\n                location = ''\n                resource_type = constants.DEFAULT_RESOURCE_TYPE\n                if content.get('component'):\n                    location = content.get('component').get('id')\n                alert_model['alert_id'] = content.get('messageId')\n                alert_model['alert_name'] = content.get('message')\n                alert_model['severity'] = self.ALERT_LEVEL_MAP.get(\n                    content.get('severity'),\n                    constants.Severity.INFORMATIONAL)\n                alert_model['category'] = constants.Category.FAULT\n                alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n                alert_model['sequence_number'] = content.get('id')\n                alert_model['occur_time'] = int(occur_time *\n                                                AlertHandler.SECONDS_TO_MS)\n                alert_model['description'] = content.get('description')\n                alert_model['resource_type'] = resource_type\n                alert_model['location'] = location\n                alert_model['match_key'] = hashlib.md5(\n                    content.get('message').encode()).hexdigest()\n                if alert_model['severity'] == \\\n                        constants.Severity.INFORMATIONAL:\n                    continue\n                alert_model_list.append(alert_model)\n            except Exception as e:\n                LOG.error(e)\n                err_msg = \"Failed to build alert model as some attributes \" \\\n                          \"missing in queried alerts: %s\" % (six.text_type(e))\n                raise exception.InvalidResults(err_msg)\n"
  },
  {
    "path": "delfin/drivers/dell_emc/unity/consts.py",
    "content": "# Copyright 2021 The SODA Authors.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nDEFAULT_TIMEOUT = 10\nALERT_TIMEOUT = 20\nREST_RETRY_TIMES = 1\nTRAP_DESC = {\n    \"1:127486a\": [\"WARNING\", \"ALRT_LCC_FW_UPGRADE_FAILED\",\n                  \"The link control card (LCC) will continue to function with \"\n                  \"older versions of the software. The next time the affected \"\n                  \"Storage Processor (SP) reboots, the firmware will attempt \"\n                  \"the upgrade again.\"],\n    \"1:127486b\": [\"WARNING\", \"ALRT_LCC_FW_UPGRADE_FAILED\",\n                  \"The link control card (LCC) will continue to function with \"\n                  \"older versions of the software. The next time the affected \"\n                  \"Storage Processor (SP) reboots, the firmware will attempt \"\n                  \"the upgrade again.\"],\n    \"1:1278982\": [\"ERROR\", \"ALRT_DAE_INVALID_DRIVE\",\n                  \"There is an invalid disk in the Disk Array Enclosure (DAE).\"\n                  \" Replace the disk with the correct type.\"],\n    \"1:12dc501\": [\"CRITICAL\", \"ALRT_SYS_POOL_OFFLINE\",\n                  \"An internal system service is offline. Some system \"\n                  \"capabilities may not be available. Contact your service \"\n                  \"provider\"],\n    \"1:12dcd00\": [\"CRITICAL\", \"ALRT_SYS_LUN_OFFLINE\",\n                  \"An internal system service required for metrics is offline.\"\n                  \"System metrics are not available. Contact your service \"\n                  \"provider.\"],\n    \"1:1670071\": [\"INFO\", \"ALRT_PBL_ENV_CLEARED\",\n                  \"The environmental interface failure has been resolved. \"\n                  \"No action is required.\"],\n    \"1:1678007\": [\"ERROR\", \"ALRT_PBL_ESP_ERROR_FUP_FAILED\",\n                  \"Firmware upgrade has failed. Contact \"\n                  \"your service provider.\"],\n    \"1:167800e\": [\"ERROR\", \"ALRT_PBL_ESP_ERROR_RESUME_PROM_READ_FAILED\",\n                  \"The Resume Prom Read operation has failed. Contact \"\n                  \"your service provider\"],\n    \"1:167803d\": [\"CRITICAL\", \"ALRT_SSD_PECYCLE_EXPIRE\",\n                  \"%2 is predicted to exceed drive specified write endurance \"\n                  \"in %3 days. It is recommended to replace the drive\"],\n    \"1:1678049\": [\"ERROR\", \"ALRT_PBL_ENV_FAILURE\",\n                  \"An environmental interface failure has been detected. Gath\"\n                  \"er diagnostic materials and contact your service \"\n                  \"provider.\"],\n    \"1:1678052\": [\"ERROR\", \"ALRT_PBL_ESP_ERROR_LCC_COMPONENT_FAULTED\",\n                  \"Link Control Card (LCC) has faulted. This failure may be \"\n                  \"caused by a component other than the LCC. Replace the \"\n                  \"faulted disks first. 
If the problem persists, contact \"\n                  \"your service provider.\"],\n    \"1:167805f\": [\"ERROR\", \"ALRT_DRIVE_AFA_FAILED\",\n                  \"One of the system drives failed All-Flash check. Replace \"\n                  \"the non-Flash drive with a Flash drive.\"],\n    \"1:167c00a\": [\"ERROR\", \"ALRT_PBL_ESP_PEERSP_POST_FAIL\",\n                  \"The Storage Processor (SP) has faulted. Contact your \"\n                  \"service provider.\"],\n    \"1:1688028\": [\"ERROR\", \"ALRT_DISK_USER_DISK_IN_SYSTEM_SLOT\",\n                  \"A bind user disk has been inserted in a system disk slot. \"\n                  \"Remove the disk and insert it in a user drive slot.\"],\n    \"1:1688029\": [\"ERROR\", \"ALRT_DISK_SYSTEM_DISK_IN_USER_SLOT\",\n                  \"A system disk has been inserted in the wrong slot. Remove the\"\n                  \" disk and insert it in a system disk slot.\"],\n    \"1:16d80c4\": [\"INFO\", \"ALRT_SNAPSHOT_INVALIDATION\",\n                  \"Snapshots have been automatically marked for deletion due \"\n                  \"to insufficient pool space.\"],\n    \"1:16f0077\": [\"INFO\", \"ALRT_MLU_UDI_RDWT_RESTORE\",\n                  \"The file system is now read-write, because free space in \"\n                  \"its pool has increased to more than 12.5 GB.\"],\n    \"1:16f0078\": [\"INFO\", \"ALRT_MLU_UDI_SNAPS_OK\",\n                  \"The file system is no longer at risk of losing its \"\n                  \"snapshots, because there is now enough free space in its \"\n                  \"associated pool.\"],\n    \"1:16f0079\": [\"INFO\", \"ALRT_MLU_UDI_ABOUT_NOT_NEEDING_FULL_SYNC\",\n                  \"The file system no longer needs a full synchronization for \"\n                  \"the associated replication session, because there is enough\"\n                  \" free space in the associated pool.\"],\n    \"1:16f4007\": [\"WARNING\",\n                  \"ALRT_FILESYSTEM_REACHED_CIFS_SHARE_COUNT_THRESHOLD\",\n                  \"Creation of the SMB share has exceeded the 90% threshold \"\n                  \"for the underlying file system or snapshot. Remove \"\n                  \"unnecessary SMB shares from the file system or snapshot.\"],\n    \"1:16f4008\": [\"WARNING\",\n                  \"ALRT_FILESYSTEM_REACHED_NFS_SHARE_COUNT_THRESHOLD\",\n                  \"Creation of the NFS share has exceeded the 90% threshold \"\n                  \"for the underlying file system or snapshot. Remove \"\n                  \"unnecessary NFS shares from the file system or snapshot.\"],\n    \"1:16f4009\": [\"WARNING\", \"ALRT_MLU_UDI_ABOUT_SNAP_INVALIDATION\",\n                  \"Pool containing the file system is low on free space, and \"\n                  \"the file system will lose all of its snapshots. To retain \"\n                  \"the snapshots, add more space to the pool, free up space \"\n                  \"from the pool, or use the CLI to change the file system's \"\n                  \"pool full policy to failWrites.\"],\n    \"1:16f400a\": [\"WARNING\", \"ALRT_MLU_UDI_ABOUT_NEEDING_FULL_SYNC\",\n                  \"Pool containing the file system is low on free space, so \"\n                  \"the associated replication session will need a full \"\n                  \"synchronization. 
To resolve this issue, add more space to \"\n                  \"the pool, free up space from the pool, or use the CLI to \"\n                  \"change the file system's pool full policy to failWrites.\"],\n    \"1:16f400b\": [\"WARNING\", \"ALRT_POOL_SPACE_LOW\",\n                  \"The pool space is low and the associated file system is \"\n                  \"configured with a Fail Writes pool full policy. When the \"\n                  \"pool reaches full capacity, any write operations to this \"\n                  \"file system may fail. Change the pool full policy for the \"\n                  \"file system using the CLI or add more space to the pool.\"],\n    \"1:16f8319\": [\"ERROR\", \"ALRT_MLU_UDI_RD_ONLY_SNAP_INVALIDATION\",\n                  \"The file system is now read-only, because the pool's free \"\n                  \"space dropped below 4 GB, and its poolFullPolicy is set to \"\n                  \"failWrites. To make the file system read-write with this \"\n                  \"policy, add space to the pool or free up space until there \"\n                  \"is at least 12.5 GB of free space. Alternatively, use the \"\n                  \"CLI to change the file system's pool full policy to \"\n                  \"deleteAllSnaps.\"],\n    \"1:16fc000\": [\"ERROR\", \"ALRT_SYS_VDM_OFFLINE\",\n                  \"An internal system service is offline. Some system \"\n                  \"capabilities may not be available. Contact your service \"\n                  \"provider.\"],\n    \"1:1744002\": [\"WARNING\", \"ALRT_VVNX_VDISK_EXCEED_MAX_COUNT\",\n                  \"The maximum number of virtual disks is exceeded. One or \"\n                  \"more virtual disks will not be available unless you remove\"\n                  \" some existing virtual disks. Check the system \"\n                  \"log for details.\"],\n    \"1:174c001\": [\"CRITICAL\", \"ALRT_LXF_UNSUPPORTED_SCSI_CONTROLLER\",\n                  \"An unsupported virtual SCSI controller was added to the \"\n                  \"system. You should remove this controller, because it can \"\n                  \"cause problems on the next reboot.\"],\n    \"1:1760114\": [\"INFO\", \"ALRT_CBE_KEYSTORE_BACKUP_REQUIRED\",\n                  \"The Data at Rest Encryption keystore has been modified due \"\n                  \"to configuration changes on the array. It is very important\"\n                  \" to retrieve and save a copy of the keystore in order to \"\n                  \"secure your data on the array.\"],\n    \"1:1768001\": [\"ERROR\", \"ALRT_KMIP_SERVER_UNAVAILABLE\",\n                  \"A configured KMIP Server is either unavailable \"\n                  \"or misconfigured.\"],\n    \"1:1768002\": [\"ERROR\", \"ALRT_KMIP_SERVER_NO_ENCRYPTION_KEY\",\n                  \"A configured KMIP Server does not have the encryption \"\n                  \"key for this array.\"],\n    \"10:10000\": [\"WARNING\", \"ALRT_FILESYSTEM_REACHED_CIFS_SHARE_MAX_COUNT\",\n                 \"A file system has reached a limit of maximum allowed number \"\n                 \"of SMB shares.\"],\n    \"10:10001\": [\"WARNING\", \"ALRT_FILESYSTEM_REACHED_NFS_SHARE_MAX_COUNT\",\n                 \"A file system has reached a limit of maximum allowed number\"\n                 \" of NFS shares.\"],\n    \"12:104e0017\": [\"CRITICAL\", \"ALRT_LDAP_NO_CONNECT\",\n                    \"The system could not connect to the LDAP server. 
This \"\n                    \"impacts your ability to log into the system but does not\"\n                    \" impact data access.\"],\n    \"12:104f0003\": [\"ERROR\", \"ALRT_TIME_NOT_SYNCED\",\n                    \"There is a significant difference between the clock time\"\n                    \" of the Storage Processor (SP) and the Windows domain \"\n                    \"controller. To resolve time synchronization problems, \"\n                    \"you can set up a network time protocol (NTP) server or \"\n                    \"contact your Windows domain administrator.\"],\n    \"12:1074002f\": [\"CRITICAL\", \"ALRT_MS_DC_NO_CONNECT\",\n                    \"The system could not connect to the Microsoft Windows \"\n                    \"Domain Controller.\"],\n    \"12:10760024\": [\"CRITICAL\", \"ALRT_DNS_FAIL_PING\",\n                    \"The DNS server is not available on the network and the \"\n                    \"NX3e system could Not connect.\"],\n    \"12:10760025\": [\"CRITICAL\", \"ALRT_DNS_INVALID_CONFIG\",\n                    \"The system cannot connect to the DNS server. The DNS \"\n                    \"server may be configured Incorrectly.\"],\n    \"13:102b0001\": [\"ERROR\", \"ALRT_DUPLICATE_ADDRESS_FOUND\",\n                    \"A duplicate address was detected on the network. The \"\n                    \"address being configured cannot be used, because it is \"\n                    \"being used by another Node.\"],\n    \"13:102b0002\": [\"ERROR\", \"ALRT_DUPLICATE_ADDRESS_FOUND\",\n                    \"A duplicate address was detected on the network. The \"\n                    \"address being configured cannot be used, because it is \"\n                    \"being used by another node.\"],\n    \"13:10360005\": [\"WARNING\", \"ALRT_NAS_CA_CERT_EXPIRES_TODAY\",\n                    \"The CA certificate installed on the NAS server will \"\n                    \"expire today. This certificate is required to keep \"\n                    \"SSL-enabled services (such as LDAP with enabled SSL \"\n                    \"security and CA certificate validation) functioning. \"\n                    \"Upon certificate expiration, users may lose access to \"\n                    \"shares on the NAS server, especially when multiprotocol \"\n                    \"sharing is enabled. Contact the system administrator to \"\n                    \"renew the CA certificate, and then upload it to the \"\n                    \"NAS server.\"],\n    \"13:10360007\": [\"WARNING\", \"ALRT_NAS_CA_CERT_EXPIRES_IN_ONE_WEEK\",\n                    \"The CA certificate installed on the NAS server will \"\n                    \"expire in one week. This certificate is required to keep \"\n                    \"SSL-enabled services (such as LDAP with enabled SSL \"\n                    \"security and CA certificate validation) functioning. \"\n                    \"Once it expires, users may lose access to shares on the\"\n                    \" NAS server, especially when multiprotocol sharing is \"\n                    \"enabled. Contact the system administrator to renew the \"\n                    \"CA certificate, and then upload it to the NAS server.\"],\n    \"13:10360008\": [\"ERROR\", \"ALRT_NAS_CA_CERT_HAS_EXPIRED\",\n                    \"The CA certificate installed on the NAS server has \"\n                    \"expired. 
Services that use this certificate to \"\n                    \"validate remote hosts (such as LDAP with enabled SSL \"\n                    \"security and CA certificate validation) will not \"\n                    \"function properly, and corresponding SSL connections \"\n                    \"will be rejected. Users may lose access to shares on \"\n                    \"the NAS server, especially when multiprotocol sharing \"\n                    \"is enabled. Contact the system administrator to renew \"\n                    \"the CA certificate, and then upload it to the \"\n                    \"NAS server.\"],\n    \"13:10360009\": [\"INFO\", \"ALRT_NAS_CA_CERT_EXPIRES_IN_30_DAYS\",\n                    \"The CA certificate installed on the NAS server will \"\n                    \"expire in 30 days. This certificate is required to \"\n                    \"keep SSL-enabled services (such as LDAP with enabled \"\n                    \"SSL security and CA certificate validation) functioning.\"\n                    \" Upon certificate expiration, users may lose access to \"\n                    \"shares on the NAS server, especially when multiprotocol\"\n                    \" sharing is enabled. Contact the system administrator to\"\n                    \" renew the CA certificate, and then upload it to the \"\n                    \"NAS server.\"],\n    \"13:1040003c\": [\"WARNING\", \"ALRT_BLOCK_USER_SOFTQUOTA\",\n                    \"You have used too much space in the specified file system\"\n                    \" and should delete unwanted files and directories from it\"\n                    \". Alternatively, the administrator can increase your soft\"\n                    \" quota limit for the file system.\"],\n    \"13:1040003d\": [\"ERROR\", \"ALRT_BLOCK_USER_SOFTQUOTA_EXPIRED\",\n                    \"You have used too much space in the specified file system\"\n                    \" and will no longer be able to write to the file system\"\n                    \" unless you delete unwanted files and directories from it\"\n                    \". Alternatively, the administrator can increase your soft\"\n                    \" quota limit for the file system.\"],\n    \"13:1040003e\": [\"ERROR\", \"ALRT_BLOCK_USER_HARDQUOTA\",\n                    \"You have used too much space in the specified file system\"\n                    \" and will no longer be able to write to it unless you\"\n                    \" delete unwanted files and directories to reduce the\"\n                    \" percentage of used space. Alternatively, the \"\n                    \"administrator can increase your hard quota limit for\"\n                    \" the file system.\"],\n    \"13:1040003f\": [\"WARNING\", \"ALRT_BLOCK_USER_SOFTQUOTA_CROSSEDWITHINTREE\",\n                    \"You have used too much space in the specified quota tree\"\n                    \" and should delete unwanted files and directories from\"\n                    \" the tree. Alternatively, the administrator can increase\"\n                    \" your soft quota limit for the quota tree.\"],\n    \"13:10400040\": [\"ERROR\",\n                    \"ALRT_BLOCK_USER_SOFTQUOTACROSSED_GRACEEXPIREDWITHINTREE\",\n                    \"You have used too much space in the specified quota tree\"\n                    \" and will no longer be able to write to it unless you\"\n                    \" delete unwanted files and directories to reduce the \"\n                    \"percentage of used space. 
Alternatively, the \"\n                    \"administrator can increase your soft quota limit for\"\n                    \" that quota tree.\"],\n    \"13:10400041\": [\"ERROR\", \"ALRT_BLOCK_USER_HARDQUOTAEXCEEDEDWITHINTREE\",\n                    \"You have used too much space in the specified quota tree\"\n                    \" and will no longer be able to write to it unless you\"\n                    \" delete unwanted files and directories to reduce the\"\n                    \" percentage of used space. Alternatively, the \"\n                    \"administrator can increase your hard quota limit for \"\n                    \"the quota tree.\"],\n    \"13:10400042\": [\"WARNING\", \"ALRT_BLOCK_TREESOFTQUOTACROSSED\",\n                    \"Too much space has been consumed on the specified quota\"\n                    \" tree. You should delete unwanted files and directories\"\n                    \" from the quota tree. Alternatively, the administrator\"\n                    \" can increase the soft quota limit for the quota tree.\"],\n    \"13:10400043\": [\"ERROR\", \"ALRT_BLOCK_TREESOFTQUOTACROSSED_GRACEEXPIRED\",\n                    \"Too much space has been consumed on the specified quota\"\n                    \" tree. Users will no longer be able to write to the quota\"\n                    \" tree unless they delete unwanted files and directories \"\n                    \"from it. Alternatively, the administrator can increase \"\n                    \"the soft quota limit for the quota tree.\"],\n    \"13:10400044\": [\"ERROR\", \"ALRT_BLOCK_TREEHARDQUOTAEXCEEDED\",\n                    \"Too much space has been consumed on the specified quota\"\n                    \" tree. Users will no longer be able to write to the quota\"\n                    \" tree unless they delete unwanted files and directories\"\n                    \" from it. Alternatively, the administrator can increase\"\n                    \" the hard quota limit for the quota tree.\"],\n    \"13:10400045\": [\"WARNING\", \"ALRT_BLOCK_TREESOFTQUOTA_AGGREGATION\",\n                    \"Too much space has been consumed on the specified quota\"\n                    \" tree. You should delete unwanted files and directories\"\n                    \" from the quota tree. Alternatively, the administrator\"\n                    \" can increase the soft quota limit for the quota tree.\"],\n    \"13:10400046\": [\"ERROR\", \"ALRT_BLOCK_TREEHARDQUOTA_AGGREGATION\",\n                    \"Too much space has been consumed on the specified quota\"\n                    \" tree. Users will no longer be able to write to the\"\n                    \" quota tree unless they delete unwanted files and \"\n                    \"directories from it. Alternatively, the administrator \"\n                    \"can increase the hard quota limit for the quota tree.\"],\n    \"13:10400047\": [\"WARNING\", \"ALRT_BLOCK_USERSOFTQUOTA_AGGREGATION\",\n                    \"You have used too much space in the specified file\"\n                    \" system and should delete unwanted files and directories\"\n                    \" from it. 
Alternatively, the administrator can increase\"\n                    \" your soft quota limit for the file system.\"],\n    \"13:10400048\": [\"ERROR\", \"ALRT_BLOCK_USERHARDQUOTA_AGGREGATION\",\n                    \"You have used too much space in the specified file system\"\n                    \" and will no longer be able to write to the file system\"\n                    \" unless you delete unwanted files and directories from it\"\n                    \". Alternatively, the administrator can increase your \"\n                    \"quota limits for the file system.\"],\n    \"13:10400049\": [\"WARNING\",\n                    \"ALRT_BLOCK_USERSOFTQUOTAWITHINTREE_AGGREGATION\",\n                    \"You have used too much space in the specified quota tree\"\n                    \" and should delete unwanted files and directories from it\"\n                    \". Alternatively, the administrator can increase your soft\"\n                    \" quota limit for the quota tree.\"],\n    \"13:1040004a\": [\"ERROR\", \"ALRT_BLOCK_USERHARDQUOTAWITHINTREE_AGGREGATION\",\n                    \"You have used too much space in the specified quota tree\"\n                    \" and will no longer be able to write to the quota tree\"\n                    \" unless you delete unwanted files and directories from it\"\n                    \". Alternatively, the administrator can increase your\"\n                    \" quota limits for the quota tree.\"],\n    \"13:10490005\": [\"ERROR\", \"ALRT_NAS_NIS_UNREACHABLE\",\n                    \"The Network Information Service (NIS) configured for the\"\n                    \" NAS server was unable to provide user mapping \"\n                    \"information and is not responding. Check the availability\"\n                    \" of the NIS server, and ensure that the domain name and \"\n                    \"addresses used for the server are accurate.\"],\n    \"13:104e0005\": [\"ERROR\", \"ALRT_NAS_LDAP_ALL_UNREACHABLE\",\n                    \"The LDAP service configured for the NAS server was unable\"\n                    \" to provide user mapping information and is no longer \"\n                    \"responding. At least one configured LDAP server needs to \"\n                    \"be operational. Check the availability of the LDAP \"\n                    \"servers, and look for connectivity Issues.\"],\n    \"13:104e0007\": [\"WARNING\", \"ALRT_NAS_LDAP_BAD_CONFIGURATION\",\n                    \"The LDAP client settings on the NAS server are not \"\n                    \"configured correctly for the domain. You may encounter\"\n                    \" unexpected issues or mapping errors when using LDAP as a\"\n                    \" Unix directory service. Verify account settings. Check \"\n                    \"the binding and access permissions for the \"\n                    \"configured LDAP servers.\"],\n    \"13:104f0001\": [\"ERROR\", \"ALRT_NAS_CIFSSERVER_TIMENOTSYNC\",\n                    \"The current system time is not synchronized with the \"\n                    \"Active Directory controller of the domain. 
Check the \"\n                    \"system NTP (Network Time Protocol) settings to ensure the\"\n                    \" your system's time is synchronized with the time of \"\n                    \"the Active Directory controller.\"],\n    \"13:10510004\": [\"CRITICAL\", \"ALRT_VIRUS_CHECKER_NO_CONNECT\",\n                    \"The system could not connect to your virus Checker\"\n                    \" server.\"],\n    \"13:10510005\": [\"ERROR\", \"ALRT_VC_ERROR_STOPCIFS\",\n                    \"No virus checker server is available. SMB has stopped and\"\n                    \" cannot resume until a virus checker server becomes \"\n                    \"available. Check the status of the network and the virus\"\n                    \" checker servers.\"],\n    \"13:10510006\": [\"ERROR\", \"ALRT_VC_ERROR_STOPVC\",\n                    \"The virus checker server is not available. Virus checking\"\n                    \" is paused and cannot resume until a virus checker server\"\n                    \" becomes available. Check the status of the network and\"\n                    \" the virus checker servers.\"],\n    \"13:1051000b\": [\"CRITICAL\", \"ALRT_VIRUS_SCAN_CMPLTE\",\n                    \"The antivirus scan has completed successfully.\"],\n    \"13:1051000c\": [\"CRITICAL\", \"ALRT_VIRUS_SCAN_FAIL\",\n                    \"Antivirus scanning has aborted.        .\"],\n    \"13:1051000d\": [\"CRITICAL\", \"ALRT_VC_FILE_DELETE\",\n                    \"An infected file was detected and deleted by your \"\n                    \"antivirus application\"],\n    \"13:1051000e\": [\"CRITICAL\", \"ALRT_VC_FILE_RENAMED\",\n                    \"An infected file was detected and renamed by your\"\n                    \" antivirus application.\"],\n    \"13:1051000f\": [\"CRITICAL\", \"ALRT_VC_FILE_MOD\",\n                    \"An infected file was detected and modified by your \"\n                    \"antivirus application.\"],\n    \"13:1051001e\": [\"ERROR\", \"ALRT_VC_ERROR_SERVER_OFFLINE_MSRPC\",\n                    \"The system could not connect to your virus checker server\"\n                    \". Check the status of the network and the virus checker\"\n                    \" server.\"],\n    \"13:10510021\": [\"ERROR\", \"ALRT_VC_ERROR_SERVER_OFFLINE_MSRPC_WIN\",\n                    \"The system could not connect to your virus checker server\"\n                    \". Check the status of the network and the virus checker\"\n                    \" server.\"],\n    \"13:10510022\": [\"ERROR\", \"ALRT_VC_ERROR_SERVER_OFFLINE_HTTP\",\n                    \"The system could not connect to the virus checker server.\"\n                    \" Check the status of the network and the virus checker\"\n                    \" server.\"],\n    \"13:10600002\": [\"WARNING\", \"DHSM_CONNECTION_DOWN\",\n                    \"A Distributed Hierarchical Storage Management (DHSM) \"\n                    \"connection to a secondary storage is down. Make sure that\"\n                    \": 1) The secondary storage is up and running on the \"\n                    \"correct port. 2) The DHSM settings (URL, remote port, \"\n                    \"credentials) are Correct.\"],\n    \"13:10600003\": [\"INFO\", \"DHSM_CONNECTION_RESUMED\",\n                    \"A Distributed Hierarchical Storage Management (DHSM) \"\n                    \"connection to a secondary storage has resumed. 
It is now\"\n                    \" operational.\"],\n    \"13:106c004b\": [\"ERROR\", \"ALRT_REP_FAILED_FOR_ATTACHED_SNAPSHOT\",\n                    \"The system cannot replicate an attached snapshot. Detach\"\n                    \" the snapshot. When the detach operation completes, try\"\n                    \" to replicate the snapshot again.\"],\n    \"13:106c004c\": [\"ERROR\",\n                    \"ALRT_REP_FAILED_FOR_SNAPSHOT_WITH_SHARES_OR_EXPORTS\",\n                    \"The system cannot replicate a snapshot that has shares or\"\n                    \" exports. Delete the shares and exports, and try to \"\n                    \"replicate the snapshot again.\"],\n    \"13:10760001\": [\"CRITICAL\", \"ALRT_DNS_NO_CONNECT\",\n                    \"The system could not connect to the DNS server. This may\"\n                    \" be the result of the DNS settings being Incorrect.\"],\n    \"13:1092000f\": [\"NOTICE\", \"CEPP_STARTED\",\n                    \"The events publishing service is running on the specified\"\n                    \" NAS server.\"],\n    \"13:10920010\": [\"NOTICE\", \"CEPP_STOPPED\",\n                    \"The events publishing service is no longer running on the\"\n                    \" specified NAS server. Events are no longer being sent to\"\n                    \" the CEPA servers.\"],\n    \"13:10920011\": [\"INFO\", \"CEPP_SERVER_ONLINE\",\n                    \"The specified CEPA server is operational.\"],\n    \"13:10920012\": [\"ERROR\", \"CEPP_SERVER_OFFLINE0\",\n                    \"The specified CEPA server is not operational. Verify: 1)\"\n                    \" Network availability and the CEPA facility is running on\"\n                    \" the CEPA server. 2) That a pool has at least one event \"\n                    \"assigned. 3) That the Events Publishing service is \"\n                    \"running. 4) Network integrity between the SMB server \"\n                    \"and the CEPA server.\"],\n    \"13:10920013\": [\"ERROR\", \"CEPP_SERVER_OFFLINENT\",\n                    \"The specified CEPA server is not operational. Verify: 1)\"\n                    \" Network availability and the CEPA facility is running on\"\n                    \" the CEPA server. 2) That a pool has at least one event \"\n                    \"assigned. 3) That the Events Publishing service is \"\n                    \"running. 4) Network integrity between the SMB server and\"\n                    \" the CEPA server.\"],\n    \"13:10920014\": [\"ERROR\", \"CEPP_SERVER_OFFLINEHTTP\",\n                    \"The specified CEPA server is not operational. Verify: 1)\"\n                    \" Network availability and the CEPA facility is running on\"\n                    \" the CEPA server. 2) That a pool has at least one event \"\n                    \"assigned. 3) That the Events Publishing service is \"\n                    \"running. 4) Network integrity between the SMB server and \"\n                    \"the CEPA server.\"],\n    \"13:10920015\": [\"ERROR\", \"CEPP_CIFS_SUSPENDED\",\n                    \"The SMB service was suspended by the events publishing \"\n                    \"service. The specified pool does not contain at least one\"\n                    \" online CEPA server, and an events policy is in effect. 
\"\n                    \"Make sure at least one CEPA server is online for this \"\n                    \"pool, or set the events policy to 'Ignore'.\"],\n    \"13:10920016\": [\"NOTICE\", \"CEPP_CIFS_RESUME\",\n                    \"The SMB service is no longer suspended by the events \"\n                    \"publishing service. There is either at least one online \"\n                    \"CEPA server in the pool, or the events policy was set to \"\n                    \"'Ignore'.\"],\n    \"13:10940002\": [\"WARNING\", \"ALRT_DEDUP_NO_SPACE\",\n                    \"There is insufficient space available to complete \"\n                    \"deduplication. You need to allocate additional space.\"],\n    \"13:10940066\": [\"WARNING\", \"ALRT_DEDUP_NO_PROT_SPACE\",\n                    \"There is insufficient space available to complete \"\n                    \"deduplication. You need to allocate additional \"\n                    \"protection space.\"],\n    \"13:10940068\": [\"INFO\", \"ALRT_DEDUP_FS_FAILED\",\n                    \"The deduplication process on the specified file system \"\n                    \"failed. This may have occurred because of insufficient \"\n                    \"disk space or other system resource issues. Check any \"\n                    \"related alerts and fix the underlying problems. If the \"\n                    \"problem persists, contact your service provider.\"],\n    \"13:10ad0001\": [\"WARNING\", \"ALRT_NO_DEFAULT_UNIX_ACCOUNT\",\n                    \"A Windows user was unable to access a multiprotocol file\"\n                    \" system that has a Unix access policy. Create a valid \"\n                    \"default Unix user for the associated NAS server, or map \"\n                    \"the Windows user to a valid Unix user.\"],\n    \"13:10ad0002\": [\"WARNING\", \"ALRT_NO_DEFAULT_WIN_ACCOUNT\",\n                    \"A Unix user was unable to access a multiprotocol file \"\n                    \"system that has a Windows access policy. Create a valid \"\n                    \"default Windows user for the associated NAS server, or \"\n                    \"map the Unix user to a valid Windows user.\"],\n    \"13:10ad0003\": [\"WARNING\", \"ALRT_INVALID_DEFAULT_WINDOWS_ACCOUNT\",\n                    \"A Unix user mapped to a default Windows user was unable \"\n                    \"to access a multiprotocol file system with a Windows \"\n                    \"access policy.\"],\n    \"13:10ad0004\": [\"WARNING\", \"ALRT_INVALID_DEFAULT_UNIX_ACCOUNT\",\n                    \"A Windows user was unable to access a multiprotocol file \"\n                    \"system because the default Unix user for the associated \"\n                    \"NAS server is invalid. Change the default Unix user to a \"\n                    \"valid user from the Unix directory service, or map the \"\n                    \"Windows user to a valid Unix user.\"],\n    \"13:10ad0005\": [\"ERROR\", \"ALRT_NAS_UNIX_USER_MAPPING_ERR\",\n                    \"User mapping failed. The Unix username cannot be mapped \"\n                    \"to a Windows username. Specify a valid Windows username \"\n                    \"to allow the Unix users to access the Windows- based \"\n                    \"file systems.\"],\n    \"13:10ad0007\": [\"ERROR\", \"ALRT_NAS_WIN_USER_MAPPING_ERR\",\n                    \"An SMB session cannot be established because the Windows \"\n                    \"username in the domain cannot be mapped to a Unix \"\n                    \"username. 
Check the Unix Directory Service settings, \"\n                    \"and optionally specify a default Unix username for the \"\n                    \"NAS server.\"],\n    \"14:100001\": [\"INFO\", \"DESC_TEST_SNMP_ALERT\",\n                  \"This is a test message to be sent in an SNMP trap.\"],\n    \"14:110001\": [\"INFO\", \"DESC_TEST_EMAIL_ALERT\",\n                  \"This is a test email alert message.\"],\n    \"14:160074\": [\"WARNING\", \"ALRT_AUTO_REMOVE_FILE_INTERFACE\",\n                  \"The system automatically removed the overridden \"\n                  \"file interface associated with a replication destination \"\n                  \"NAS server, because the corresponding file interface was \"\n                  \"removed on the source NAS server.\"],\n    \"14:160092\": [\"WARNING\", \"ALRT_AUTO_DISABLE_DNS_CLIENT\",\n                  \"The system automatically disabled an overridden DNS client \"\n                  \"of a replication destination NAS server, because the \"\n                  \"corresponding DNS client was disabled on the source \"\n                  \"NAS server.\"],\n    \"14:16009c\": [\"WARNING\", \"ALRT_AUTO_DISABLE_NIS_CLIENT\",\n                  \"The system automatically disabled the overridden NIS client\"\n                  \" of a replication destination NAS server, because the \"\n                  \"corresponding NIS client was disabled on the source \"\n                  \"NAS server.\"],\n    \"14:1600c4\": [\"WARNING\", \"ALRT_AUTO_DISABLE_LDAP_CLIENT\",\n                  \"The system automatically disabled an overridden LDAP client\"\n                  \" of a replication destination NAS server, because the \"\n                  \"corresponding LDAP client was disabled on the source NAS \"\n                  \"server.\"],\n    \"14:170001\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire soon\"\n                  \". Obtain and install the license files to ensure continued \"\n                  \"access to the relevant feature.\"],\n    \"14:170002\": [\"CRITICAL\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire soon\"\n                  \". Obtain and install the license files to ensure continued\"\n                  \" access to the relevant feature.\"],\n    \"14:170003\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170004\": [\"CRITICAL\", \"ALRT_ANTI_VIRUS_LICENSE_EXPIRED\",\n                  \"The Antivirus Server Integration license has expired, and \"\n                  \"the storage system no longer has antivirus protection. \"\n                  \"Obtain and install a new license file to ensure access to \"\n                  \"antivirus protection.\"],\n    \"14:170005\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. 
Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170006\": [\"CRITICAL\", \"ALRT_LICENSE_EXPIRED\",\n                  \"The EMC Unity Operating Environment V4.0 license has \"\n                  \"expired, and your access to Unity functionality has been \"\n                  \"disabled. Obtain and install the license file to ensure \"\n                  \"continued access to Unity functionality.\"],\n    \"14:170007\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170008\": [\"CRITICAL\", \"ALRT_CIFS_LICENSE_EXPIRED\",\n                  \"The CIFS/SMB Support license has expired, and the storage \"\n                  \"system no longer has support for the CIFS/SMB protocol. \"\n                  \"Obtain and install a new license file to ensure support \"\n                  \"for CIFS/SMB.\"],\n    \"14:170009\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:17000a\": [\"CRITICAL\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:17000b\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:17000c\": [\"CRITICAL\", \"ALRT_EMCSUPPORT_LICENSE_EXPIRED\",\n                  \"The EMC Support license has expired, and the storage \"\n                  \"system's access to EMC support has been disabled. Obtain \"\n                  \"and install a new license file to ensure access to EMC \"\n                  \"support.\"],\n    \"14:17000d\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:17000e\": [\"CRITICAL\", \"ALRT_ESA_LICENSE_EXPIRED\",\n                  \"The EMC Storage Analytics (ESA) license has expired, and \"\n                  \"the storage system no longer has access to ESA. Obtain and\"\n                  \" install a new license file to ensure access to ESA.\"],\n    \"14:17000f\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170010\": [\"CRITICAL\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. 
Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170011\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170012\": [\"CRITICAL\", \"ALRT_FASTVP_LICENSE_EXPIRED\",\n                  \"The FAST VP license has expired, and the storage system no \"\n                  \"longer has support for FAST VP. Obtain and install a new \"\n                  \"license file to ensure support for FAST VP.\"],\n    \"14:170013\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170014\": [\"CRITICAL\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170015\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170016\": [\"CRITICAL\", \"ALRT_ISCSI_LICENSE_WILL_EXPIRE\",\n                  \"The Internet Small Computer System Interface (iSCSI) \"\n                  \"license has expired, and the storage system no longer has\"\n                  \" support for iSCSI. Obtain and install a new license file\"\n                  \" to ensure iSCSI support.\"],\n    \"14:170017\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170018\": [\"CRITICAL\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170019\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:17001a\": [\"CRITICAL\", \"ALRT_LOCAL_COPIES_LICENSE_EXPIRED\",\n                  \"The Local Copies license has expired, and the storage \"\n                  \"system no longer has support for local copies (including \"\n                  \"the ability to create snapshots). Obtain and install a new \"\n                  \"license file to ensure support for local copies.\"],\n    \"14:17001b\": [\"WARNING\", \"ALRT_CIFS_LICENSE_EXPIRING\",\n                  \"The NFS license will expire soon, and the storage system's\"\n                  \" support for NFS will be disabled. 
Obtain and install a new\"\n                  \" license file to ensure continued support for NFS.\"],\n    \"14:17001c\": [\"CRITICAL\", \"ALRT_NFS_LICENSE_EXPIRED\",\n                  \"The NFS license has expired, and the storage system no \"\n                  \"longer has support for the NFS protocol. Obtain and install\"\n                  \" a new license file to ensure support for NFS.\"],\n    \"14:17001d\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:17001e\": [\"CRITICAL\", \"ALRT_QOS_LICENSE_EXPIRED\",\n                  \"The Quality of Service (QOS) license has expired, and the \"\n                  \"storage system no longer has support for the QOS feature.\"\n                  \" Obtain and install a new license file to ensure support \"\n                  \"for the QOS feature.\"],\n    \"14:17001f\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170020\": [\"CRITICAL\", \"ALRT_REPLICATION_LICENSE_EXPIRED\",\n                  \"The Replication license has expired, and the storage system\"\n                  \" no longer has support for replication. Obtain and install\"\n                  \" a new license file to ensure support for replication.\"],\n    \"14:170021\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170022\": [\"CRITICAL\", \"ALRT_SCE_LICENSE_EXPIRED\",\n                  \"The Storage Capacity Expansion license has expired, and \"\n                  \"your ability to manage extended storage capacity has been\"\n                  \" disabled. Obtain and install a new license file to ensure\"\n                  \" access to extended storage capacity.\"],\n    \"14:170023\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170024\": [\"CRITICAL\", \"ALRT_THIN_PROVISIONING_LICENSE_EXPIRED\",\n                  \"The Thin Provisioning license has expired, and the storage\"\n                  \" system no longer has support for thin provisioning. \"\n                  \"Obtain and install the license file to ensure support for\"\n                  \" thin provisioning.\"],\n    \"14:170025\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170026\": [\"CRITICAL\", \"ALRT_UNISPHERE_LICENSE_EXPIRED\",\n                  \"The Unisphere license has expired, and the storage system's\"\n                  \" access to Unisphere functionality has been disabled. 
\"\n                  \"Obtain and install a new license file to ensure access to\"\n                  \" Unisphere functionality.\"],\n    \"14:170027\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170028\": [\"CRITICAL\", \"ALRT_UC_LICENSE_EXPIRED\",\n                  \"The Unisphere Central license has expired, and the storage\"\n                  \" system's support for Unisphere Central has been disabled.\"\n                  \" Obtain and install a new license file to ensure support\"\n                  \" for Unisphere Central.\"],\n    \"14:170029\": [\"WARNING\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:17002a\": [\"CRITICAL\", \"ALRT_VMWARE_LICENSE_EXPIRED\",\n                  \"The VMware VASA/VVols license has expired, and the storage \"\n                  \"system no longer has support for VVols. Obtain and install\"\n                  \" a new license file to ensure support for VVols.\"],\n    \"14:17002b\": [\"CRITICAL\", \"ALRT_LICENSE_EXPIRING\",\n                  \"One of your system licenses has expired or will expire \"\n                  \"soon. Obtain and install the license files to ensure \"\n                  \"continued access to the relevant feature.\"],\n    \"14:170032\": [\"WARNING\", \"ALRT_INLINE_COMPRESSION_LICENSE_WILL_EXPIRE\",\n                  \"The inline compression license will expire soon. Obtain and\"\n                  \" install a new license file to ensure continued support \"\n                  \"for inline compression.\"],\n    \"14:170033\": [\"CRITICAL\", \"ALRT_INLINE_COMPRESSION_LICENSE_EXPIRED\",\n                  \"The Inline Compression license has expired, and the storage\"\n                  \" system no longer has support for inline compression. \"\n                  \"Obtain and install a new license file to ensure support for\"\n                  \" inline compression.\"],\n    \"14:170034\": [\"NOTICE\", \"ALRT_MAX_CAPACITY_LIMIT_INCREASE\",\n                  \"The maximum storage capacity limit has been increased.\"],\n    \"14:170051\": [\"CRITICAL\", \"ALRT_ANTI_VIRUS_LICENSE_WILL_EXPIRE\",\n                  \"The Antivirus Server Integration license has expired, and \"\n                  \"the storage system's access to antivirus protection will be\"\n                  \" disabled soon. Obtain and install a new license file to \"\n                  \"ensure continued access to antivirus protection.\"],\n    \"14:170052\": [\"CRITICAL\", \"ALRT_LICENSE_WILL_EXPIRE\",\n                  \"The EMC Unity Operating Environment, V4.0 license has \"\n                  \"expired, and your access to Unity functionality will be \"\n                  \"disabled soon. Obtain and install a new license file to \"\n                  \"ensure continued access to Unity functionality.\"],\n    \"14:170053\": [\"CRITICAL\", \"ALRT_CIFS_SMB_LICENSE_WILL_EXPIRE\",\n                  \"The CIFS/SMB Support license has expired, and the storage\"\n                  \" system's support for the CIFS/SMB protocol will be \"\n                  \"disabled soon. 
Obtain and install a new license file to \"\n                  \"ensure continued support for CIFS/SMB.\"],\n    \"14:170055\": [\"CRITICAL\", \"ALRT_EMCSUPPORT_LICENSE_WILL_EXPIRE\",\n                  \"The EMC Support license has expired, and the storage \"\n                  \"system's access to EMC support will be disabled soon. \"\n                  \"Obtain and install a new license file to ensure continued\"\n                  \" access to EMC support.\"],\n    \"14:170056\": [\"CRITICAL\", \"ALRT_ESA_LICENSE_WILL_EXPIRE\",\n                  \"The EMC Storage Analytics (ESA) license has expired, and \"\n                  \"the storage system's access to ESA will be disabled soon. \"\n                  \"Obtain and install a new license file to ensure continued \"\n                  \"access to ESA.\"],\n    \"14:170058\": [\"CRITICAL\", \"ALRT_FASTVP_LICENSE_EXPIRED_PERIOD\",\n                  \"The FAST VP license has expired, and the storage system's\"\n                  \" support for FAST VP will be disabled soon. Obtain and \"\n                  \"install a new license file to ensure continued support \"\n                  \"for FAST VP.\"],\n    \"14:17005a\": [\"CRITICAL\", \"ALRT_ISCSI_LICENSE_EXPIRING\",\n                  \"The Internet Small Computer System Interface (iSCSI) \"\n                  \"license has expired, and the storage system's support for \"\n                  \"iSCSI will be disabled soon. Obtain and install a new \"\n                  \"license file to ensure continued support for iSCSI.\"],\n    \"14:17005c\": [\"CRITICAL\", \"ALRT_LOCAL_COPIES_LICENSE_EXPIRING\",\n                  \"The Local Copies license has expired, and the storage \"\n                  \"system's support for local copies (including the ability \"\n                  \"to create snapshots) will be disabled soon. Obtain and \"\n                  \"install a new license file to ensure continued support \"\n                  \"for local copies.\"],\n    \"14:17005d\": [\"CRITICAL\", \"ALRT_NFS_LICENSE_EXPIRING\",\n                  \"The NFS license has expired, and the storage system's \"\n                  \"support for the NFS protocol will be disabled soon. \"\n                  \"Obtain and install a new license file to ensure continued \"\n                  \"support for NFS.\"],\n    \"14:17005e\": [\"CRITICAL\", \"ALRT_QOS_LICENSE_EXPIRING\",\n                  \"The Quality of Service (QOS) license has expired, and the\"\n                  \" storage system's support for the QOS feature will be \"\n                  \"disabled soon. Obtain and install a new license file to \"\n                  \"ensure continued support for the QOS feature.\"],\n    \"14:17005f\": [\"CRITICAL\", \"ALRT_REPLICATION_LICENSE_EXPIRING\",\n                  \"The Replication license has expired, and the storage \"\n                  \"system's support for replication will be disabled soon. \"\n                  \"Obtain and install a new license file to ensure continued \"\n                  \"support for replication.\"],\n    \"14:170060\": [\"CRITICAL\", \"ALRT_SCE_LICENSE_EXPIRING\",\n                  \"The Storage Capacity Expansion license has expired, and \"\n                  \"your ability to manage extended storage capacity will be \"\n                  \"disabled soon. 
Obtain and install a new license file to \"\n                  \"ensure continued access to extended storage capacity.\"],\n    \"14:170061\": [\"CRITICAL\", \"ALRT_THIN_PROVISIONING_LICENSE_EXPIRING\",\n                  \"The Thin Provisioning license has expired, and the storage\"\n                  \" system's support for thin provisioning will be disabled \"\n                  \"soon. Obtain and install the license file to ensure \"\n                  \"continued support for thin provisioning.\"],\n    \"14:170062\": [\"CRITICAL\", \"ALRT_UNISPHERE_LICENSE_EXPIRING\",\n                  \"The Unisphere license has expired, and the storage system's\"\n                  \" access to Unisphere functionality will be disabled soon. \"\n                  \"Obtain and install a new license file to ensure continued \"\n                  \"access to Unisphere functionality.\"],\n    \"14:170063\": [\"CRITICAL\", \"ALRT_UC_LICENSE_EXPIRING\",\n                  \"The Unisphere Central license has expired, and the storage\"\n                  \" system's support for Unisphere Central will be disabled \"\n                  \"soon. Obtain and install a new license file to ensure \"\n                  \"continued support for Unisphere Central.\"],\n    \"14:170064\": [\"CRITICAL\", \"ALRT_VMWARE_LICENSE_EXPIRING\",\n                  \"The VMware VASA/VVols license has expired, and the storage\"\n                  \" system's support for VVols will be disabled soon. Obtain \"\n                  \"and install a new license file to ensure continued support\"\n                  \" for VVols.\"],\n    \"14:170065\": [\"CRITICAL\", \"ALRT_INLINE_COMPRESSION_LICENSE_EXPIRING\",\n                  \"The Inline Compression license has expired, and the storage\"\n                  \" system's support for inline compression will be disabled \"\n                  \"soon. Obtain and install a new license file to ensure \"\n                  \"continued support for inline compression.\"],\n    \"14:180002\": [\"ERROR\", \"ALRT_HEALTH_CHECK_NOT_START\",\n                  \"The pre-upgrade health check has failed to start.\"],\n    \"14:180004\": [\"ERROR\", \"ALRT_HEALTH_CHECK_FAILED\",\n                  \"The pre-upgrade health check has failed. Check the error \"\n                  \"messages in the Health Check dialog box.\"],\n    \"14:180005\": [\"ERROR\", \"ALRT_HEALTH_CHECK_TERMINATED\",\n                  \"The pre-upgrade health check was unexpectedly terminated. \"\n                  \"Try running the health check again.\"],\n    \"14:180007\": [\"ERROR\", \"ALRT_UPGRADE_NOT_START\",\n                  \"The software upgrade process failed to start. Check the \"\n                  \"system logs and other alerts to identify the issue. Once \"\n                  \"the issue is fixed, try running the upgrade again.\"],\n    \"14:180008\": [\"NOTICE\", \"ALRT_UPGRADE_OK\",\n                  \"The upgrade completed successfully. To access the latest \"\n                  \"management software, you must reload Unisphere. Close any\"\n                  \" browsers opened prior to the upgrade and start a new \"\n                  \"Unisphere login session.\"],\n    \"14:180009\": [\"ERROR\", \"ALRT_UPGRADE_FAILED\",\n                  \"The upgrade has failed. Review information about the failed\"\n                  \" upgrade on the Settings screen.\"],\n    \"14:18000a\": [\"ERROR\", \"ALRT_UPGRADE_TERMINATED\",\n                  \"The upgrade terminated unexpectedly. 
Please try running the\"\n                  \" upgrade again.\"],\n    \"14:18000c\": [\"ERROR\", \"ALRT_UPGRADE_FAILED\",\n                  \"The upgrade has failed. From Unisphere, click Settings > \"\n                  \"More Configuration > Update Software and review information\"\n                  \" about the failed upgrade.\"],\n    \"14:18000d\": [\"NOTICE\", \"ALRT_UPGRADE_SUCCESS\",\n                  \"The upgrade has completed successfully.\"],\n    \"14:18000e\": [\"NOTICE\", \"ALRT_UPGRADE_SUCCESS\",\n                  \"The upgrade has completed successfully.\"],\n    \"14:18000f\": [\"ERROR\", \"ALRT_UPGRADE_FAILED\",\n                  \"The upgrade has failed. Review information about the failed\"\n                  \" upgrade on the Settings screen.\"],\n    \"14:180010\": [\"ERROR\", \"ALRT_UPGRADE_FAILED\",\n                  \"The upgrade has failed. Review information about the \"\n                  \"failed upgrade on the Settings screen.\"],\n    \"14:180011\": [\"ERROR\", \"ALRT_UPGRADE_TERMINATED\",\n                  \"The upgrade terminated unexpectedly. Please try running the\"\n                  \" upgrade again.\"],\n    \"14:180012\": [\"ERROR\", \"ALRT_UPGRADE_TERMINATED\",\n                  \"The upgrade terminated unexpectedly. Please try running the\"\n                  \" upgrade again.\"],\n    \"14:22001d\": [\"CRITICAL\", \"ALRT_STATICPOOL_TRANSACTION_LOG_FAILURE\",\n                  \"System was unable to automatically recover after the \"\n                  \"provisioning operation failed. Contact your service \"\n                  \"provider for assistance with system cleanup.\"],\n    \"14:300007\": [\"WARNING\", \"ALRT_DART_FS_OVER_THRESHOLD\",\n                  \"The total number of storage resources has exceeded the \"\n                  \"maximum allowed threshold limit. Delete unneeded snapshots\"\n                  \" or storage resources to free up some space.\"],\n    \"14:30014\": [\"ERROR\", \"ALRT_UPGRADE_ECOM_FAILED\",\n                 \"LDAP users and groups may have been lost during the upgrade.\"\n                 \" Review the list of LDAP users and groups to determine \"\n                 \"whether any have been deleted. Add missing LDAP users/groups\"\n                 \" again, if necessary.\"],\n    \"14:330009\": [\"CRITICAL\", \"ALRT_CONFIG_PSM_RW_FAILED\",\n                  \"The system encountered an error while accessing \"\n                  \"configuration information. Reboot the storage processors \"\n                  \"(SPs) from the Service System page.\"],\n    \"14:380001\": [\"WARNING\", \"ALRT_CONTRACT_WILL_EXPIRE\",\n                  \"The <a contract will expire in <b days. Go to the EMC \"\n                  \"Online Support portal to view and manage support \"\n                  \"contracts.\"],\n    \"14:380002\": [\"CRITICAL\", \"ALRT_CONTRACT_EXPIRED\",\n                  \"The <a contract has expired. You should renew this support\"\n                  \" contract immediately. Go to the EMC Online Support portal\"\n                  \" to view and manage contracts.\"],\n    \"14:380004\": [\"WARNING\", \"ALRT_CONTRACT_REFRESH_BAD_CREDS\",\n                  \"Contract data failed to refresh because the credentials \"\n                  \"that you provided are invalid. 
Verify the credentials and \"\n                  \"try again.\"],\n    \"14:380005\": [\"WARNING\", \"ALRT_CONTRACT_REFRESH_SERV_UNAVAIL\",\n                  \"The service contract data failed to automatically \"\n                  \"refresh.\"],\n    \"14:380006\": [\"WARNING\", \"ALRT_CONTRACT_REFRESH_ERR\",\n                  \"The service contract data failed to automatically refresh.\"\n                  \" This error is undetermined, but it is possible that this \"\n                  \"problem may be temporary. Please wait to see if the problem\"\n                  \" resolves itself.\"],\n    \"14:380009\": [\"INFO\", \"ALRT_DF_UPGRADE_AVAILABLE\",\n                  \"A disk firmware upgrade is now available for download. From\"\n                  \" Unisphere, click Support > Downloads. This link takes you \"\n                  \"to the EMC Online Support Downloads page from where you can\"\n                  \" download an upgrade for your storage system.\"],\n    \"14:38000a\": [\"INFO\", \"ALRT_LN_UPGRADE_AVAILABLE\",\n                  \"A language pack upgrade is now available for download. From\"\n                  \" Unisphere, click Support > Downloads. This link takes you \"\n                  \"to the EMC Online Support Downloads page from where you can\"\n                  \" download an upgrade for your storage system.\"],\n    \"14:38000b\": [\"WARNING\", \"ALRT_CONTRACT_CANT_VERIFY_CREDS\",\n                  \"The EMC Support account credentials that you provided \"\n                  \"cannot be verified because there is a network communication\"\n                  \" problem. Ensure that ports 80 (HTTP) and 443 (HTTPS) are \"\n                  \"open to internet traffic.\"],\n    \"14:38000c\": [\"NOTICE\", \"ALRT_ADVISORIES_AVAIL\",\n                  \"There are one or more new technical advisories available \"\n                  \"for viewing on the Technical Advisories page.\"],\n    \"14:380010\": [\"WARNING\", \"ALRT_CONTRACT_INVALID_CONTENT\",\n                  \"The service contract data failed to automatically refresh.\"\n                  \" The contract context is not in the proper format, but it \"\n                  \"is possible that this problem may be temporary and resolves\"\n                  \" itself. If it does not resolve itself, contact EMC service\"\n                  \" to check the service contract information in the backend \"\n                  \"servers. If the backend information is wrong, it is \"\n                  \"possible that a proxy server altered the context before \"\n                  \"transmitting it to the SP.\"],\n    \"14:380011\": [\"WARNING\", \"ALRT_NONE_CONTRACT_THROUGH_PROXY\",\n                  \"Unable to retrieve service contract information through the\"\n                  \" configured proxy server. Check whether the configured \"\n                  \"proxy server information is correct and the server is \"\n                  \"online and functioning properly.\"],\n    \"14:380012\": [\"ERROR\", \"ALR_NON_TECHNICAL_ADVISORY_THROUGH_PROXY\",\n                  \"Unable to get the latest technical advisory for the current\"\n                  \" storage system. 
Check whether the configured proxy server\"\n                  \" information is correct and the server is online and \"\n                  \"functioning properly.\"],\n    \"14:380013\": [\"ERROR\", \"ALRT_NON_UPGRADE_NOTIFICATION_THROUGH_PROXY\",\n                  \"Unable to determine the latest available storage software, drive\"\n                  \" firmware or language pack updates through the configured\"\n                  \" support proxy server. Check whether the configured proxy\"\n                  \" server information is correct and the server is online and\"\n                  \" functioning properly.\"],\n    \"14:380017\": [\"WARNING\", \"ALRT_FIRMWARE_UPGRADE_AVAILABLE_WARNING\",\n                  \"A recommended disk firmware upgrade is now available for\"\n                  \" download. The disk firmware version currently installed\"\n                  \" is more than 180 days old. To ensure optimal performance,\"\n                  \" upgrade the disk firmware.\"],\n    \"14:380018\": [\"WARNING\", \"ALRT_LN_UPGRADE_AVAILABLE_WARNING\",\n                  \"A recommended language pack upgrade is now available for \"\n                  \"download. The language pack version currently installed is\"\n                  \" more than 180 days old. To ensure optimal experience, \"\n                  \"upgrade the language pack.\"],\n    \"14:38001d\": [\"INFO\", \"ALRT_CONTACT_INFO_REMINDER\",\n                  \"Please verify your system contact information. This will \"\n                  \"help your service provider to contact you and quickly \"\n                  \"respond to any critical issues.\"],\n    \"14:38001e\": [\"INFO\", \"ALRT_SW_UPGRADE_AVAILABLE\",\n                  \"A recommended system software is now available for \"\n                  \"download. To ensure optimal system performance, EMC \"\n                  \"recommends upgrading to this version. Run a health check \"\n                  \"about a week before installing the upgrade to identify and\"\n                  \" resolve any underlying issues that may prevent a\"\n                  \" successful update.\"],\n    \"14:38001f\": [\"WARNING\", \"ALRT_SW_UPGRADE_AVAILABLE_WARNING\",\n                  \"System is running a system software version that is more \"\n                  \"than 180 days old. A recommended system software is now \"\n                  \"available for download. To ensure optimal system \"\n                  \"performance, EMC recommends upgrading to this version.\"\n                  \" Run a health check about a week before installing the \"\n                  \"upgrade to identify and resolve any underlying issues that\"\n                  \" may prevent a successful update.\"],\n    \"14:380020\": [\"ERROR\", \"ALRT_SW_UPGRADE_AVAILABLE_ERROR\",\n                  \"System is running a deprecated version of the system \"\n                  \"software. A recommended system software is now available \"\n                  \"for download. To ensure optimal system performance, EMC \"\n                  \"recommends upgrading to this version. Run a health check \"\n                  \"about a week before installing the upgrade to identify and\"\n                  \" resolve any underlying issues that may prevent a \"\n                  \"successful update.\"],\n    \"14:380021\": [\"ERROR\", \"ALRT_SW_UPGRADE_AVAILABLE_GENERAL_ERROR\",\n                  \"System is running a deprecated version of the system \"\n                  \"software. 
A recommended system software is now available \"\n                  \"for download. To ensure optimal system performance, \"\n                  \"EMC recommends upgrading to this version. Run a health \"\n                  \"check about a week before installing the upgrade to \"\n                  \"identify and resolve any underlying issues that may \"\n                  \"prevent a successful update.\"],\n    \"14:380022\": [\"INFO\", \"ALRT_SW_UPGRADE_AVAILABLE_PUHC\",\n                  \"A recommended update to Health Check is available for\"\n                  \" download.\"],\n    \"14:380027\": [\"CRITICAL\", \"ALRT_DISK_USAGE_CRITICAL\",\n                  \"There is little disk space left in the system disk of \"\n                  \"the current system. Please contact your service provider to\"\n                  \" clean it up as soon as possible.\"],\n    \"14:380028\": [\"WARNING\", \"ALRT_DISK_USAGE_WARNING\",\n                  \"There is not much disk space left in the system disk of \"\n                  \"the current system. Please pay attention to any new critical\"\n                  \" alert about system disk usage.\"],\n    \"14:380029\": [\"INFO\", \"ALRT_DISK_USAGE_INFO\",\n                  \"The system disk of the current system has enough disk \"\n                  \"space now.\"],\n    \"14:39000a\": [\"WARNING\", \"ALRT_SITE_INFO_UPDATE_REQUEST\",\n                  \"A dial home alert has been generated requesting an update\"\n                  \" to the site information.\"],\n    \"14:390014\": [\"ERROR\", \"ALRT_EVE_FAIL_ENABLE_FOR_UPGRADE\",\n                  \"Integrated ESRS could not be automatically re-enabled after\"\n                  \" the upgrade. Contact your service provider to re-enable\"\n                  \" it.\"],\n    \"14:390015\": [\"ERROR\", \"ALRT_PROXY_PASS_FAIL_RESTORE_FOR_UPGRADE\",\n                  \"The proxy credentials that were provided before the upgrade\"\n                  \" could not be transferred. Please configure the proxy \"\n                  \"server information again.\"],\n    \"14:440001\": [\"INFO\", \"DESC_TEST_MOZZO_ALERT\",\n                  \"This is a test mozzo alert message.\"],\n    \"14:450001\": [\"ERROR\", \"ALRT_SED_KEY_BACKUP_FAILED\",\n                  \"A request to back up the self-encrypting drive key has \"\n                  \"failed. If the problem persists, please go to Support\"\n                  \" Chat to chat with EMC support personnel. If this option\"\n                  \" is not available, contact your service provider.\"],\n    \"14:46000d\": [\"ERROR\", \"ALRT_ESRS_CANT_CONNECT\",\n                  \"ESRS is unable to make a connection to EMC. 
This usually \"\n                  \"indicates a network problem, though it may resolve on \"\n                  \"its own.\"],\n    \"14:46000e\": [\"ERROR\", \"ALRT_ESRS_CANT_START\",\n                  \"An error has occurred that is preventing the ESRS service \"\n                  \"from starting up.\"],\n    \"14:46000f\": [\"INFO\", \"ALRT_ESRS_OK\",\n                  \"All issues with ESRS have been resolved.\"],\n    \"14:460010\": [\"NOTICE\", \"ALRT_ESRS_DISABLED\",\n                  \"Remote support options are not available while ESRS is\"\n                  \" disabled.\"],\n    \"14:460011\": [\"NOTICE\", \"ALRT_ESRS_ENABLED\",\n                  \"Remote support options are available while ESRS is \"\n                  \"enabled.\"],\n    \"14:460012\": [\"ERROR\", \"ALRT_ESRS_NO_PROXY\",\n                  \"The connection to the ESRS proxy server has been lost. \"\n                  \"ESRS will not function correctly until the connection is \"\n                  \"restored. Verify that there are no network problems between\"\n                  \" the storage system and the proxy server, and that the \"\n                  \"proxy server itself has not been shut down.\"],\n    \"14:460013\": [\"INFO\", \"ALRT_ESRS_PROXY_RESTORED\",\n                  \"All connection issues with the ESRS proxy server have been\"\n                  \" resolved.\"],\n    \"14:460014\": [\"ERROR\", \"ALRT_ESRS_POL_MAN_LOST\",\n                  \"The connection to the ESRS Policy Manager has been lost.\"\n                  \" Remote connectivity will not be possible if it is \"\n                  \"configured with an Ask for approval policy. Additionally,\"\n                  \" any configuration changes you make to the Policy Manager\"\n                  \" will not take effect until connectivity is restored.\"],\n    \"14:460015\": [\"INFO\", \"ALRT_ESRS_POL_MAN_RESTORED\",\n                  \"All connection issues with the ESRS Policy Manager have\"\n                  \" been resolved.\"],\n    \"14:5000a\": [\"INFO\", \"ALRT_HOST_IQN_DUPLICATE\",\n                 \"The iSCSI Qualified Name (IQN) is present in two or more \"\n                 \"hosts. Modifying host access for these hosts, deleting any\"\n                 \" of these hosts, or deleting the IQN can interrupt I/O \"\n                 \"through the IQN.\"],\n    \"14:5010001\": [\"ERROR\", \"ALRT_MOZZO_CANT_REACH_SVR\",\n                   \"The Unisphere Central server may be temporarily \"\n                   \"unavailable or unreachable. Verify network connectivity.\"],\n    \"14:5010002\": [\"INFO\", \"ALRT_MOZZO_CAN_REACH_SVR\",\n                   \"The service is operating normally. No action is \"\n                   \"required.\"],\n    \"14:5010003\": [\"ERROR\", \"ALRT_MOZZO_INVL_VNXE_VER\",\n                   \"Unisphere Central server is not compatible with your \"\n                   \"storage system software. Contact the Unisphere Central \"\n                   \"administrator to upgrade the server to a compatible \"\n                   \"version.\"],\n    \"14:5010004\": [\"INFO\", \"ALRT_MOZZO_VALID_VNXE_VE\",\n                   \"The service is now operating normally. No action is \"\n                   \"required.\"],\n    \"14:5010005\": [\"ERROR\", \"ALRT_MOZZO_INVL_SVR_CERT\",\n                   \"The certificate could not be validated. The Unisphere \"\n                   \"Central server hash specified did not match the hash value\"\n                   \" provided by the server. 
Please contact your Unisphere \"\n                   \"Central server administrator to verify the certificate hash\"\n                   \" value.\"],\n    \"14:5010006\": [\"INFO\", \"ALRT_MOZZO_VALID_SVR_CERT\",\n                   \"The service is operating normally. No action is required.\"],\n    \"14:5010007\": [\"ERROR\", \"ALRT_MOZZO_INVL_VNXE_CERT\",\n                   \"The Unisphere Central challenge phrase specified did not \"\n                   \"match the same value provided by the server. Verify this\"\n                   \" value with your Unisphere Central server administrator.\"],\n    \"14:5010008\": [\"INFO\", \"ALRT_MOZZO_VALID_VNXE_CERT\",\n                   \"The service is operating normally. No action is \"\n                   \"required.\"],\n    \"14:501000a\": [\"ERROR\", \"ALRT_MOZZO_INVL_SVR_CERT_NAME\",\n                   \"The Unisphere Central server is responding with the wrong\"\n                   \" certificate name. Please verify your Unisphere Central \"\n                   \"configuration.\"],\n    \"14:501000b\": [\"INFO\", \"ALRT_MOZZO_VALID_SVR_CERT\",\n                   \"The service is operating normally. No action is required.\"],\n    \"14:52008e\": [\"ERROR\", \"ALRT_POOL_LIMITS_EXCEEDED\",\n                  \"The system was unable to create a new pool or extend an \"\n                  \"existing pool because the maximum number of pools or\"\n                  \" maximum space of all pools has been reached. Delete \"\n                  \"unneeded snapshots or storage resources to free up some \"\n                  \"space.\"],\n    \"14:52008f\": [\"ERROR\", \"ALRT_SV_LIMITS_EXCEEDED\",\n                  \"The system was unable to create a new LUN because the \"\n                  \"maximum number of LUNs or maximum number of LUNs and LUN \"\n                  \"snapshots has been reached. Delete unneeded snapshots or\"\n                  \" storage resources to free up some space.\"],\n    \"14:520090\": [\"ERROR\", \"ALRT_FS_LIMITS_EXCEEDED\",\n                  \"The system was unable to create a new file system because \"\n                  \"the maximum number of file systems and file system \"\n                  \"snapshots has been reached. Delete one or more file \"\n                  \"systems or file system snapshots to maintain system \"\n                  \"performance.\"],\n    \"14:520091\": [\"WARNING\", \"ALRT_POOL_THRESHOLDS_EXCEEDED\",\n                  \"The threshold of the total number of pools in the system \"\n                  \"or the total space of all the pools in the system has been \"\n                  \"exceeded. Delete one or more pools to maintain system \"\n                  \"performance.\"],\n    \"14:520092\": [\"WARNING\", \"ALRT_SV_THRESHOLDS_EXCEEDED\",\n                  \"The threshold of the total number of the LUNs or the total \"\n                  \"number of the LUNs and LUN snapshots has been exceeded. \"\n                  \"Delete one or more LUNs to maintain system performance.\"],\n    \"14:520093\": [\"WARNING\", \"ALRT_FS_THRESHOLDS_EXCEEDED\",\n                  \"The threshold of the total number of file systems and file\"\n                  \" system snapshots has been exceeded. 
Delete one or more \"\n                  \"file systems to maintain system performance.\"],\n    \"14:520096\": [\"ERROR\", \"ALRT_POOL_SIZE_LIMITS_EXCEEDED\",\n                  \"The system was unable to create a new pool or extend an \"\n                  \"existing pool because the specified pool size exceeds the\"\n                  \" system limit. Consider deleting any existing pools that \"\n                  \"may no longer be used, and then try creating the new pool\"\n                  \" with a size within system limits.\"],\n    \"14:520097\": [\"ERROR\", \"ALRT_POOL_NUMBER_LIMITS_EXCEEDED\",\n                  \"The system was unable to create a new pool because the\"\n                  \" maximum number of pools has been reached. Consider \"\n                  \"deleting any existing pools that may no longer be used, \"\n                  \"and then try creating the new pool with a size within \"\n                  \"system limits.\"],\n    \"14:520098\": [\"ERROR\", \"ALRT_LUN_SIZE_LIMITS_EXCEEDED\",\n                  \"The system was unable to create a new LUN or extend an \"\n                  \"existing LUN because the specified LUN size exceeds the system\"\n                  \" limit. Try creating a LUN again with a size that is within\"\n                  \" the system limits.\"],\n    \"14:520099\": [\"ERROR\", \"ALRT_LUN_NUMBER_LIMITS_EXCEEDED\",\n                  \"The system was unable to create a new LUN because the \"\n                  \"maximum number of LUNs has been reached. Consider deleting\"\n                  \" any existing LUNs that may no longer be used, and then try\"\n                  \" creating the new LUN with a size within system limits.\"],\n    \"14:52009a\": [\"ERROR\", \"ALRT_LUN_AND_SNAPSHOT_NUMBER_LIMITS_EXCEEDED\",\n                  \"The system was unable to create a new LUN because the \"\n                  \"maximum number of LUNs and LUN snapshots has been reached.\"\n                  \" Consider deleting any existing LUNs and LUN snapshots that\"\n                  \" may no longer be used, and then try creating the new LUN \"\n                  \"with a size within system limits.\"],\n    \"14:52009b\": [\"ERROR\", \"ALRT_FS_SIZE_LIMITS_EXCEEDED\",\n                  \"The system could not create a new file system or extend an\"\n                  \" existing one, because the specified file system size \"\n                  \"exceeds the system limit. Try specifying a file system size\"\n                  \" that is within the system limits.\"],\n    \"14:60001f\": [\"ERROR\", \"ALRT_SNAPSHOT_CREATE_FAILED\",\n                  \"The system could not create a snapshot because the storage\"\n                  \" resource does not have enough protection space. Add more\"\n                  \" protection space.\"],\n    \"14:600020\": [\"ERROR\", \"ALRT_SNAPSHOT_CREATE_HIT_APP_LIMIT\",\n                  \"The system could not create the snapshot because the\"\n                  \" maximum number of snapshots allowed for the application \"\n                  \"has been reached. 
Delete one or more snapshots and try\"\n                  \" again.\"],\n    \"14:600026\": [\"ERROR\", \"ALRT_SNAPSHOT_STILL_CREATING_LAST\",\n                  \"The system is unable to create a snapshot because another \"\n                  \"snapshot creation for this same application is in progress.\"\n                  \" Reduce the frequency of scheduled snapshots.\"],\n    \"14:600027\": [\"ERROR\", \"ALRT_SNAPSHOT_CREATE_HIT_FS_LIMIT\",\n                  \"The system could not create the snapshot because the\"\n                  \" maximum number of allowed file-based snapshots has been\"\n                  \" reached. Delete one or more snapshots and try again.\"],\n    \"14:600028\": [\"ERROR\", \"ALRT_SNAPSHOT_CREATE_HIT_VOL_LIMIT\",\n                  \"The system could not create the snapshot because the\"\n                  \" maximum number of allowed LUN snapshots has been reached.\"\n                  \" Delete one or more snapshots and try again.\"],\n    \"14:60002e\": [\"ERROR\", \"ALRT_SNAPSHOT_CREATE_FAILED_LUN\",\n                  \"An attempt to create a scheduled snapshot failed, because\"\n                  \" the system could not find any LUNs associated with the \"\n                  \"storage resource. If the storage resource has no LUNs, \"\n                  \"ignore this message. If the storage resource has LUNs, \"\n                  \"contact your service provider.\"],\n    \"14:600036\": [\"WARNING\", \"ALRT_SNAPSHOT_WILL_EXCEED_VDISK_CAPACITY\",\n                  \"The number of LUN snapshots is approaching the limit for \"\n                  \"the maximum snapshots allowed. Delete snapshots and/or \"\n                  \"reduce the frequency of scheduled snapshots to stay within\"\n                  \" snapshot capacity limits.\"],\n    \"14:600037\": [\"NOTICE\", \"ALRT_SNAPSHOT_WILL_NOT_EXCEED_VDISK_CAPACITY\",\n                  \"The predicted number of LUN snapshots is no longer expected\"\n                  \" to reach the maximum.\"],\n    \"14:600038\": [\"WARNING\", \"ALRT_SNAPSHOT_WILL_EXCEED_FS_CAPACITY\",\n                  \"The number of file-based snapshots is approaching the \"\n                  \"limit for the maximum snapshots allowed. Delete snapshots\"\n                  \" and/or reduce the frequency of scheduled snapshots to stay\"\n                  \" within snapshot capacity limits.\"],\n    \"14:600039\": [\"NOTICE\", \"ALRT_SNAPSHOT_WILL_NOT_EXCEED_FS_CAPACITY\",\n                  \"The predicted number of file-based snapshots is no longer \"\n                  \"expected to reach the maximum.\"],\n    \"14:60003a\": [\"ERROR\", \"ALRT_SNAPSHOT_CREATE_HIT_FS_NAS_SERVER_LIMIT\",\n                  \"The system cannot create the snapshot because the \"\n                  \"associated NAS server has reached the maximum \"\n                  \"combined limit of file systems and file system snapshots.\"\n                  \" Delete one or more snapshots and try again.\"],\n    \"14:600c8\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:600cc\": [\"WARNING\", \"ALRT_APP_DEGRADE\",\n                 \"The NAS server used by this file system is either \"\n                 \"restarting, is degraded, or is not accessing an external\"\n                 \" server. Check the NAS server health status and logs. 
If\"\n                 \" needed, check the external server status and login \"\n                 \"information.\"],\n    \"14:600cd\": [\"ERROR\", \"ALRT_APP_FAILED\",\n                 \"The NAS server used by this file system is either \"\n                 \"restarting, is degraded, or is not accessing an external\"\n                 \" server. Check the NAS server health status and logs. If\"\n                 \" needed, check the external server status and login\"\n                 \" information.\"],\n    \"14:600ce\": [\"CRITICAL\", \"ALRT_APP_SERVER_UNAVAILABLE\",\n                 \"The NAS server used by this storage resource is being \"\n                 \"restarted. No action required.\"],\n    \"14:600cf\": [\"WARNING\", \"ALRT_APP_REPL_MINOR\",\n                 \"The replication session for this application is degraded.\"],\n    \"14:600d0\": [\"WARNING\", \"ALRT_APP_REPL_CRIT\",\n                 \"The replication session for this storage resource has \"\n                 \"faulted. You need to delete this replication session and \"\n                 \"create a new replication session.\"],\n    \"14:600d1\": [\"INFO\", \"ALRT_APP_FS_OK\",\n                 \"This storage resource is operating normally. No action \"\n                 \"is required.\"],\n    \"14:600d2\": [\"WARNING\", \"ALRT_APP_FS_FILLING\",\n                 \"The file system is running out of space. Allocate more \"\n                 \"storage space to the storage resource.\"],\n    \"14:600d3\": [\"ERROR\", \"ALRT_APP_FS_FULL\",\n                 \"The file system has run out of space. Allocate more storage \"\n                 \"space to the storage resource.\"],\n    \"14:600d6\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this\"\n                 \" time. This may be an intermittent problem. Please wait to\"\n                 \" see if the problem resolves itself.\"],\n    \"14:600d9\": [\"INFO\", \"ALRT_VOL_OK\",\n                 \"The LUN is operating normally. No action is required.\"],\n    \"14:600de\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to\"\n                 \" see if the problem resolves itself.\"],\n    \"14:600e2\": [\"WARNING\", \"ALRT_APP_TEMP_UNMOUNT\",\n                 \"The storage associated with the storage resource is \"\n                 \"temporarily unavailable. This can be related to normal\"\n                 \" system activity, and your storage will be available \"\n                 \"shortly. If the storage remains unavailable, try fixing \"\n                 \"any underlying problems to restore access to the storage.\"\n                 \" If the problem persists, contact your service provider.\"],\n    \"14:600e4\": [\"CRITICAL\", \"ALRT_APP_PERM_UNMOUNT\",\n                 \"The storage associated with the storage resource is \"\n                 \"unavailable. This can be related to normal system activity, \"\n                 \"and your storage will be available shortly. If the storage \"\n                 \"remains unavailable, try fixing any underlying problems to \"\n                 \"restore access to the storage. If the problem persists, \"\n                 \"contact your service provider.\"],\n    \"14:600e7\": [\"CRITICAL\", \"ALRT_VOL_NEED_RECOVER\",\n                 \"The LUN is offline and requires recovery. 
This may be caused\"\n                 \" by the pool being offline. Please fix the issue on the \"\n                 \"pool first. If the problem still exists, contact your \"\n                 \"service provider.\"],\n    \"14:600e8\": [\"CRITICAL\", \"ALRT_VOL_OFFLINE\",\n                 \"The LUN is offline. This may be caused by the pool being \"\n                 \"offline. Please fix the issue on the pool first. If the \"\n                 \"problem still exists, contact your service provider.\"],\n    \"14:600e9\": [\"CRITICAL\", \"ALRT_VOL_BAD\",\n                 \"The LUN is unavailable or may have a data inconsistency. \"\n                 \"Try rebooting the storage system. If the problem persists, \"\n                 \"contact your service provider.\"],\n    \"14:600ea\": [\"WARNING\", \"ALRT_VOL_FAULT\",\n                 \"There are some issues detected on the LUN and it is \"\n                 \"degraded. Please contact your service provider.\"],\n    \"14:600eb\": [\"WARNING\", \"ALRT_VOL_BAD\",\n                 \"The LUN is unavailable or may have a data inconsistency. \"\n                 \"Try rebooting the storage system. If the problem persists,\"\n                 \" contact your service provider.\"],\n    \"14:600ec\": [\"WARNING\", \"ALRT_APP_FAULT\",\n                 \"There are some issues detected on the storage resource and \"\n                 \"it is degraded. Contact your service provider.\"],\n    \"14:600ed\": [\"CRITICAL\", \"ALRT_APP_OFFLINE\",\n                 \"The storage resource is offline. This may be caused by its\"\n                 \" storage elements being offline. Please contact your service\"\n                 \" provider.\"],\n    \"14:600ee\": [\"ERROR\", \"ALRT_APP_UNAVAILABLE\",\n                 \"The NAS server used by this file system is restarting, is\"\n                 \" degraded, or is not accessing an external server. Check the\"\n                 \" NAS server health status and logs. If needed, check the\"\n                 \" external server status and login information.\"],\n    \"14:600ef\": [\"WARNING\", \"ALRT_APP_UNAVAILABLE\",\n                 \"The NAS server used by this file system is restarting, is\"\n                 \" degraded, or is not accessing an external server. Check the\"\n                 \" NAS server health status and logs. If needed, check the \"\n                 \"external server status and login information.\"],\n    \"14:600f0\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:600f2\": [\"CRITICAL\", \"ALRT_APP_BAD_FS\",\n                 \"The file system is unavailable or may have a data \"\n                 \"inconsistency. Try rebooting the storage system. If the \"\n                 \"problem persists, contact your service provider.\"],\n    \"14:600f3\": [\"CRITICAL\", \"ALRT_APP_FS_NEED_RECOVER\",\n                 \"The file system is offline and requires recovery. This may \"\n                 \"be caused by the pool being offline. Please fix the issue \"\n                 \"on the pool first. If the problem still exists, contact your\"\n                 \" service provider.\"],\n    \"14:600f4\": [\"CRITICAL\", \"ALRT_APP_FS_OFFLINE\",\n                 \"The file system is offline. This may be caused by the pool\"\n                 \" being offline. 
Please fix the issue on the pool first. If \"\n                 \"the problem still exists, contact your service provider.\"],\n    \"14:600f5\": [\"WARNING\", \"ALRT_APP_FS_FAULT\",\n                 \"There are some issues detected on the file system and it is\"\n                 \" degraded. Please contact your service provider.\"],\n    \"14:600f6\": [\"WARNING\", \"ALRT_APP_BAD_FS\",\n                 \"The file system is unavailable or may have a data \"\n                 \"inconsistency. Try rebooting the storage system. If the \"\n                 \"problem persists, contact your service provider.\"],\n    \"14:600f7\": [\"WARNING\", \"ALRT_APP_FS_IO_SIZE_TOO_SMALL\",\n                 \"The majority of the recent write I/O operations to the \"\n                 \"VMware NFS datastore were not aligned with the configured\"\n                 \" Host I/O size.\"],\n    \"14:600f8\": [\"WARNING\", \"ALRT_APP_FS_IO_SIZE_UNALIGNED\",\n                 \"The majority of the recent write I/O operations to the \"\n                 \"VMware NFS datastore were not aligned with 8K.\"],\n    \"14:6012c\": [\"INFO\", \"ALRT_BBU_CHARGE\",\n                 \"The battery backup unit (BBU) in your Storage Processor is\"\n                 \" currently charging.\"],\n    \"14:6012d\": [\"CRITICAL\", \"ALRT_BBU_FAULT\",\n                 \"A battery backup unit (BBU) in your Storage Processor has \"\n                 \"faulted and needs to be replaced.\"],\n    \"14:6012e\": [\"ERROR\", \"ALRT_BBU_MISSING\",\n                 \"The battery backup unit (BBU) has been removed from your \"\n                 \"Storage Processor and needs to be reinstalled.\"],\n    \"14:6012f\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:60130\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:60131\": [\"CRITICAL\", \"ALRT_BBU_LOW\",\n                 \"The battery level in the Storage Processor (SP) is low. \"\n                 \"Please wait for the battery to charge. If the battery cannot\"\n                 \" be charged, pay attention to the alert for the battery and\"\n                 \" power supply.\"],\n    \"14:60132\": [\"CRITICAL\", \"ALRT_BBU_AC_FAULT\",\n                 \"Power supply to the battery backup unit has faulted. Check\"\n                 \" the power supply to the Storage Processor (SP).\"],\n    \"14:60133\": [\"CRITICAL\", \"ALRT_BBU_REPLACE\",\n                 \"The battery has faulted and needs to be replaced.\"],\n    \"14:60193\": [\"CRITICAL\", \"ALRT_DAE_FAULT\",\n                 \"A Disk Array Enclosure (DAE) has faulted. This may have \"\n                 \"occurred because of a faulted subcomponent. Identify and fix\"\n                 \" the issue with the subcomponent. If the problem persists,\"\n                 \" contact your service provider.\"],\n    \"14:60194\": [\"CRITICAL\", \"ALRT_LCC_FAULT\",\n                 \"A link control card (LCC) in your Disk Array Enclosure has\"\n                 \" faulted and needs to be replaced.\"],\n    \"14:60195\": [\"ERROR\", \"ALRT_DAE_MISCABLED\",\n                 \"A Disk Array Enclosure (DAE) has not been connected \"\n                 \"correctly. 
This may be a temporary issue because the DAE is\"\n                 \" getting connected or a Link Control Card (LCC) is getting \"\n                 \"inserted in the DAE. If the issue persists, contact your \"\n                 \"service provider.\"],\n    \"14:60196\": [\"CRITICAL\", \"ALRT_DAE_MISCONFIGURED\",\n                 \"Either the cables or the ID of a Disk Array Enclosure (DAE)\"\n                 \" has been misconfigured.\"],\n    \"14:60197\": [\"CRITICAL\", \"ALRT_DAE_MISSING\",\n                 \"A Disk Array Enclosure (DAE) has been removed and needs to \"\n                 \"be reinstalled.\"],\n    \"14:60198\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:6019c\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:6019e\": [\"CRITICAL\", \"ALRT_DAE_TOO_MANY\",\n                 \"The number of Disk Array Enclosures (DAEs) added has \"\n                 \"exceeded the maximum allowed. Remove the newly attached\"\n                 \" DAE.\"],\n    \"14:6019f\": [\"CRITICAL\", \"ALRT_DAE_UNSUPPORTED\",\n                 \"An unsupported Disk Array Enclosure (DAE) has been detected.\"\n                 \" Replace the DAE with one that the system supports.\"],\n    \"14:601a0\": [\"CRITICAL\", \"ALRT_DAE_CROSSCABLED\",\n                 \"The Disk Array Enclosure (DAE) has been cabled incorrectly.\"\n                 \" Ensure that the DAE is cabled correctly.\"],\n    \"14:601a1\": [\"WARNING\", \"ALRT_DAE_TEMPERATURE_WARNING\",\n                 \"The Disk Array Enclosure (DAE) temperature has reached the \"\n                 \"warning threshold. This may lead to the DAE shutting down.\"\n                 \" Check the hardware, environmental temperature, system logs,\"\n                 \" and other alerts to identify and fix the issue. If the \"\n                 \"problem persists, contact your service provider.\"],\n    \"14:601a2\": [\"ERROR\", \"ALRT_DAE_TEMPERATURE_FAULT\",\n                 \"The Disk Array Enclosure (DAE) temperature has reached the\"\n                 \" failure threshold. The DAE will shut down shortly. Check \"\n                 \"the hardware, environmental temperature, system logs, and \"\n                 \"other alerts to identify and fix the issue. If the problem \"\n                 \"persists, contact your service provider.\"],\n    \"14:601a3\": [\"ERROR\", \"ALRT_DAE_FAULT_DRIVE_FAULT\",\n                 \"A Disk Array Enclosure (DAE) has faulted. This may have \"\n                 \"occurred because of a faulted disk. Identify and fix the\"\n                 \" issue with the disk. If the problem persists, contact your\"\n                 \" service provider.\"],\n    \"14:601a4\": [\"ERROR\", \"ALRT_DAE_FAULT_POWERSUPPLY_FAULT\",\n                 \"A Disk Array Enclosure (DAE) has faulted. This may have \"\n                 \"occurred because of a faulted power supply. Identify and fix\"\n                 \" the issue with the power supply. If the problem persists, \"\n                 \"contact your service provider.\"],\n    \"14:601a5\": [\"ERROR\", \"ALRT_DAE_FAULT_LCC_FAULT\",\n                 \"A Disk Array Enclosure (DAE) has faulted. 
This may have \"\n                 \"occurred because of a faulted Link Control Card. Identify\"\n                 \" and fix the issue with the Link Control Card. If the \"\n                 \"problem persists, contact your service provider.\"],\n    \"14:601a6\": [\"ERROR\", \"ALRT_DAE_FAN_FAULT\",\n                 \"The disk array enclosure (DAE) has faulted. This may have \"\n                 \"occurred because of a faulted cooling module. Identify and \"\n                 \"fix the issue with the cooling module. If the problem \"\n                 \"persists, contact your service provider.\"],\n    \"14:601a7\": [\"ERROR\", \"ALRT_DAE_NO_REASON_FAILURE\",\n                 \"This DAE fault led is on but no specific fault is detected,\"\n                 \" this could be a transient state. Please contact your \"\n                 \"service provider if the issue persists.\"],\n    \"14:601a8\": [\"CRITICAL\", \"ALRT_DAE_FAULT_LCC_FAULT\",\n                 \"The fault LED on the disk array enclosure (DAE) is on. This\"\n                 \" may have occurred because of an issue with the Link Control\"\n                 \" Card (LCC) cables connecting to the DAE. Replace LCC cables\"\n                 \" to the enclousre first. If it does not solve the problem, \"\n                 \"replace the LCC(s) in the enclosure.\"],\n    \"14:601a9\": [\"CRITICAL\", \"ALRT_DAE_FAULT\",\n                 \"The Disk Array Enclosure (DAE) has faulted. This may have \"\n                 \"occurred because of a faulted internal component. Power \"\n                 \"cycle the enclosure first. If it does not solve the problem,\"\n                 \" replace the enclosure.\"],\n    \"14:601f4\": [\"ERROR\", \"ALRT_PWR_SUPPLY_NO_POWER\",\n                 \"A power supply in the enclosure is not receiving power. \"\n                 \"Check the power cables to be sure that each power cable \"\n                 \"is plugged in to its power supply.\"],\n    \"14:601f5\": [\"CRITICAL\", \"ALRT_PWR_SUPPLY_FAULT\",\n                 \"A power supply in your system has faulted and needs to \"\n                 \"be replaced.\"],\n    \"14:601f6\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is\"\n                 \" required.\"],\n    \"14:601f7\": [\"ERROR\", \"ALRT_PWR_SUPPLY_GONE\",\n                 \"A power supply in your system has been removed and needs \"\n                 \"to be reinstalled.\"],\n    \"14:601f8\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this\"\n                 \" time. This may be an intermittent problem. Please wait to\"\n                 \" see if the problem resolves itself.\"],\n    \"14:601f9\": [\"CRITICAL\", \"ALRT_PWR_UNSUPPORTED\",\n                 \"A power supply on your system is not supported. Replace it \"\n                 \"with a supported one.\"],\n    \"14:601fa\": [\"CRITICAL\", \"ALRT_PWR_SHUTDOWM\",\n                 \"A power supply on your system has shut down. Check the power\"\n                 \" supply cable Connections.\"],\n    \"14:601fb\": [\"CRITICAL\", \"ALRT_PWR_SMBUS_ACCESS_FAULT\",\n                 \"A power supply on your system cannot be accessed. Try \"\n                 \"reseating the power supply. If the problem persists, you may\"\n                 \" need to replace your power supply.\"],\n    \"14:601fc\": [\"WARNING\", \"ALRT_PWR_THERMAL_FAULT\",\n                 \"A power supply is operating at a high temperature. 
The power\"\n                 \" supply may not be the source of the problem. Gather \"\n                 \"diagnostic materials and contact your service provider.\"],\n    \"14:601fd\": [\"CRITICAL\", \"ALRT_PWR_FW_UPG_FAIL\",\n                 \"Firmware upgrade for the power supply has failed. Contact \"\n                 \"your service provider.\"],\n    \"14:60258\": [\"CRITICAL\", \"ALRT_DISK_FAULT\",\n                 \"A disk in your system has faulted. Check that the disk is \"\n                 \"seated properly. If the problem persists, replace \"\n                 \"the disk.\"],\n    \"14:6025b\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:6025c\": [\"WARNING\", \"ALRT_DISK_REBUILD\",\n                 \"A disk is resynchronizing with the system, because  it has\"\n                 \" been replaced. System performance may be affected during \"\n                 \"resynchronization. Caution: Do not do anything with the disk\"\n                 \" until it has finished synchronizing.\"],\n    \"14:6025d\": [\"ERROR\", \"ALRT_DISK_REMOVED\",\n                 \"A disk in your system has been removed and needs to be \"\n                 \"reinstalled.\"],\n    \"14:6025e\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to\"\n                 \" see if the problem resolves itself.\"],\n    \"14:6025f\": [\"CRITICAL\", \"ALRT_DISK_UNUSABLE\",\n                 \"A disk in your system is unusable and needs to be \"\n                 \"replaced.\"],\n    \"14:60261\": [\"ERROR\", \"ALRT_DISK_WRONG_SLOT\",\n                 \"A disk has been moved and inserted in the wrong slot. \"\n                 \"Reposition disk in the Correct slot.\"],\n    \"14:60262\": [\"WARNING\", \"ALRT_DISK_EXCEEDS_LIMIT\",\n                 \"The disk is unusable because the total number of disks \"\n                 \"configured has reached the system limit.\"],\n    \"14:60263\": [\"CRITICAL\", \"ALRT_DISK_NOT_SYMMETRIC\",\n                 \"A disk is unusable because a system Storage Processor (SP) \"\n                 \"cannot communicate with the disk. There are several \"\n                 \"possible causes for this problem. In Unisphere, go to the\"\n                 \" System Health page to locate the SPs. Verify that there is\"\n                 \" not an SP in service mode. Check to be sure a link control\"\n                 \" card (LCC) has not faulted. Then check to be sure the SAS\"\n                 \" cable is connected securely and is not damaged. Lastly, \"\n                 \"reseat the disk by removing and reinserting it. If the \"\n                 \"problem persists, you need to shut down and restart the \"\n                 \"system Click.\"],\n    \"14:60264\": [\"ERROR\", \"ALRT_DISK_SED_DISK\",\n                 \"Inserting a Self- Encrypting Drive (SED) into a system that\"\n                 \" does not support SED functionality is not allowed. Remove\"\n                 \" the drive and replace it with a non-self- encrypting\"\n                 \" drive.\"],\n    \"14:60265\": [\"ERROR\", \"ALRT_DISK_SED_ARRAY\",\n                 \"Inserting a drive that does not have self- encrypting \"\n                 \"functionality into a Self-Encrypting Drive (SED) system is\"\n                 \" not allowed. 
Remove the drive and replace it with an SED\"\n                 \" drive.\"],\n    \"14:60266\": [\"ERROR\", \"ALRT_DISK_LOCKED_FOREIGN_DISK\",\n                 \"The self-encrypting drive is locked and might have been \"\n                 \"inserted in the wrong array. Insert it in the correct \"\n                 \"array, or revert the drive to its factory default by \"\n                 \"running the svc_key_restore service script. For \"\n                 \"information, go to the EMC Online Support website, \"\n                 \"access the VNXe Product page, and search for VNXe \"\n                 \"Service Commands Technical Notes.\"],\n    \"14:60267\": [\"ERROR\", \"ALRT_DISK_LOCKED_CORRUPTED_KEY\",\n                 \"The authentication key is corrupted and the disk is \"\n                 \"locked. Please put the system in Service Mode, and run the\"\n                 \" svc_key_restore service script to restore the\"\n                 \" authentication key.\"],\n    \"14:60268\": [\"INFO\", \"ALRT_DISK_SLOT_EMPTY\",\n                 \"A disk in your system has been removed. The slot is empty.\"],\n    \"14:60276\": [\"ERROR\", \"ALRT_VVNX_VDISK_OFFLINE\",\n                 \"The virtual disk is not currently attached to the storage\"\n                 \" system. Resolve any connectivity or VMware configuration \"\n                 \"issues, and then try attaching the disk to the VM.\"],\n    \"14:60277\": [\"ERROR\", \"ALRT_VVNX_VDISK_ERROR\",\n                 \"This virtual disk failed due to a system or I/O error. Check\"\n                 \" the system logs and other alerts to identify the issue. \"\n                 \"Check the VM configuration and virtual environment.\"],\n    \"14:60278\": [\"INFO\", \"ALRT_VVNX_VDISK_WRONG_SYSTEM\",\n                 \"This virtual disk is accessible, but was originally \"\n                 \"configured for a different storage system. You can choose to\"\n                 \" reconfigure the disk or continue using it. Using the disk \"\n                 \"will overwrite the existing disk configuration and data.\"],\n    \"14:60279\": [\"INFO\", \"ALRT_VVNX_VDISK_WRONG_POOL\",\n                 \"This virtual disk is working and accessible, but an existing\"\n                 \" pool configuration has been detected on it. Adding the \"\n                 \"virtual disk to a new pool will delete all data from the \"\n                 \"previous configuration.\"],\n    \"14:6027a\": [\"ERROR\", \"ALRT_VVNX_VDISK_TOO_SMALL\",\n                 \"The virtual disk is too small. Detach it from the VM running\"\n                 \" UnityVSA, and attach a larger virtual disk. See the Alerts\"\n                 \" page for the minimum virtual disk size allowed.\"],\n    \"14:6027b\": [\"ERROR\", \"ALRT_VVNX_VDISK_TOO_LARGE\",\n                 \"The virtual disk is too large. Detach it from the VM running\"\n                 \" UnityVSA, and attach a smaller virtual disk.\"],\n    \"14:6027c\": [\"WARNING\", \"ALRT_DISK_EOL\",\n                 \"This disk is reaching the end of its service life and needs\"\n                 \" to be replaced.\"],\n    \"14:6027d\": [\"WARNING\", \"ALRT_VVNX_SPA_VDISK_NOT_REACHABLE\",\n                 \"The Storage Processor SP A cannot reach one of the virtual\"\n                 \" disks. The virtual disk is degraded. 
Check the VM \"\n                 \"configuration and virtual environment.\"],\n    \"14:6027e\": [\"WARNING\", \"ALRT_VVNX_SPB_VDISK_NOT_REACHABLE\",\n                 \"The Storage Processor SP B cannot reach one of the virtual \"\n                 \"disks. The virtual disk is degraded. Check the VM \"\n                 \"configuration and virtual environment.\"],\n    \"14:6027f\": [\"ERROR\", \"ALRT_VDISK_CLONED\",\n                 \"The virtual disk was cloned from another virtual disk. The \"\n                 \"system prevents the use of cloned disks to avoid corrupting\"\n                 \" potentially usable data.\"],\n    \"14:60280\": [\"INFO\", \"ALRT_DISK_EOL_IN_180_DAYS\",\n                 \"Drive is predicted to wear out in less than 180 days. If the\"\n                 \" drive is a provisioned drive and there is a spare drive\"\n                 \" available, the storage system will automatically replace\"\n                 \" it with no data loss when it reaches end-of-life.\"],\n    \"14:60281\": [\"INFO\", \"ALRT_DISK_EOL_IN_90_DAYS\",\n                 \"Drive is predicted to wear out in less than 90 days. If the\"\n                 \" drive is a provisioned drive and there is a spare drive \"\n                 \"available, the storage system will automatically replace it\"\n                 \" with no data loss when it reaches end-of-life.\"],\n    \"14:60282\": [\"WARNING\", \"ALRT_DISK_EOL_IN_30_DAYS\",\n                 \"Drive is predicted to wear out in less than 30 days. If the\"\n                 \" drive is provisioned and there is a spare drive available,\"\n                 \" the storage system will automatically replace the drive \"\n                 \"with no data loss when it reaches end-of-life. If the drive\"\n                 \" is unprovisioned, you should replace it.\"],\n    \"14:602bc\": [\"CRITICAL\", \"ALRT_LCC_FAULT\",\n                 \"A link control card (LCC) in your Disk Array Enclosure has \"\n                 \"faulted and needs to be replaced.\"],\n    \"14:602bd\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:602be\": [\"CRITICAL\", \"ALRT_LCC_REMOVED\",\n                 \"A link control card (LCC) in your Disk Array Enclosure has\"\n                 \" been removed and needs to be reinstalled.\"],\n    \"14:602bf\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to\"\n                 \" see if the problem resolves itself.\"],\n    \"14:602c0\": [\"CRITICAL\", \"ALRT_LCC_SHUNTED\",\n                 \"The port in the Link Control Card (LCC) within the Disk\"\n                 \" Array Enclosure (DAE) is disabled. Verify that the cabling \"\n                 \"exists. If the problem persists, you may need to replace\"\n                 \" the LCC.\"],\n    \"14:602c3\": [\"CRITICAL\", \"ALRT_LCC_UPG_FAIL\",\n                 \"Firmware upgrade for the link control card (LCC) has failed.\"\n                 \" Contact your service provider.\"],\n    \"14:602c4\": [\"ERROR\", \"ALRT_LCC_CONNECTION_FAULT\",\n                 \"The Link Control Card (LCC) has a connection fault. It may\"\n                 \" have occurred because of a faulted drive, cable, or the \"\n                 \"LCC itself. Replace any faulted disks to see whether the \"\n                 \"fault clears. 
If the problem persists, contact your service\"\n                 \" provider.\"],\n    \"14:60326\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is\"\n                 \" required.\"],\n    \"14:60327\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:60328\": [\"WARNING\", \"ALRT_POOL_USER_THRESH\",\n                 \"This storage pool has exceeded the capacity threshold you \"\n                 \"specified. To allocate more storage space, add additional \"\n                 \"disks to your system.\"],\n    \"14:60329\": [\"CRITICAL\", \"ALRT_POOL_SYS_DISKS_FAILED\",\n                 \"Depending on the type of disks your system uses, the loss\"\n                 \" of two or more disks may result in data loss. If \"\n                 \"multiple faulted disk drives are in Disk Processor Enclosure\"\n                 \" (DPE) slots 0-3, contact your service provider for \"\n                 \"assistance with replacement of these system disks.\"],\n    \"14:6032a\": [\"CRITICAL\", \"ALRT_POOL_NEED_RECOVER\",\n                 \"The pool is offline and requires recovery. Contact your \"\n                 \"service provider.\"],\n    \"14:6032b\": [\"CRITICAL\", \"ALRT_POOL_OFFLINE\",\n                 \"The pool is offline. Contact your service provider.\"],\n    \"14:6032c\": [\"ERROR\", \"ALRT_POOL_BAD_VOL\",\n                 \"The pool is unavailable or may have a data inconsistency. \"\n                 \"Try rebooting the storage system. If the problem persists, \"\n                 \"contact your service provider.\"],\n    \"14:6032d\": [\"WARNING\", \"ALRT_POOL_FAULT\",\n                 \"The pool performance is degraded. Check the storage system \"\n                 \"for hardware faults. Contact your service provider.\"],\n    \"14:6032e\": [\"WARNING\", \"ALRT_POOL_BAD_VOL\",\n                 \"The pool is unavailable or may have a data inconsistency. \"\n                 \"Try rebooting the storage system. If the problem persists, \"\n                 \"contact your service provider.\"],\n    \"14:6032f\": [\"WARNING\", \"ALRT_POOL_SPACE_HARVEST_FAIL\",\n                 \"Auto-delete ran into an internal error. The system will make\"\n                 \" another attempt later. If the problem persists, contact \"\n                 \"your service provider.\"],\n    \"14:60330\": [\"WARNING\", \"ALRT_POOL_SPACE_HARVEST_LWM\",\n                 \"Storage pool could not reach the pool-used-space low \"\n                 \"threshold. To address this issue, follow the suggestions in\"\n                 \" the associated help topic.\"],\n    \"14:60331\": [\"WARNING\", \"ALRT_POOL_SPACE_HARVEST_HWM\",\n                 \"Automatic snapshot deletion paused, because the storage pool\"\n                 \" could not reach the pool-used-space high threshold. To \"\n                 \"address this issue, follow the suggestions in the\"\n                 \" associated help topic.\"],\n    \"14:60332\": [\"WARNING\", \"ALRT_POOL_SNAP_HARVEST_FAIL\",\n                 \"Auto-delete ran into an internal error. The system will make\"\n                 \" another attempt later. 
If the problem persists, contact\"\n                 \" your service provider.\"],\n    \"14:60333\": [\"WARNING\", \"ALRT_POOL_SNAP_HARVEST_LWM\",\n                 \"Storage pool could not reach the snapshot-used-space low \"\n                 \"threshold. To address this issue, follow the suggestions in\"\n                 \" the associated help topic.\"],\n    \"14:60334\": [\"WARNING\", \"ALRT_POOL_SNAP_HARVEST_HWM\",\n                 \"Automatic snapshot deletion paused, because the storage pool\"\n                 \" could not reach the snapshot-used- space high threshold. To\"\n                 \" address this issue, follow the suggestions in the\"\n                 \" associated help topic.\"],\n    \"14:60335\": [\"WARNING\", \"ALRT_POOL_SYSTEM_THRESH\",\n                 \"This storage pool has exceeded the system capacity\"\n                 \" threshold. To allocate more storage space, add additional \"\n                 \"disks to your system.\"],\n    \"14:60336\": [\"ERROR\", \"ALRT_POOL_CRITICAL_THRESH\",\n                 \"This storage pool exceeds the critical capacity threshold. \"\n                 \"Thin-provisioned resources may suffer data loss or become \"\n                 \"unavailable when the pool reaches full capacity. Snapshots\"\n                 \" may become invalid and replication sessions may stop \"\n                 \"synchronizing for storage resources provisioned in this \"\n                 \"pool. To allocate more storage space, add more disks to \"\n                 \"your system.\"],\n    \"14:60337\": [\"INFO\", \"ALRT_POOL_SPACE_HARVEST_RUNNING\",\n                 \"Auto-delete of snapshots has been initiated because the \"\n                 \"pool space consumption exceeded the high threshold. If\"\n                 \" automatic snapshot deletion was not expected, you can \"\n                 \"modify the pool properties to disable the feature. Add more\"\n                 \" disks to the pool or increase the automatic deletion \"\n                 \"threshold.\"],\n    \"14:60338\": [\"INFO\", \"ALRT_POOL_SNAP_HARVEST_RUNNING\",\n                 \"Auto-delete initiated as the snap consumption exceeded the\"\n                 \" high threshold. If automatic snapshot deletion was not \"\n                 \"expected, you can modify the pool properties to disable the \"\n                 \"feature. Add more disks to the pool or increase the\"\n                 \" automatic deletion threshold.\"],\n    \"14:60339\": [\"WARNING\", \"ALRT_POOL_NEED_RECOVER_LATER\",\n                 \"The storage pool is degraded and requires recovery. This is\"\n                 \" not an urgent issue. Contact your service provider and \"\n                 \"schedule downtime to perform the pool recovery procedure.\"],\n    \"14:6033a\": [\"WARNING\", \"ALRT_POOL_INSUFFICIENT_FLASH_FOR_STORAGE\",\n                 \"The pool is not performing optimally, because it does not\"\n                 \" have Flash storage. Add Flash drives to the pool. See the\"\n                 \" Best Practices for Peformance and Availability document, \"\n                 \"available at http://bit.ly/unityinfo hub, for \"\n                 \"recommendations on configuring pools.\"],\n    \"14:6033b\": [\"WARNING\", \"ALRT_POOL_ADDITIONAL_FLASH_NEEDED_FOR_STORAGE\",\n                 \"The pool is not performing optimally due to insufficient \"\n                 \"Flash storage. Add Flash drives to the pool. 
See the Best\"\n                 \" Practices for Performance and Availability document, \"\n                 \"available at http://bit.ly/unityinfohub, for \"\n                 \"recommendations on configuring pools.\"],\n    \"14:6033c\": [\"INFO\", \"ALRT_POOL_DISK_EOL_WARNING\",\n                 \"Pool has one or more drives predicted to wear out in less \"\n                 \"than 180 days. The storage system will automatically replace\"\n                 \" the affected drives with no data loss when they reach \"\n                 \"end-of-life.\"],\n    \"14:6033d\": [\"INFO\", \"ALRT_POOL_DISK_EOL_WARNING_RANGE\",\n                 \"Pool has one or more drives predicted to wear out in less\"\n                 \" than 180 days. The storage system will automatically \"\n                 \"replace the affected drives with no data loss when they\"\n                 \" reach end-of-life.\"],\n    \"14:6033e\": [\"INFO\", \"ALRT_POOL_DISK_EOL_SEVERE\",\n                 \"Pool has one or more drives predicted to wear out in less\"\n                 \" than 90 days. The storage system will automatically replace\"\n                 \" the affected drives with no data loss when they reach\"\n                 \" end-of-life.\"],\n    \"14:6033f\": [\"INFO\", \"ALRT_POOL_DISK_EOL_SEVERE_RANGE\",\n                 \"Pool has one or more drives predicted to wear out in less\"\n                 \" than 90 days. The storage system will automatically replace\"\n                 \" the affected drives with no data loss when they reach\"\n                 \" end-of-life.\"],\n    \"14:60340\": [\"WARNING\", \"ALRT_POOL_DISK_EOL_CRITICAL\",\n                 \"Pool has one or more drives predicted to wear out in less \"\n                 \"than 30 days. If there are spare drives available, the\"\n                 \" storage system will automatically replace the affected\"\n                 \" drives with no data loss when they reach end-of-life.\"],\n    \"14:60341\": [\"WARNING\", \"ALRT_POOL_DISK_EOL_CRITICAL_RANGE\",\n                 \"Pool has one or more drives predicted to wear out in less \"\n                 \"than 30 days. If there are spare drives available, the \"\n                 \"storage system will automatically replace the affected \"\n                 \"drives with no data loss when they reach end-of-life.\"],\n    \"14:60342\": [\"CRITICAL\", \"ALRT_POOL_DISK_PACO_START_FAIL\",\n                 \"The system could not start an automatic copy of data from \"\n                 \"one or more drives in the pool to replace a drive that is\"\n                 \" wearing out, because spare drives are not available. Add\"\n                 \" drives to the pool.\"],\n    \"14:60343\": [\"WARNING\", \"ALRT_POOL_REBUILD\",\n                 \"A storage pool is rebuilding, because it lost a drive. \"\n                 \"System performance may be affected during the rebuilding.\"\n                 \" Caution: Do not access the pool until it has finished\"\n                 \" rebuilding.\"],\n    \"14:60344\": [\"INFO\", \"ALRT_POOL_FINISH_REBUILD\",\n                 \"A storage pool has finished rebuilding and is operating\"\n                 \" normally. No action is required.\"],\n    \"14:60345\": [\"INFO\", \"ALRT_LOW_SPARE_CAPACITY_STORAGE_POOL\",\n                 \"A storage pool does not have enough spare space. 
Caution: \"\n                 \"Replace the failed drive.\"],\n    \"14:60346\": [\"WARNING\", \"ALRT_INSUFFICIENT_SPARE_CAPACITY_STORAGE_POOL\",\n                 \"A storage pool does not have enough spare space and is\"\n                 \" degraded. Caution: Replace the failed drive.\"],\n    \"14:60347\": [\"INFO\", \"ALRT_OK_SPARE_CAPACITY_STORAGE_POOL\",\n                 \"The required amount of spare storage space for the pool has\"\n                 \" been restored. No action is required.\"],\n    \"14:60348\": [\"CRITICAL\", \"ALRT_POOL_DRIVE_EOL_AUTO_PACO_FAILED\",\n                 \"The pool has one or more drives that have reached the \"\n                 \"end-of-life threshold and the system failed to automatically\"\n                 \" start proactive copying of these drives, even though there\"\n                 \" were spare drives available. Please contact your service \"\n                 \"provider.\"],\n    \"14:60349\": [\"WARNING\", \"ALRT_POOL_DRIVE_EOL_IN_60_DAYS\",\n                 \"The pool has Flash drives of a specific type that are \"\n                 \"predicted to exceed end-of-life thresholds within 60 days. \"\n                 \"The storage system does not have enough free drives of the \"\n                 \"same type to replace them. Add the required drives to the\"\n                 \" pool.\"],\n    \"14:60388\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:60389\": [\"ERROR\", \"ALRT_NAS_FILESERVER_OFFLINE\",\n                 \"The NAS server is not accessible and its services are not \"\n                 \"available. The file system may be temporarily offline. \"\n                 \"Please contact your service provider.\"],\n    \"14:6038b\": [\"ERROR\", \"ALRT_NAS_FILESERVER_FAULTED\",\n                 \"The NAS server is faulted, possibly due to an internal \"\n                 \"error. Please contact your service provider.\"],\n    \"14:6038c\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:6038d\": [\"ERROR\", \"ALRT_NAS_CIFSSERVER_UNJOINED\",\n                 \"The SMB server is no longer joined to the domain. Check \"\n                 \"the network interface and domain settings of the NAS \"\n                 \"server and try to add the SMB server into the domain \"\n                 \"again.\"],\n    \"14:6038e\": [\"ERROR\", \"ALRT_NAS_CIFSSERVER_TIMENOTSYNC\",\n                 \"The current system time is not synchronized with the Active\"\n                 \" Directory controller of the domain. Check the system NTP \"\n                 \"(Network Time Protocol) settings to ensure that your system's\"\n                 \" time is synchronized with the time of the Active Directory \"\n                 \"controller.\"],\n    \"14:6038f\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:60390\": [\"WARNING\", \"ALRT_NAS_FILEINTERFACE_GATEWAY_UNREACHABLE\",\n                 \"The network interface gateway of the NAS server is\"\n                 \" unreachable. Review the NAS server network interface \"\n                 \"settings. 
If the problem persists and the NAS server \"\n                 \"network interface settings are correct, review your network\"\n                 \" environment.\"],\n    \"14:60391\": [\"ERROR\", \"ALRT_NAS_FILEINTERFACE_DUPLICATED_ADDRESS\",\n                 \"The network interface IP address of the NAS server conflicts\"\n                 \" with another host on the same subnet. Review the NAS server\"\n                 \" network interface settings for potential conflicts. If the \"\n                 \"problem persists and the NAS server network interface \"\n                 \"settings are correct, review your network environment.\"],\n    \"14:60392\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to\"\n                 \" see if the problem resolves itself.\"],\n    \"14:60393\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this\"\n                 \" time. This may be an intermittent problem. Please wait to\"\n                 \" see if the problem resolves itself.\"],\n    \"14:60394\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:60396\": [\"ERROR\", \"ALRT_NAS_FILELDAPSERVER_OFFLINE\",\n                 \"The LDAP client configured for the NAS server is offline. \"\n                 \"Try resetting the settings of the LDAP client.\"],\n    \"14:60398\": [\"WARNING\", \"ALRT_NAS_FILELDAPSERVER_FAULTED\",\n                 \"The LDAP client configured for the NAS server has faulted.\"\n                 \" Contact your service provider.\"],\n    \"14:6039b\": [\"WARNING\", \"ALRT_NAS_CIFSSERVER_FAULTED\",\n                 \"The SMB server configured for the NAS server has faulted.\"\n                 \" Contact your service provider.\"],\n    \"14:603a0\": [\"ERROR\", \"ALRT_NAS_CIFSSERVER_OFFLINE\",\n                 \"The SMB server configured for the NAS server is offline. \"\n                 \"Try deleting and recreating it.\"],\n    \"14:603a2\": [\"ERROR\", \"ALRT_NAS_FILENISSERVER_OFFLINE\",\n                 \"The NIS client configured for the NAS server is offline.\"\n                 \" Try resetting the settings of the NIS client.\"],\n    \"14:603a4\": [\"WARNING\", \"ALRT_NAS_FILENISSERVER_FAULTED\",\n                 \"The NIS client configured for the NAS server has faulted. \"\n                 \"Contact your service provider.\"],\n    \"14:603a7\": [\"ERROR\", \"ALRT_NAS_NFSSERVER_OFFLINE\",\n                 \"The NFS server configured for the NAS server is offline. \"\n                 \"Try disabling the NFS server and enabling it again.\"],\n    \"14:603a9\": [\"WARNING\", \"ALRT_NAS_NFSSERVER_FAULTED\",\n                 \"The NFS server configured for the NAS server has faulted. \"\n                 \"Contact your service provider.\"],\n    \"14:603ab\": [\"WARNING\", \"ALRT_NAS_CIFSSERVER_ALL_DC_DOWN\",\n                 \"Domain controller servers configured for the SMB server are\"\n                 \" not reachable. Check network connectivity. 
Ensure that at \"\n                 \"least one domain controller is running and the storage \"\n                 \"system can access it.\"],\n    \"14:603ac\": [\"INFO\", \"ALRT_NAS_CIFSSERVER_SOME_DC_DOWN\",\n                 \"Some domain controller servers configured for the SMB server\"\n                 \" are not reachable.\"],\n    \"14:603ad\": [\"WARNING\", \"ALRT_NAS_FILELDAPSERVER_NOT_CONNECTED\",\n                 \"None of the LDAP servers configured for the LDAP client of\"\n                 \" the NAS server are reachable. Check network connectivity. \"\n                 \"Ensure at least one LDAP server is available and the storage\"\n                 \" system can access it.\"],\n    \"14:603ae\": [\"INFO\", \"ALRT_NAS_FILELDAPSERVER_SOME_SERVERS_DOWN\",\n                 \"One or more LDAP servers configured for the LDAP client of \"\n                 \"the NAS server are not reachable.\"],\n    \"14:603af\": [\"WARNING\", \"ALRT_NAS_FILENISSERVER_WRONG_DOMAIN\",\n                 \"The domain configured for the NIS client of the NAS server \"\n                 \"is not valid. Please modify the domain name for the NIS\"\n                 \" client of the NAS server.\"],\n    \"14:603b0\": [\"INFO\", \"ALRT_NAS_FILENISSERVER_SOME_SERVERS_DOWN\",\n                 \"One or more NIS servers configured for the NIS client of the\"\n                 \" NAS server are not reachable.\"],\n    \"14:603b1\": [\"WARNING\", \"ALRT_NAS_FILENISSERVER_NOT_CONNECTED\",\n                 \"None of the NIS servers configured for the NIS client of the\"\n                 \" NAS server are reachable. Check network connectivity. \"\n                 \"Ensure that at least one NIS server is running and the \"\n                 \"storage system can access it.\"],\n    \"14:603b3\": [\"ERROR\", \"ALRT_NAS_FILEDNSSERVER_OFFLINE\",\n                 \"The DNS client configured for the NAS server is offline. \"\n                 \"Try removing the DNS settings, and then configure the DNS \"\n                 \"client settings on the NAS server again.\"],\n    \"14:603b4\": [\"INFO\", \"ALRT_NAS_FILEDNSSERVER_UNDER_CONSTRUCTION\",\n                 \"The DNS client is initializing.\"],\n    \"14:603b5\": [\"WARNING\", \"ALRT_NAS_FILEDNSSERVER_FAULTED\",\n                 \"The DNS client configured for the NAS server has faulted. \"\n                 \"Contact your service provider.\"],\n    \"14:603b7\": [\"INFO\", \"ALRT_NAS_FILEDNSSERVER_SOME_SERVERS_DOWN\",\n                 \"Some DNS servers configured for the DNS client of the NAS \"\n                 \"server are not reachable.\"],\n    \"14:603b8\": [\"WARNING\", \"ALRT_NAS_FILEDNSSERVER_NOT_CONNECTED\",\n                 \"DNS servers configured for the DNS client of the NAS server \"\n                 \"are not reachable. Check network connectivity. Ensure that \"\n                 \"at least one DNS server is running and the storage system \"\n                 \"can access it.\"],\n    \"14:603b9\": [\"WARNING\", \"ALRT_NAS_FILEINTERFACE_NO_SOURCE\",\n                 \"The file interface was deleted from the replication source\"\n                 \" NAS server, but it still exists on the replication \"\n                 \"destination NAS server. Manually remove the file interface \"\n                 \"from the destination NAS server. 
If this does not help, \"\n                 \"restart management services on the destination storage \"\n                 \"system.\"],\n    \"14:603ba\": [\"WARNING\", \"ALRT_NAS_FILELDAPSERVER_NO_SOURCE\",\n                 \"LDAP settings were deleted from the replication source NAS \"\n                 \"server, but they still exist on the replication destination\"\n                 \" NAS server. Manually remove LDAP settings from the \"\n                 \"destination NAS server. If this does not help, restart \"\n                 \"management services on the destination storage system.\"],\n    \"14:603bb\": [\"WARNING\", \"ALRT_NAS_FILENISSERVER_NO_SOURCE\",\n                 \"NIS settings were deleted from the replication source NAS \"\n                 \"server, but they still exist on the replication destination\"\n                 \" NAS server. Manually remove NIS settings from the \"\n                 \"destination NAS server. If this does not help, restart\"\n                 \" management services on the destination storage system.\"],\n    \"14:603bc\": [\"WARNING\", \"ALRT_NAS_FILEDNSSERVER_NO_SOURCE\",\n                 \"DNS settings were deleted from the replication source NAS \"\n                 \"server, but they still exist on the replication destination\"\n                 \" NAS server. Manually remove DNS settings from the \"\n                 \"destination NAS server. If this does not help, restart \"\n                 \"management services on the destination storage system.\"],\n    \"14:603bd\": [\"WARNING\", \"ALRT_NAS_FILEINTERFACE_OFFLINE\",\n                 \"The NAS server file interface is offline. Contact your \"\n                 \"service provider.\"],\n    \"14:603be\": [\"WARNING\", \"ALRT_NAS_FILELDAPSERVER_BADLY_CONFIGURED\",\n                 \"LDAP client on the NAS server is configured incorrectly. \"\n                 \"Verify the provided LDAP schema, LDAP client account \"\n                 \"settings, Bind Distinguished Name, and password. Check the \"\n                 \"access permissions of the LDAP client account for the \"\n                 \"configured LDAP servers.\"],\n    \"14:603bf\": [\"WARNING\",\n                 \"ALRT_NAS_FILELDAPSERVER_INAPPROPRIATE_AUTHENTICATION\",\n                 \"The LDAP client attempted to perform a type of \"\n                 \"authentication that is not allowed for the target user. \"\n                 \"This may also indicate that the client attempted to perform \"\n                 \"anonymous authentication when that is not allowed. Verify \"\n                 \"the authorization settings for the LDAP client account.\"],\n    \"14:603c0\": [\"WARNING\", \"ALRT_NAS_FILELDAPSERVER_INVALID_CREDENTIALS\",\n                 \"The LDAP client attempted to bind as a user that either does\"\n                 \" not exist, is not allowed to bind, or whose credentials are\"\n                 \" invalid. Verify the LDAP client Bind Distinguished Name and\"\n                 \" password, and the permissions for this account.\"],\n    \"14:603c1\": [\"WARNING\", \"ALRT_NAS_FILELDAPSERVER_INSUFFICIENT_PERMISSIONS\",\n                 \"The LDAP client does not have permission to perform the \"\n                 \"requested operation. 
Verify authorization settings for the \"\n                 \"LDAP client account.\"],\n    \"14:603c2\": [\"WARNING\", \"ALRT_NAS_FILEINTERFACE_NO_DEVICE\",\n                 \"The system is unable to detect an Ethernet port or link \"\n                 \"aggregation on which the NAS server network interface was \"\n                 \"configured. Switch the interface to use another Ethernet \"\n                 \"port or link aggregation. If this does not help, restart the\"\n                 \" management software. If the problem persists, contact your\"\n                 \" service provider.\"],\n    \"14:603ca\": [\"ERROR\", \"ALRT_NAS_CEPP_FAULTED\",\n                 \"The CEPA server configured for the specified NAS server is\"\n                 \" not functional. Verify that the CEPA settings are valid.\"],\n    \"14:603cc\": [\"ERROR\", \"ALRT_NAS_CEPP_NOT_CONNECTED\",\n                 \"All servers configured for the CEPA server of the specified\"\n                 \" NAS server cannot be reached. Verify: 1) That the network\"\n                 \" addresses of the CEPA servers are valid. 2) That the \"\n                 \"network is available and that the CEPA facility is running \"\n                 \"on the CEPA server. 3) The network integrity between the \"\n                 \"storage system and the CEPA server.\"],\n    \"14:603cd\": [\"WARNING\", \"ALRT_NAS_CEPP_SOME_SERVERS_DOWN\",\n                 \"Some servers configured for the CEPA server of the \"\n                 \"specified NAS server cannot be reached. Verify: 1) That the\"\n                 \" network addresses of the CEPA servers are valid. 2) That \"\n                 \"the network is available and that the CEPA facility is \"\n                 \"running on the CEPA server. 3) The network integrity between\"\n                 \" the storage system and the CEPA server.\"],\n    \"14:603d3\": [\"WARNING\", \"ALRT_NAS_FILESERVER_KEYTAB_IS_NOT_UPLOADED\",\n                 \"Secure NFS is not working. Upload a keytab file to the \"\n                 \"specified NAS server.\"],\n    \"14:603e8\": [\"CRITICAL\", \"ALRT_SLIC_FAULT\",\n                 \"An I/O module in your Disk Processor Enclosure has faulted\"\n                 \" and needs to be replaced.\"],\n    \"14:603ea\": [\"ERROR\", \"ALRT_SLIC_MISMATCH\",\n                 \"The I/O modules in the Storage Processors (SP) are \"\n                 \"configured incorrectly. I/O modules must be configured \"\n                 \"symmetrically.\"],\n    \"14:603eb\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:603ed\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:603ee\": [\"CRITICAL\", \"ALRT_SLIC_UNSUPPORT\",\n                 \"An I/O module in your Disk Processor Enclosure is the wrong\"\n                 \" model type. Replace it with a supported model.\"],\n    \"14:603ef\": [\"WARNING\", \"ALRT_SLIC_UNCONFIG\",\n                 \"This I/O module has been inserted into one of your Storage\"\n                 \" Processors (SP) but has not yet been configured. Commit\"\n                 \" the I/O module.\"],\n    \"14:603f3\": [\"WARNING\", \"ALRT_SLIC_MISSING\",\n                 \"A previously configured I/O module is missing. 
Reboot the\"\n                 \" Storage Processors (SP) and then reseat the I/O module.\"],\n    \"14:603f4\": [\"CRITICAL\", \"ALRT_SLIC_UNINITIALIZED\",\n                 \"The inserted I/O module has not been initialized and cannot\"\n                 \" be used. Wait for the system to load drivers that \"\n                 \"initialize the I/O module.\"],\n    \"14:603f5\": [\"INFO\", \"ALRT_SLIC_EMPTY\",\n                 \"The Disk Processor Enclosure (DPE) contains a vacant I/O\"\n                 \" module slot.\"],\n    \"14:603f6\": [\"ERROR\", \"ALRT_SLIC_INCORRECT\",\n                 \"An incorrect type of I/O module has been inserted. The ports\"\n                 \" in this slot have been configured for a different type of\"\n                 \" I/O module. Replace it with a supported I/O module.\"],\n    \"14:603f7\": [\"CRITICAL\", \"ALRT_SLIC_POWEROFF\",\n                 \"The I/O module is powered off. Try rebooting the Storage\"\n                 \" Processor (SP). If the I/O module remains powered off after\"\n                 \" a reboot, you may need to replace the I/O module.\"],\n    \"14:603f8\": [\"CRITICAL\", \"ALRT_SLIC_POWERUPFAILED\",\n                 \"The system was unable to power on this I/O module. Replace\"\n                 \" the I/O module.\"],\n    \"14:603f9\": [\"CRITICAL\", \"ALRT_SLIC_NOTCOMMITTED\",\n                 \"This I/O module will remain unsupported until the current\"\n                 \" software version is committed. Reboot the system to commit\"\n                 \" the software version.\"],\n    \"14:6044c\": [\"CRITICAL\", \"ALRT_SP_FAULT\",\n                 \"The Storage Processor (SP) has faulted. Try rebooting the\"\n                 \" SP. If the fault persists or occurs repeatedly, the SP\"\n                 \" needs to be replaced.\"],\n    \"14:6044d\": [\"CRITICAL\", \"ALRT_SP_MISSING\",\n                 \"A Storage Processor is missing and needs to be reinstalled.\"],\n    \"14:6044e\": [\"WARNING\", \"ALRT_SP_CACHE\",\n                 \"The write cache on the Storage Processor (SP) is \"\n                 \"temporarily disabled. An SP may be in service mode or \"\n                 \"there may be a problem with a hardware component. Check \"\n                 \"related alerts and fix the underlying problems. When the\"\n                 \" problem is fixed, the write cache is automatically re-\"\n                 \"enabled.\"],\n    \"14:6044f\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is\"\n                 \" required.\"],\n    \"14:60450\": [\"CRITICAL\", \"ALRT_SP_PROBLEM\",\n                 \"An issue has occurred with the system software on this\"\n                 \" Storage Processor (SP). Before you proceed, collect service\"\n                 \" information. Ensure that the cables are connected securely\"\n                 \" and not damaged, and then reboot the SP. If rebooting the \"\n                 \"SP does not resolve the issue, reimage the SP. 
If the\"\n                 \"problem still persists, contact your service provider.\"],\n    \"14:60451\": [\"CRITICAL\", \"ALRT_RESCUE_MODE\",\n                 \"You may have manually put the Storage Processor (SP) in \"\n                 \"Service Mode or the SP entered Service Mode due to some\"\n                 \" problem with the SP.\"],\n    \"14:60452\": [\"CRITICAL\", \"ALRT_SP_FAULT_BLADE\",\n                 \"The Storage Processor (SP) has faulted and needs to be \"\n                 \"replaced.\"],\n    \"14:60453\": [\"CRITICAL\", \"ALRT_SP_CABLE_WRONG_SAS\",\n                 \"The SAS port on this Storage Processor (SP) is cabled\"\n                 \" incorrectly. Ensure the SAS port is cabled correctly.\"],\n    \"14:60454\": [\"CRITICAL\", \"ALRT_SP_FAULT_CPU_DIMMS\",\n                 \"The CPU module and memory modules have faulted in this \"\n                 \"Storage Processor (SP). Power cycle the system.\"],\n    \"14:60455\": [\"CRITICAL\", \"ALRT_SP_FAULT_CPU\",\n                 \"The CPU module in this Storage Processor (SP) has faulted. \"\n                 \"You need to replace the SP.\"],\n    \"14:60456\": [\"CRITICAL\", \"ALRT_SP_FAULT_CPU_SLIC0\",\n                 \"The CPU module and I/O module 0 in this Storage Processor \"\n                 \"(SP) have faulted. Power cycle the system.\"],\n    \"14:60457\": [\"CRITICAL\", \"ALRT_SP_FAULT_CPU_SLIC1\",\n                 \"The CPU module and the I/O module 1 in this Storage \"\n                 \"Processor (SP) have faulted. Power cycle the system.\"],\n    \"14:6045a\": [\"CRITICAL\", \"ALRT_SP_FAULT_DIMM0_1\",\n                 \"Memory modules 0 and 1 in this Storage Processor (SP) have\"\n                 \" faulted and need to be replaced.\"],\n    \"14:6045b\": [\"CRITICAL\", \"ALRT_SP_FAULT_DIMM0\",\n                 \"Memory module 0 in this Storage Processor (SP) has faulted \"\n                 \"and needs to be replaced.\"],\n    \"14:6045c\": [\"CRITICAL\", \"ALRT_SP_FAULT_DIMM1\",\n                 \"Memory module 1 in this Storage Processor (SP) has faulted\"\n                 \" and needs to be replaced.\"],\n    \"14:6045d\": [\"CRITICAL\", \"ALRT_SP_FAULT_DIMM2\",\n                 \"Memory module 2 in this Storage Processor (SP) has faulted \"\n                 \"and needs to be replaced.\"],\n    \"14:6045e\": [\"CRITICAL\", \"ALRT_SP_FAULT_DIMMS\",\n                 \"Memory modules in this Storage Processor (SP) have faulted \"\n                 \"and need to be replaced.\"],\n    \"14:6045f\": [\"CRITICAL\", \"ALRT_SP_FAULT_ENCLOSURE\",\n                 \"A fault in the Disk Processor Enclosure (DPE) has placed \"\n                 \"the Storage Processor (SP) in Service Mode. Power cycle \"\n                 \"the system.\"],\n    \"14:60461\": [\"CRITICAL\", \"ALRT_SP_FLAREDB_DISK_FAULT\",\n                 \"A disk on this Storage Processor (SP) has faulted and needs \"\n                 \"to be replaced.\"],\n    \"14:60462\": [\"CRITICAL\", \"ALRT_SP_INVALID_DISK_CONFIG\",\n                 \"The disk configuration on this Storage Processor (SP) is\"\n                 \" unsupported.\"],\n    \"14:60465\": [\"CRITICAL\", \"ALRT_SP_NO_IO_WITH_LCC\",\n                 \"The Storage Processor (SP) is unable to communicate with the\"\n                 \" link control card (LCC) and has been put into Service Mode.\"\n                 \" You need to shut down the system. 
From Unisphere, click\"\n                 \" Settings > Service System, enter your service password and\"\n                 \" click Shut Down System.\"],\n    \"14:60466\": [\"CRITICAL\", \"ALRT_SP_NO_SAS_PORT\",\n                 \"Unable to detect the SAS port on this Storage Processor\"\n                 \" (SP).\"],\n    \"14:60467\": [\"CRITICAL\", \"ALRT_SP_FAULT_POST\",\n                 \"The Power-On Self Test (POST) failed to run on this Storage\"\n                 \" Processor (SP). You need to replace this SP.\"],\n    \"14:60469\": [\"CRITICAL\", \"ALRT_SP_FAULT_SLIC0\",\n                 \"I/O module 0 in this Storage Processor (SP) has faulted and\"\n                 \" needs to be replaced.\"],\n    \"14:6046a\": [\"CRITICAL\", \"ALRT_SP_FAULT_SLIC1\",\n                 \"I/O module 1 in the Storage Processor (SP) has faulted and \"\n                 \"needs to be replaced.\"],\n    \"14:6046b\": [\"CRITICAL\", \"ALRT_SP_FAULT_SSD\",\n                 \"A solid state disk (SSD) in this Storage Processor (SP) has\"\n                 \" faulted and needs to be replaced.\"],\n    \"14:6046c\": [\"CRITICAL\", \"ALRT_SP_UNEXPECTED\",\n                 \"An unexpected error has occurred with the Storage Processor\"\n                 \" (SP). Try rebooting the SP.\"],\n    \"14:6046f\": [\"WARNING\", \"ALRT_SP_START\",\n                 \"The Storage Processor (SP) is currently rebooting. Please\"\n                 \" wait for the SP to reboot.\"],\n    \"14:60470\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:60471\": [\"WARNING\", \"ALRT_SP_USER_SERVICE_MODE\",\n                 \"The Storage Processor (SP) has been manually put into\"\n                 \" Service Mode.\"],\n    \"14:60472\": [\"CRITICAL\", \"ALRT_SP_IO_MISCONFIGURED\",\n                 \"This I/O module has been inserted into one of your Storage \"\n                 \"Processors (SPs) but is not yet configured. Reboot the SP \"\n                 \"and commit the I/O module.\"],\n    \"14:60473\": [\"ERROR\", \"ALRT_SP_HUNG\",\n                 \"The Storage Processor (SP) did not restart successfully.\"\n                 \" Wait 5 minutes to see if the problem resolves itself. If \"\n                 \"the problem persists, you will need to restart the SP.\"],\n    \"14:60474\": [\"CRITICAL\", \"ALRT_SP_SYS_DISK_TYPE\",\n                 \"A replacement disk should be the same type (SAS, SATA, \"\n                 \"FLASH) as the disk it is replacing.\"],\n    \"14:60475\": [\"CRITICAL\", \"ALRT_SP_SYS_DISK_BLK\",\n                 \"A replacement disk should be the same type (SAS, SATA, \"\n                 \"FLASH) and have the same capacity (size and speed) as the \"\n                 \"disk it is replacing.\"],\n    \"14:60476\": [\"CRITICAL\", \"ALRT_SP_SYS_DISK_SIZE\",\n                 \"A replacement disk should have the same capacity (size and \"\n                 \"speed) as the disk it is replacing.\"],\n    \"14:60477\": [\"CRITICAL\", \"ALRT_SP_DPE_SN\",\n                 \"The Product ID / SN cannot be read from the Disk Processor\"\n                 \" Enclosure (DPE) and will need to be reprogrammed. 
Contact\"\n                 \" your service provider for assistance.\"],\n    \"14:60479\": [\"CRITICAL\", \"ALRT_SP_FAULT_DIMM0_2\",\n                 \"Memory modules 0 and 2 in this Storage Processor (SP) have\"\n                 \" faulted and need to be replaced.\"],\n    \"14:6047a\": [\"CRITICAL\", \"ALRT_SP_FAULT_DIMM1_3\",\n                 \"Memory modules 1 and 3 in this Storage Processor (SP) have \"\n                 \"faulted and need to be replaced.\"],\n    \"14:6047b\": [\"CRITICAL\", \"ALRT_SP_FAULT_DIMM2_3\",\n                 \"Memory modules 2 and 3 in this Storage Processor (SP) have\"\n                 \" faulted and need to be replaced.\"],\n    \"14:6047c\": [\"CRITICAL\", \"ALRT_SP_FAULT_DIMM3\",\n                 \"Memory module 3 in this Storage Processor (SP) has faulted \"\n                 \"and needs to be replaced.\"],\n    \"14:6047d\": [\"WARNING\", \"ALRT_SP_UNSAFE_REMOVE\",\n                 \"The Storage Processor (SP) is unsafe to remove. Wait for the\"\n                 \" Unsafe to Remove LED to turn off.\"],\n    \"14:6047e\": [\"WARNING\", \"ALRT_SP_DEGRADED\",\n                 \"The Storage Processor (SP) is operating in a degraded \"\n                 \"state. Check the system logs or other alerts to identify and\"\n                 \" fix the issue. If the problem persists, you may need to \"\n                 \"replace the SP.\"],\n    \"14:60482\": [\"WARNING\", \"ALRT_SP_READ_CACHE_DISABLED\",\n                 \"The read cache on the Storage Processor (SP) is temporarily\"\n                 \" disabled. An SP may be in service mode or there may be\"\n                 \" a problem with a hardware component. Check related alerts\"\n                 \" and fix the underlying problems. When the problem is fixed,\"\n                 \" the read cache is automatically re-enabled. If one SP is in\"\n                 \" service mode, rebooting the active SP will re-enable the\"\n                 \" read cache.\"],\n    \"14:60483\": [\"CRITICAL\", \"ALRT_SP_SHUTDOWN\",\n                 \"There was a problem shutting down a Storage Processor (SP).\"\n                 \" Power-cycle the SP manually.\"],\n    \"14:60485\": [\"CRITICAL\", \"ALRT_SP_SASEXPANDER_FAULT\",\n                 \"The SAS expander in the Storage Processor (SP) has faulted.\"\n                 \" Check the system logs and try rebooting the SP. If the \"\n                 \"problem persists, you may need to replace the SP.\"],\n    \"14:60486\": [\"WARNING\", \"ALRT_SP_SASEXPANDER_DEGRADED\",\n                 \"The SAS expander in the Storage Processor (SP) is operating\"\n                 \" in a degraded mode. Check system logs and try rebooting the\"\n                 \" SP. If the problem persists, you may need to replace the\"\n                 \" SP.\"],\n    \"14:60487\": [\"CRITICAL\", \"ALRT_SP_RESCUE_DISK_UNKNOWN\",\n                 \"The system is unable to run the disk check. Disk status \"\n                 \"cannot be determined.\"],\n    \"14:60488\": [\"ERROR\", \"ALRT_SP_SHUTDOWN_WARNING\",\n                 \"The Storage Processor (SP) is shutting down. The temperature\"\n                 \" of the storage system may be too high to support safe \"\n                 \"operation. Check the system logs and other alerts to \"\n                 \"identify the issue. 
If the problem persists, contact your\"\n                 \" service provider.\"],\n    \"14:60489\": [\"ERROR\", \"ALRT_SP_AMBIENT_TEMPERATURE_FAULT\",\n                 \"The ambient temperature of the Storage Processor (SP) is\"\n                 \" high. Ensure that the fan modules are operating normally\"\n                 \" and the environment temperature is OK.\"],\n    \"14:6048a\": [\"CRITICAL\", \"ALRT_VVNX_SP_FAULT\",\n                 \"The Storage Processor (SP) has faulted. Reboot or re-image\"\n                 \" the SP using service actions, or reboot the SP using \"\n                 \"vSphere. If the problem persists, contact your service\"\n                 \" provider.\"],\n    \"14:6048b\": [\"WARNING\", \"ALRT_VVNX_SP_DEGRADED\",\n                 \"The Storage Processor is operating in a degraded state. \"\n                 \"Check the system logs and other alerts to identify the \"\n                 \"issue.\"],\n    \"14:6048c\": [\"WARNING\", \"ALRT_VVNX_SP_READ_CACHE_DISABLED\",\n                 \"The read cache on the Storage Processor is temporarily \"\n                 \"disabled. Check related alerts and fix the underlying \"\n                 \"problems. When the problems are fixed, the read cache is \"\n                 \"automatically re-enabled.\"],\n    \"14:6048d\": [\"ERROR\", \"ALRT_VVNX_SP_SHUTDOWN_WARNING\",\n                 \"The Storage Processor is shutting down. Check the system \"\n                 \"logs and other alerts to identify the issue.\"],\n    \"14:6048e\": [\"CRITICAL\", \"ALRT_VVNX_SP_SHUTDOWN\",\n                 \"There was a problem shutting down a Storage Processor (SP).\"\n                 \" Reboot the SP using the hypervisor.\"],\n    \"14:6048f\": [\"CRITICAL\", \"ALRT_VVNX_SP_PROBLEM\",\n                 \"An issue has occurred with the system software on this \"\n                 \"Storage Processor (SP). Before you proceed, collect service\"\n                 \" information. Reboot the SP using service actions or the \"\n                 \"hypervisor. If the issue persists, contact your service \"\n                 \"provider or refer to the EMC community forums.\"],\n    \"14:60490\": [\"CRITICAL\", \"ALRT_VVNX_HARDWARE_CONFIG_UNSUPPORTED\",\n                 \"Virtual hardware configuration does not match the supported\"\n                 \" profiles. Try fixing the configuration from the hypervisor\"\n                 \" hosting the virtual machine. Run the svc_diag -b command \"\n                 \"on the SP for more information.\"],\n    \"14:60491\": [\"CRITICAL\", \"ALRT_SP_SAS_EXP_FW_UPG_FAIL\",\n                 \"Firmware upgrade for the SAS Expander on the storage \"\n                 \"processor has failed. Contact your service provider.\"],\n    \"14:60492\": [\"WARNING\", \"ALRT_SP_HELD_IN_RESET\",\n                 \"The storage processor has been placed in a held-in-reset \"\n                 \"state, a special service state for performing certain\"\n                 \" hardware services. Reboot the SP when the hardware service\"\n                 \" is completed.\"],\n    \"14:60493\": [\"CRITICAL\", \"ALRT_BAD_DIMM_CONFIG\",\n                 \"An incorrect amount or configuration of memory has placed\"\n                 \" the storage processor (SP) in service mode. Fix the memory\"\n                 \" configuration and then reboot the SP. 
For more information,\"\n                 \" refer to the Customer Replacement Procedure Replacing a\"\n                 \" faulted memory module located at EMC Online Support \"\n                 \"(https://support.emc.com).\"],\n    \"14:604b0\": [\"CRITICAL\", \"ALRT_SSD_FAULT\",\n                 \"A solid state disk (SSD) in a Storage Processor (SP) has\"\n                 \" faulted and needs to be replaced.\"],\n    \"14:604b1\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:604b2\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to\"\n                 \" see if the problem resolves itself.\"],\n    \"14:604b3\": [\"CRITICAL\", \"ALRT_SSD_REMOVED\",\n                 \"A solid state disk (SSD) in a Storage Processor (SP) has \"\n                 \"been removed and needs to be reinstalled.\"],\n    \"14:604b4\": [\"WARNING\", \"ALRT_SSD_FAILING\",\n                 \"This solid state drive (SSD) is reaching the end of its\"\n                 \" service life expectancy and needs to be replaced.\"],\n    \"14:60514\": [\"ERROR\", \"ALRT_SYSTEM_CRITICAL\",\n                 \"The system has experienced one or more failures, which may\"\n                 \" result in data loss. You need to take immediate action.\"\n                 \" Check related alerts and fix the underlying problems.\"],\n    \"14:60515\": [\"WARNING\", \"ALRT_SYSTEM_DEGRADED\",\n                 \"The system has experienced one or more failures resulting \"\n                 \"in degraded system performance. Check related alerts and \"\n                 \"fix the underlying problems.\"],\n    \"14:60516\": [\"ERROR\", \"ALRT_SYSTEM_MAJOR_FAILURE\",\n                 \"The system has experienced one or more major failures, \"\n                 \"which have significant impact on the system. You need to \"\n                 \"take immediate action. Check related alerts and fix the \"\n                 \"underlying problems.\"],\n    \"14:60517\": [\"ERROR\", \"ALRT_SYSTEM_MINOR_FAILURE\",\n                 \"The system has experienced one or more minor failures. Check\"\n                 \" related alerts and fix the underlying problems.\"],\n    \"14:60518\": [\"CRITICAL\", \"ALRT_SYSTEM_NON_RECOVERABLE\",\n                 \"The system has experienced one or more nonrecoverable \"\n                 \"failures, which may have resulted in data loss. Use the \"\n                 \"System Health page to see the health state of hardware \"\n                 \"and system components.\"],\n    \"14:60519\": [\"INFO\", \"ALRT_SYSTEM_OK\",\n                 \"The system is operating normally.\"],\n    \"14:6051a\": [\"INFO\", \"ALRT_SYSTEM_UNKNOWN\",\n                 \"The system health cannot be determined. Check related alerts\"\n                 \" and fix the underlying problems.\"],\n    \"14:60579\": [\"ERROR\", \"ALRT_SAS_PORT_LINK_DOWN\",\n                 \"The SAS cable connected to the SAS port may not be connected\"\n                 \" securely, or may be damaged or missing.\"],\n    \"14:6057a\": [\"INFO\", \"ALRT_PORT_LINK_DOWN_NOT_IN_USE\",\n                 \"The port link is down, but not in use. 
No action is \"\n                 \"required.\"],\n    \"14:6057b\": [\"ERROR\", \"ALRT_PORT_MGT_NOT_CONNECTED\",\n                 \"The network connection to the management port has been lost.\"\n                 \" Check the cable and network configuration.\"],\n    \"14:6057c\": [\"INFO\", \"ALRT_PORT_LINK_UP\",\n                 \"The port is operating normally.\"],\n    \"14:6057d\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:60580\": [\"ERROR\", \"ALRT_PORT_LINK_DOWN\",\n                 \"The port has lost communication with the network.\"],\n    \"14:60581\": [\"ERROR\", \"ALRT_DAE_TOO_MANY\",\n                 \"The number of Disk Array Enclosures (DAEs) added has \"\n                 \"exceeded the maximum allowed. Remove the newly \"\n                 \"attached DAE.\"],\n    \"14:60582\": [\"WARNING\", \"ALRT_SASPORT_DEGRADED\",\n                 \"The SAS port on the Storage Processor (SP) is operating in \"\n                 \"a degraded mode. You may need to replace the SP that \"\n                 \"contains the degraded component.\"],\n    \"14:60583\": [\"CRITICAL\", \"ALRT_SASPORT_UNINITIALIZED\",\n                 \"A SAS port on your system is not initialized. Identify the \"\n                 \"SAS port, check the system log for hardware errors or \"\n                 \"warnings. If the problem persists, you may need to replace\"\n                 \" the Storage Processor (SP).\"],\n    \"14:60584\": [\"INFO\", \"ALRT_SASPORT_EMPTY\",\n                 \"The Disk Processor Enclosure (DPE) contains a vacant \"\n                 \"SAS port.\"],\n    \"14:60585\": [\"WARNING\", \"ALRT_SASPORT_MISSING\",\n                 \"The Storage Processor (SP) cannot detect a previously \"\n                 \"configured SAS port. Check system logs and reboot the SP. \"\n                 \"If the problem persists, you may need to replace the SP.\"],\n    \"14:60586\": [\"CRITICAL\", \"ALRT_SASPORT_FAULT\",\n                 \"A SAS port has faulted. Replace the Storage Processor (SP) \"\n                 \"containing the faulted port.\"],\n    \"14:60587\": [\"CRITICAL\", \"ALRT_SASPORT_UNAVAILABLE\",\n                 \"A SAS port is not available. Check system logs and reboot\"\n                 \" the Storage Processor (SP). If the problem persists, you\"\n                 \" may need to replace the SP.\"],\n    \"14:60589\": [\"CRITICAL\", \"ALRT_SASPORT_SFP_REMOVED\",\n                 \"A Small Form-factor Pluggable (SFP) module in one of the \"\n                 \"SAS ports on your Storage Processor (SP) has been removed.\"\n                 \" Reinsert a supported SFP module.\"],\n    \"14:6058a\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:6058d\": [\"WARNING\", \"ALRT_ETHERNETPORT_DEGRADED\",\n                 \"Performance of an Ethernet port has degraded. Identify \"\n                 \"the Ethernet port, check the cabling and network \"\n                 \"configuration. If the problem persists, you may need to \"\n                 \"replace the Storage Processor (SP).\"],\n    \"14:6058e\": [\"CRITICAL\", \"ALRT_ETHERNETPORT_UNINITIALIZED\",\n                 \"An Ethernet port on your system is not initialized. 
Identify\"\n                 \"  the Ethernet port, check the cabling, and network \"\n                 \"configuration. If the problem persists, you may need to \"\n                 \"replace the Storage Processors (SPs).\"],\n    \"14:6058f\": [\"INFO\", \"ALRT_ETHERNETPORT_EMPTY\",\n                 \"The Disk Processor Enclosure (DPE) contains a vacant \"\n                 \"Ethernet port.\"],\n    \"14:60590\": [\"WARNING\", \"ALRT_ETHERNETPORT_MISSING\",\n                 \"The system is unable to detect an Ethernet port on the \"\n                 \"Storage Processor (SP). Check system logs and reboot the \"\n                 \"SP. If the problem persists, you may need to replace the \"\n                 \"SP.\"],\n    \"14:60591\": [\"CRITICAL\", \"ALRT_ETHERNETPORT_FAULTED\",\n                 \"An Ethernet port has faulted. Check system log for hardware\"\n                 \" errors or warnings and try rebooting the Storage Processor \"\n                 \"(SP). If the problem persists, you may need to replace the \"\n                 \"I/O module or the SP containing the faulted port.\"],\n    \"14:60592\": [\"CRITICAL\", \"ALRT_ETHERNETPORT_UNAVAILABLE\",\n                 \"An Ethernet port on the Storage Processor (SP) is not \"\n                 \"available. Please check the cable and network configuration,\"\n                 \" and then restart the SP. If the problem persists, you may\"\n                 \" need to replace the SP.\"],\n    \"14:60593\": [\"CRITICAL\", \"ALRT_ETHERNETPORT_DISABLED\",\n                 \"An Ethernet port on your Storage Processor (SP) is disabled.\"\n                 \" Please check system logs, cabling, and network \"\n                 \"configuration, and then restart the SP. If the problem \"\n                 \"persists, you may need to replace the SP.\"],\n    \"14:60594\": [\"ERROR\", \"ALRT_PORT_LINK_DOWN\",\n                 \"The port has lost communication with the network.\"],\n    \"14:60595\": [\"CRITICAL\", \"ALRT_ETHERNETPORT_SFP_UNSUPPORT\",\n                 \"The Small Form-factor Pluggable (SFP) module inserted in \"\n                 \"this Ethernet port is not supported. Replace it with a\"\n                 \" supported SFP module.\"],\n    \"14:60596\": [\"ERROR\", \"ALRT_ETHERNETPORT_SFP_FAULT\",\n                 \"The Small Form-factor Pluggable (SFP) module in this\"\n                 \" Ethernet port has faulted and needs to be replaced.\"],\n    \"14:60597\": [\"ERROR\", \"ALRT_ETHERNETPORT_OVERLIMIT\",\n                 \"This Ethernet port cannot be used because it exceeds \"\n                 \"the number of supported ports. Remove the I/O module that\"\n                 \" contains this port.\"],\n    \"14:60598\": [\"CRITICAL\", \"ALRT_ETHERNETPORT_INCORRECTSLIC\",\n                 \"An incorrect type of I/O module has been inserted. The\"\n                 \" system does not support the Ethernet port configuration for\"\n                 \" this port. Replace the I/O module.\"],\n    \"14:60599\": [\"CRITICAL\", \"ALRT_SASPORT_SFP_UNSUPPORT\",\n                 \"The Small Form-factor Pluggable (SFP) module inserted in \"\n                 \"this SAS port is not supported. 
Replace it with a supported\"\n                 \" SFP module.\"],\n    \"14:6059a\": [\"CRITICAL\", \"ALRT_SASPORT_SFP_FAULT\",\n                 \"The Small Form-factor Pluggable (SFP) module in this SAS \"\n                 \"port has faulted and needs to be replaced.\"],\n    \"14:6059b\": [\"ERROR\", \"ALRT_SASPORT_OVERLIMIT\",\n                 \"This SAS port cannot be used because it exceeds the number\"\n                 \" of supported ports. Remove the I/O module that contains\"\n                 \" this port.\"],\n    \"14:6059c\": [\"CRITICAL\", \"ALRT_SASPORT_INCORRECTSLIC\",\n                 \"An incorrect type of I/O module has been inserted. The\"\n                 \" system does not support the SAS port configuration for this\"\n                 \" port. Replace the I/O module.\"],\n    \"14:6059d\": [\"WARNING\", \"ALRT_ETHERNETPORT_SFP_REMOVED\",\n                 \"The Small Form-factor Pluggable (SFP) module in this\"\n                 \" Ethernet port has been removed. Since the port is in use,\"\n                 \" reinsert a supported SFP module.\"],\n    \"14:6059e\": [\"WARNING\", \"ALRT_VVNX_ETHERNETPORT_DEGRADED\",\n                 \"Performance of an Ethernet port has degraded. Identify the \"\n                 \"Ethernet port on the System View page, and then check the \"\n                 \"network configuration.\"],\n    \"14:6059f\": [\"CRITICAL\", \"ALRT_VVNX_ETHERNETPORT_UNINITIALIZED\",\n                 \"An Ethernet port on your system is not initialized. Identify\"\n                 \" the Ethernet port using the System View page, and then\"\n                 \" check the network configuration.\"],\n    \"14:605a0\": [\"CRITICAL\", \"ALRT_VVNX_ETHERNETPORT_FAULTED\",\n                 \"An Ethernet port on your system has faulted. Check the \"\n                 \"system log for errors or warnings, and then reboot the \"\n                 \"Storage Processor.\"],\n    \"14:605a1\": [\"CRITICAL\", \"ALRT_VVNX_ETHERNETPORT_UNAVAILABLE\",\n                 \"An Ethernet port on your system is not available. Identify\"\n                 \" the Ethernet port using the System Health page, check the \"\n                 \"network configuration, and then restart the SP.\"],\n    \"14:605a2\": [\"CRITICAL\", \"ALRT_VVNX_ETHERNETPORT_DISABLED\",\n                 \"An Ethernet port on your system is disabled. Check the \"\n                 \"system logs and network configuration, and then reboot the\"\n                 \" Storage Processor.\"],\n    \"14:605a3\": [\"INFO\", \"ALRT_ETHERNETPORT_SFP_REMOVED_NOT_IN_USE\",\n                 \"The Small Form-factor Pluggable (SFP) module in this \"\n                 \"Ethernet port has been removed. Since the port is not in\"\n                 \" use, no action is required.\"],\n    \"14:605a4\": [\"ERROR\", \"ALRT_VVNX_PORT_MGT_NOT_CONNECTED\",\n                 \"The network connection to the management port has been lost.\"\n                 \" Check the virtual environment and network configuration.\"],\n    \"14:605a5\": [\"INFO\", \"ALRT_ETHERNETPORT_SFP_ASYMMETRIC\",\n                 \"The SFPs in the Storage Processor (SP) are configured\"\n                 \" incorrectly. Its supported speeds are asymmetric with \"\n                 \"its peer's.\"],\n    \"14:605dc\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. 
Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:605dd\": [\"CRITICAL\", \"ALRT_CM_REMOVED\",\n                 \"The cooling module has been removed. Insert the cooling\"\n                 \" module again.\"],\n    \"14:605de\": [\"WARNING\", \"ALRT_CM_SINGLE_FAULT\",\n                 \"One of the fans in the cooling module has faulted. Replace\"\n                 \" the cooling module.\"],\n    \"14:605df\": [\"CRITICAL\", \"ALRT_CM_MULTI_FAULT\",\n                 \"More than one of the fans in the cooling module have\"\n                 \" faulted. Replace the cooling module.\"],\n    \"14:605e0\": [\"CRITICAL\", \"ALRT_CM_SMBUS_ACCESS_FAULT\",\n                 \"The cooling module has an access issue. Reinsert the \"\n                 \"cooling module. If the fault persists or occurs \"\n                 \"repeatedly, you may need to replace the cooling module.\"],\n    \"14:605e1\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:605e2\": [\"CRITICAL\", \"ALRT_CM_UPG_FAIL\",\n                 \"Firmware upgrade for the cooling module has failed. Contact\"\n                 \" your service provider.\"],\n    \"14:605e3\": [\"CRITICAL\", \"ALRT_CM_FAULT\",\n                 \"A cooling module has faulted. Replace the cooling module.\"],\n    \"14:60641\": [\"WARNING\", \"ALRT_DPE_FAILED_COMPONENT\",\n                 \"The Disk Processor Enclosure (DPE) has one or more faulted\"\n                 \" components.\"],\n    \"14:60642\": [\"CRITICAL\", \"ALRT_DPE_INVALID_DRIVE\",\n                 \"There is an invalid disk in the Disk Processor Enclosure \"\n                 \"(DPE). Replace the disk with the correct disk type.\"],\n    \"14:60643\": [\"CRITICAL\", \"ALRT_DPE_FAULT\",\n                 \"The Disk Processor Enclosure (DPE) has faulted. This may \"\n                 \"have occurred because of a faulted subcomponent. Identify \"\n                 \"and fix the issue with the subcomponent. If the problem \"\n                 \"persists, contact your service provider.\"],\n    \"14:60645\": [\"CRITICAL\", \"ALRT_DPE_MISCONFIGURED\",\n                 \"The Disk Processor Enclosure (DPE) has been cabled or \"\n                 \"configured incorrectly. Refer to the Installation Guide for\"\n                 \" installation and cabling instructions. Go to the support\"\n                 \" website to access the latest product documentation.\"],\n    \"14:60646\": [\"CRITICAL\", \"ALRT_DPE_MISCONFIGURED\",\n                 \"The Disk Processor Enclosure (DPE) has been cabled or \"\n                 \"configured incorrectly. Refer to the Installation Guide for\"\n                 \" installation and cabling instructions. Go to the support\"\n                 \" website to access the latest product documentation.\"],\n    \"14:60648\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. 
No action is \"\n                 \"required.\"],\n    \"14:60649\": [\"CRITICAL\", \"ALRT_PWR_SUPPLY_FAULT\",\n                 \"A power supply in your system has faulted and needs to be\"\n                 \" replaced.\"],\n    \"14:6064a\": [\"CRITICAL\", \"ALRT_SLIC_FAULT\",\n                 \"An I/O module in your Disk Processor Enclosure has faulted\"\n                 \" and needs to be replaced.\"],\n    \"14:6064b\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to\"\n                 \" see if the problem resolves itself.\"],\n    \"14:6064e\": [\"CRITICAL\", \"ALRT_DPE_CROSSCABLED\",\n                 \"The cabling from Disk Processor Enclosure (DPE) to Disk \"\n                 \"Array Enclosure (DAE) is incorrect.\"],\n    \"14:6064f\": [\"WARNING\", \"ALRT_DPE_TEMPERATURE_WARNING\",\n                 \"The Disk Processor Enclosure (DPE) temperature has reached\"\n                 \" the warning threshold. This may lead to the DPE shutting \"\n                 \"down. Check the hardware, environmental temperature, system\"\n                 \" logs, and other alerts to identify and fix the issue. If \"\n                 \"the problem persists, contact your service provider.\"],\n    \"14:60650\": [\"ERROR\", \"ALRT_DPE_TEMPERATURE_FAULT\",\n                 \"The Disk Processor Enclosure (DPE) temperature has reached \"\n                 \"the failure threshold. The DPE will shut down shortly. \"\n                 \"Check the hardware, environmental temperature, system logs,\"\n                 \" and other alerts to identify and fix the issue. If the \"\n                 \"problem persists, contact your service provider.\"],\n    \"14:60651\": [\"ERROR\", \"ALRT_DPE_FAULT_DRIVE_FAULT\",\n                 \"The Disk Processor Enclosure (DPE) has faulted. This may\"\n                 \" have occurred because of a faulted disk. Identify and fix \"\n                 \"the issue with the disk. If the problem persists, contact \"\n                 \"your service provider.\"],\n    \"14:60652\": [\"ERROR\", \"ALRT_DPE_FAULT_POWERSUPPLY_FAULT\",\n                 \"The Disk Processor Enclosure (DPE) has faulted. This may\"\n                 \" have occurred because of a faulted power supply. Identify\"\n                 \" and fix the issue with the power supply. If the problem \"\n                 \"persists, contact your service provider.\"],\n    \"14:60653\": [\"ERROR\", \"ALRT_DPE_FAULT_FAN_FAULT\",\n                 \"The Disk Processor Enclosure (DPE) has faulted. This may \"\n                 \"have occurred because of a faulted cooling module. Identify\"\n                 \" and fix the issue with the cooling module. If the problem\"\n                 \" persists, contact your service provider.\"],\n    \"14:60654\": [\"ERROR\", \"ALRT_DPE_FAULT_SP_FAULT\",\n                 \"The Disk Processor Enclosure (DPE) has faulted. This may\"\n                 \" have occurred because of a faulted SP. Identify and fix \"\n                 \"the issue with the SP. If the problem persists, contact \"\n                 \"your service provider.\"],\n    \"14:60655\": [\"ERROR\", \"ALRT_DPE_FAULT_IOPORT_FAULT\",\n                 \"The Disk Processor Enclosure (DPE) has faulted. This may\"\n                 \" have occurred because of a faulted port. Identify and fix \"\n                 \"the issue with the port. 
If the problem persists, contact\"\n                 \" your service provider.\"],\n    \"14:60656\": [\"ERROR\", \"ALRT_DPE_FAULT_IOMODULE_FAULT\",\n                 \"The Disk Processor Enclosure (DPE) has faulted. This may\"\n                 \" have occurred because of a faulted I/O module. Identify \"\n                 \"and fix the issue with the I/O module. If the problem \"\n                 \"persists, contact your service provider.\"],\n    \"14:60658\": [\"ERROR\", \"ALRT_DPE_FAULT_DIMM_FAULT\",\n                 \"The Disk Processor Enclosure (DPE) has faulted. This may \"\n                 \"have occurred because of a faulted memory module. Identify\"\n                 \" and fix the issue with the memory module. If the problem\"\n                 \" persists, contact your service provider.\"],\n    \"14:60659\": [\"ERROR\", \"ALRT_DPE_NO_REASON_FAILURE\",\n                 \"The DPE fault LED is on but no specific fault is detected;\"\n                 \" this could be a transient state. Please contact your \"\n                 \"service provider if the issue persists.\"],\n    \"14:6065a\": [\"CRITICAL\", \"ALRT_DPE_FAULT_LCC_FAULT\",\n                 \"The fault LED on the Disk Processor Enclosure (DPE) is on.\"\n                 \" This may have occurred because of an issue with the Link \"\n                 \"Control Card (LCC) cables connecting to the DPE. Replace \"\n                 \"LCC cables to the enclosure first. If it does not solve the\"\n                 \" problem, replace the LCC(s) in the enclosure.\"],\n    \"14:6065b\": [\"CRITICAL\", \"ALRT_DPE_FAULT\",\n                 \"The Disk Processor Enclosure (DPE) has faulted. This may\"\n                 \" have occurred because of a faulted internal component.\"\n                 \" Power-cycle the enclosure first. If it does not solve the \"\n                 \"problem, replace the enclosure.\"],\n    \"14:606a4\": [\"CRITICAL\", \"ALRT_MEMORY_FAULT\",\n                 \"This memory module has faulted and needs to be replaced.\"],\n    \"14:606a5\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:606a6\": [\"ERROR\", \"ALRT_MEMORY_REMOVED\",\n                 \"This memory module has been removed and needs to be \"\n                 \"reinstalled.\"],\n    \"14:606a7\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:606a8\": [\"ERROR\", \"ALRT_MEMORY_INCORRECT_SPEED\",\n                 \"This memory module speed is not correct for your storage \"\n                 \"processor model and needs to be replaced.\"],\n    \"14:60708\": [\"WARNING\", \"ALRT_CACHE_DEGRADED\",\n                 \"The cache protection module is operating in a degraded mode.\"\n                 \" Replace the failing component.\"],\n    \"14:60709\": [\"CRITICAL\", \"ALRT_CACHE_FAULT\",\n                 \"The cache protection module has faulted and needs to be \"\n                 \"replaced.\"],\n    \"14:6070a\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. 
No action is\"\n                 \" required.\"],\n    \"14:6070b\": [\"CRITICAL\", \"ALRT_CACHE_MISSING\",\n                 \"The cache protection module has been removed and needs to\"\n                 \" be reinstalled.\"],\n    \"14:6070c\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:6076c\": [\"ERROR\", \"ALRT_REPL_CONN_FAULT\",\n                 \"The connection with this remote replication host has been \"\n                 \"lost. On the Replication Connections page, click the Verify \"\n                 \"and Update Connection button.\"],\n    \"14:6076d\": [\"INFO\", \"ALRT_REPL_CONN_OK\",\n                 \"Communication with the replication host is established. \"\n                 \"No action is required.\"],\n    \"14:6076e\": [\"WARNING\", \"ALRT_REPL_FAIL_OVR\",\n                 \"This replication session has failed over.\"],\n    \"14:6076f\": [\"CRITICAL\", \"REPL_RECREATE\",\n                 \"This replication session has encountered an error. Try\"\n                 \" pausing, and then resuming the replication session. If\"\n                 \" the problem persists, delete, and then create the \"\n                 \"replication session again.\"],\n    \"14:60770\": [\"ERROR\", \"ALRT_REPL_CONN_FAULT\",\n                 \"The connection with this remote replication host has been \"\n                 \"lost. On the Replication Connections page, click the Verify\"\n                 \" and Update Connection button.\"],\n    \"14:60771\": [\"INFO\", \"ALRT_REPL_OK\",\n                 \"This replication session is operating normally. No action is\"\n                 \" required.\"],\n    \"14:60772\": [\"WARNING\", \"ALRT_REPL_PAUSED\",\n                 \"This replication session has been paused. Try resuming the \"\n                 \"replication session. If the problem persists, delete, and \"\n                 \"then create the replication session again.\"],\n    \"14:60773\": [\"WARNING\", \"ALRT_REPL_SWITCHED\",\n                 \"This replication session has been switched over to the\"\n                 \" destination site.\"],\n    \"14:60774\": [\"INFO\", \"ALRT_REPL_CONN_UPDATE\",\n                 \"This replication connection is currently being updated.\"\n                 \" Please wait a few minutes for the connection to become \"\n                 \"available again.\"],\n    \"14:60775\": [\"WARNING\", \"ALRT_REPL_UPDATE_NEEDED\",\n                 \"The destination storage resource associated with this \"\n                 \"replication session has multiple source storage resources\"\n                 \" replicating to it. This may cause inconsistencies when \"\n                 \"setting up replication for new LUNs. 
Delete replication \"\n                 \"sessions from all but one of the source storage resources \"\n                 \"to this destination.\"],\n    \"14:60777\": [\"CRITICAL\", \"ALRT_REPL_GP_INCONSISTENT_MAP\",\n                 \"Member file systems of the source NAS server are replicating\"\n                 \" to file systems outside the destination NAS server.\"],\n    \"14:60778\": [\"CRITICAL\", \"ALRT_REPL_PARENT_NOT_REPL\",\n                 \"The member file system is replicating, but the parent NAS\"\n                 \" server cannot replicate.\"],\n    \"14:60779\": [\"INFO\", \"ALRT_REPL_REMOTESYS_UP_TO_DATE\",\n                 \"Update the remote system connection to pick up the latest\"\n                 \" interface changes on the local and remote systems.\"],\n    \"14:6077a\": [\"CRITICAL\", \"ALRT_REPL_MEM_STATE_MISMATCH\",\n                 \"The replication sessions for member file systems of a parent\"\n                 \" NAS server are not in the same state.\"],\n    \"14:6077b\": [\"CRITICAL\", \"ALRT_REPL_PARENT_STATE_MISMATCH\",\n                 \"The parent NAS server replication session is not in the same\"\n                 \" state as the member file system replication session.\"],\n    \"14:6077c\": [\"ERROR\", \"ALRT_REPL_NTWKCONN\",\n                 \"One or more replication interface pairs are experiencing \"\n                 \"network connectivity issues between the local and remote \"\n                 \"systems.\"],\n    \"14:6077f\": [\"WARNING\", \"ALRT_REPL_DEST_POOL_FULL\",\n                 \"The replication session is operating in a degraded state \"\n                 \"because the storage pool on the destination system has run \"\n                 \"out of space. Expand the pool to restore normal operation.\"],\n    \"14:60780\": [\"ERROR\", \"ALRT_REPL_NO_IO_CONN\",\n                 \"An Import connection between the local system and the remote\"\n                 \" VNX system has not been created. Create an Import\"\n                 \" connection between the remote VNX system and the local\"\n                 \" system.\"],\n    \"14:60781\": [\"CRITICAL\", \"ALRT_NOT_ALL_REP_SESSION_FAILOVER\",\n                 \"At least one member of a consistency group is not in the \"\n                 \"Failed Over state. Check the Audit log for the replication\"\n                 \" failure reason, and reattempt a Failover.\"],\n    \"14:60782\": [\"CRITICAL\", \"ALRT_NOT_ALL_REP_SESSION_FAILOVER_SYNC\",\n                 \"At least one member of a consistency group is not in the\"\n                 \" Failed Over with Sync state. Check the Audit log for the\"\n                 \" replication failure reason, and reattempt a Failover with\"\n                 \" Sync.\"],\n    \"14:607d0\": [\"INFO\", \"ALRT_SED_KEY_GEN\",\n                 \"An authentication key has been generated and is valid for\"\n                 \" the self-encrypting drive system. Please back up the key \"\n                 \"immediately to an external device in case the key becomes \"\n                 \"corrupted or lost.\"],\n    \"14:607d1\": [\"ERROR\", \"ALRT_SED_KEY_RESTORE\",\n                 \"The authentication key is invalid and is not available for\"\n                 \" backup to an external device. This may prevent access to \"\n                 \"stored data. 
Please put the system in Service Mode, and run\"\n                 \" the svc_key_restore service script to restore the\"\n                 \" authentication key.\"],\n    \"14:607d2\": [\"INFO\", \"ALRT_SED_KEY_NO_KEY\",\n                 \"The authentication key is not present and is not available\"\n                 \" for backup to an external device. The key will be generated\"\n                 \" and available for backup after the first storage pool has\"\n                 \" been created.\"],\n    \"14:60898\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:60899\": [\"CRITICAL\", \"ALRT_HOST_CONTAINER_ERROR\",\n                 \"The system cannot connect to the virtual service because of\"\n                 \" an internal error. Retry the operation. If the problem \"\n                 \"persists, contact your service provider.\"],\n    \"14:6089a\": [\"CRITICAL\", \"ALRT_HOST_CONTAINER_CONNECTION_FAILURE\",\n                 \"The system failed to connect to the virtual service. Retry\"\n                 \" the operation. If the problem persists, contact your\"\n                 \" service provider.\"],\n    \"14:6089b\": [\"CRITICAL\", \"ALRT_HOST_CONTAINER_LOGIN_FAILURE\",\n                 \"The system cannot connect to the virtual service. Check the\"\n                 \" credentials used to access the virtual service.\"],\n    \"14:6089c\": [\"CRITICAL\", \"ALRT_HOST_CONTAINTER_CERTIFICATE_FAILURE\",\n                 \"The certificate to access the virtual service is invalid.\"\n                 \" Check and update the certificate used to access the\"\n                 \" virtual service.\"],\n    \"14:6089d\": [\"INFO\", \"ALRT_HOST_CONTAINER_UNKNOWN\",\n                 \"The system is unable to refresh the host container because \"\n                 \"of an unknown issue. Wait to see if the problem resolves \"\n                 \"itself. If the problem persists, contact your service\"\n                 \" provider.\"],\n    \"14:608fc\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:608fd\": [\"WARNING\", \"ALRT_HOST_INITIATORS_NO_HA\",\n                 \"The host only has one path to the storage system. Add\"\n                 \" multiple paths between host and storage systems to \"\n                 \"establish redundancy.\"],\n    \"14:608fe\": [\"WARNING\", \"ALRT_HOST_NO_LOGGED_IN_INITIATORS\",\n                 \"The host does not have any initiators logged into the\"\n                 \" storage system. Register one or more initiators on the \"\n                 \"host to the storage system. This may also require zoning \"\n                 \"changes on the switches.\"],\n    \"14:608ff\": [\"CRITICAL\", \"ALRT_HOST_CONFLICTING_IP\",\n                 \"Host has one or more IP addresses that are associated with \"\n                 \"other hosts. Resolve the conflicts by assigning the IP \"\n                 \"address to only one host.\"],\n    \"14:60900\": [\"CRITICAL\", \"ALRT_HOST_CONFLICTING_INITIATOR\",\n                 \"Host has one or more initiators that are associated with \"\n                 \"other hosts. Resolve the conflicts by assigning the \"\n                 \"initiators to only one host.\"],\n    \"14:60901\": [\"CRITICAL\", \"ALRT_HOST_ERROR\",\n                 \"An internal issue has occurred. Retry the operation. 
If the\"\n                 \" problem persists, contact your service provider.\"],\n    \"14:60902\": [\"CRITICAL\", \"ALRT_HOST_CONNECTION_FAILURE\",\n                 \"Failed to connect to host. Please check your network \"\n                 \"connection.\"],\n    \"14:60903\": [\"CRITICAL\", \"ALRT_HOST_LOGIN_FAILURE\",\n                 \"The system cannot log on to the host. Check the credentials\"\n                 \" used to access the host.\"],\n    \"14:60904\": [\"CRITICAL\", \"ALRT_HOST_CERTIFICATE_FAILURE\",\n                 \"The certificate to access the host is invalid. Check the\"\n                 \" certificate used to access host.\"],\n    \"14:60905\": [\"CRITICAL\", \"ALRT_HOST_DUPLICATE_UUID\",\n                 \"Multiple hosts are using the same UUID. Change or remove\"\n                 \" the duplicate host UUID that is in conflict with \"\n                 \"this host.\"],\n    \"14:60906\": [\"INFO\", \"ALRT_HOST_UNKNOWN\",\n                 \"The system is unable to refresh a managed server because\"\n                 \" of an unknown issue.\"],\n    \"14:60960\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is\"\n                 \" required.\"],\n    \"14:60961\": [\"WARNING\", \"ALRT_INITIATOR_NO_LOGGED_IN_PATH\",\n                 \"The initiator does not have any logged in initiator paths.\"\n                 \" Check the connection between the initiator and the storage\"\n                 \" system.\"],\n    \"14:60962\": [\"WARNING\", \"ALRT_INITIATOR_NOT_ASSOC_WITH_HOST\",\n                 \"The initiator is not associated with any host. Register the\"\n                 \" initiator with a known storage system.\"],\n    \"14:60963\": [\"CRITICAL\", \"ALRT_INITIATOR_CONFLICTING_HOST_UUID\",\n                 \"The initiator is registered with more than one host. Resolve\"\n                 \" the conflicts by assigning the initiators to only \"\n                 \"one host.\"],\n    \"14:609c4\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to\"\n                 \" see if the problem resolves itself.\"],\n    \"14:609c5\": [\"INFO\", \"ALRT_PORT_LINK_UP\",\n                 \"The port is operating normally.\"],\n    \"14:609c6\": [\"WARNING\", \"ALRT_FCPORT_DEGRADED\",\n                 \"Performance of a Fibre Channel (FC) port on one of the I/O\"\n                 \" modules has degraded. Identify the port, check the cabling,\"\n                 \" and network configuration. If the problem persists, you\"\n                 \" may need to replace the I/O module.\"],\n    \"14:609c7\": [\"CRITICAL\", \"ALRT_FCPORT_UNINITIALIZED\",\n                 \"A Fibre Channel (FC) port on one of the I/O modules not\"\n                 \" initialized. Identify the port, check the cabling, and\"\n                 \" network configuration. If the problem persists, you may \"\n                 \"need to replace the I/O module.\"],\n    \"14:609c8\": [\"INFO\", \"ALRT_FCPORT_EMPTY\",\n                 \"The Disk Processor Enclosure (DPE) contains a vacant Fibre\"\n                 \" Channel (FC) port.\"],\n    \"14:609c9\": [\"WARNING\", \"ALRT_FCPORT_MISSING\",\n                 \"The system is unable to detect a Fibre Channel (FC) port on\"\n                 \" one of the I/O modules. Check system logs and reboot the \"\n                 \"I/O module. 
If the problem persists, you may need to replace\"\n                 \" the I/O module.\"],\n    \"14:609ca\": [\"CRITICAL\", \"ALRT_FCPORT_FAULT\",\n                 \"A Fibre Channel (FC) port has faulted. Check system log for\"\n                 \" hardware errors or warnings. If the problem persists, you \"\n                 \"may need to replace the I/O module.\"],\n    \"14:609cc\": [\"WARNING\", \"ALRT_FCPORT_SFP_REMOVED\",\n                 \"The Small Form-factor Pluggable (SFP) module in this Fibre\"\n                 \" Channel (FC) port has been removed. Since the port is in \"\n                 \"use, reinsert a supported SFP module.\"],\n    \"14:609cd\": [\"INFO\", \"ALRT_FCPORT_SFP_REMOVED_NOT_IN_USE\",\n                 \"The Small Form-factor Pluggable (SFP) module in this Fibre\"\n                 \" Channel (FC) port has been removed. Since the port is not\"\n                 \" in use, no action is required.\"],\n    \"14:609cf\": [\"ERROR\", \"ALRT_FCPORT_LINKDOWN\",\n                 \"The Fibre Channel (FC) port has lost communication with the\"\n                 \" network.\"],\n    \"14:609d0\": [\"INFO\", \"ALRT_PORT_LINK_DOWN_NOT_IN_USE\",\n                 \"The port link is down, but not in use. No action is \"\n                 \"required.\"],\n    \"14:609d1\": [\"CRITICAL\", \"ALRT_FCPORT_SFP_UNSUPPORT\",\n                 \"The Small Form-factor Pluggable (SFP) module inserted in \"\n                 \"this Fibre Channel (FC) port is not supported. Replace it \"\n                 \"with a supported SFP module.\"],\n    \"14:609d2\": [\"CRITICAL\", \"ALRT_FCPORT_SFP_FAULT\",\n                 \"The Small Form-factor Pluggable (SFP) module in this Fibre\"\n                 \" Channel (FC) port has faulted and needs to be replaced.\"],\n    \"14:609d3\": [\"ERROR\", \"ALRT_FCPORT_OVERLIMIT\",\n                 \"This Fibre Channel (FC) port cannot be used because it \"\n                 \"exceeds the number of supported ports. Remove the I/O \"\n                 \"module that contains this port.\"],\n    \"14:609d4\": [\"CRITICAL\", \"ALRT_FCPORT_INCORRECTSLIC\",\n                 \"An incorrect type of I/O module has been inserted. The \"\n                 \"system cannot support the Fibre Channel (FC) port \"\n                 \"configuration for this port. Replace the I/O module.\"],\n    \"14:609d5\": [\"INFO\", \"ALRT_FCPORT_SFP_ASYMMETRIC\",\n                 \"The SFPs in the Storage Processor (SP) are configured \"\n                 \"incorrectly. Its supported speeds are asymmetric with its \"\n                 \"peer's.\"],\n    \"14:60a28\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:60a29\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this\"\n                 \" time. This may be an intermittent problem. Please wait to\"\n                 \" see if the problem resolves itself.\"],\n    \"14:60a2d\": [\"WARNING\", \"ALRT_FASTCACHE_DEGRADED\",\n                 \"FAST Cache performance is degraded because it has one or\"\n                 \" more disks with problems. Replace the faulted disks.\"],\n    \"14:60a2e\": [\"CRITICAL\", \"ALRT_FASTCACHE_FAULT\",\n                 \"FAST Cache is offline because it has two or more disks \"\n                 \"with problems. 
Contact your service provider.\"],\n    \"14:60a33\": [\"INFO\", \"ALRT_FASTCACHE_DISK_EOL_WARNING\",\n                 \"FAST Cache has one or more drives predicted to wear out in\"\n                 \" less than 180 days. The storage system will automatically \"\n                 \"replace the affected drives with no data loss when they \"\n                 \"reach end-of-life.\"],\n    \"14:60a34\": [\"INFO\", \"ALRT_FASTCACHE_DISK_EOL_WARNING_RANGE\",\n                 \"FAST Cache has one or more drives predicted to wear out in\"\n                 \" less than 180 days. The storage system will automatically \"\n                 \"replace the affected drives with no data loss when they \"\n                 \"reach end-of-life.\"],\n    \"14:60a35\": [\"INFO\", \"ALRT_FASTCACHE_DISK_EOL_SEVERE\",\n                 \"FAST Cache has one or more drives predicted to wear out in \"\n                 \"less than 90 days. The storage system will automatically\"\n                 \" replace the affected drives with no data loss when they \"\n                 \"reach end-of-life.\"],\n    \"14:60a36\": [\"INFO\", \"ALRT_FASTCACHE_DISK_EOL_SEVERE_RANGE\",\n                 \"FAST Cache has one or more drives predicted to wear out in \"\n                 \"less than 90 days. The storage system will automatically \"\n                 \"replace the affected drives with no data loss when they \"\n                 \"reach end-of-life.\"],\n    \"14:60a37\": [\"WARNING\", \"ALRT_FASTCACHE_DISK_EOL_CRITICAL\",\n                 \"FAST Cache has drives predicted to wear out in less than \"\n                 \"30 days. If there are spare drives available, the storage\"\n                 \" system will automatically replace the affected drives with \"\n                 \"no data loss when they reach end-of-life.\"],\n    \"14:60a38\": [\"WARNING\", \"ALRT_FASTCACHE_DISK_EOL_CRITICAL_RANGE\",\n                 \"FAST Cache has drives predicted to wear out in less than 30\"\n                 \" days. If there are spare drives available, the storage \"\n                 \"system will automatically replace the affected drives with \"\n                 \"no data loss when they reach end-of-life.\"],\n    \"14:60a39\": [\"CRITICAL\", \"ALRT_FASTCACHE_DISK_PACO_START_FAIL_NO_SPARE\",\n                 \"The system could not start an automatic copy of data from \"\n                 \"one or more drives in FAST Cache to replace a drive that is\"\n                 \" wearing out, because spare drives are not available. Add\"\n                 \" drives to the FAST Cache.\"],\n    \"14:60a3a\": [\"CRITICAL\", \"ALRT_FASTCACHE_DRIVE_EOL_AUTO_PACO_FAILED\",\n                 \"The system could not start an automatic copy of data from\"\n                 \" one or more drives in FAST Cache to replace drives that\"\n                 \" are wearing out, even though there are spare drives\"\n                 \" available. Please contact your service provider.\"],\n    \"14:60a3b\": [\"WARNING\", \"ALRT_FASTCACHE_DRIVE_EOL_IN_60_DAYS\",\n                 \"The FAST Cache has Flash drives of a specific type that are\"\n                 \" predicted to exceed end-of-life thresholds within 60 days.\"\n                 \" The storage system does not have enough free drives of the\"\n                 \" same type to replace them. Add drives to the FAST Cache.\"],\n    \"14:60bb8\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. 
This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:60bb9\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:60bba\": [\"WARNING\", \"ALRT_POOL_ALLOC_SYSTEM_THRESHOLD\",\n                 \"Storage resource allocation from one of the pools has \"\n                 \"exceeded the 85% threshold. Allocate more storage space \"\n                 \"from the pool to the storage resource.\"],\n    \"14:60bbb\": [\"WARNING\", \"ALRT_POOL_ALLOC_CRITICAL_THRESHOLD\",\n                 \"Storage resource allocation from one of the pools has\"\n                 \" exceeded the 95% threshold. Allocate more storage space\"\n                 \" from the pool to the storage resource.\"],\n    \"14:60bbc\": [\"ERROR\", \"ALRT_POOL_ALLOC_CRITICAL_THRESHOLD_OVERSUBSCRIBE\",\n                 \"Storage resource allocation from one of the pools has\"\n                 \" exceeded the 95% threshold, and the storage resource is \"\n                 \"oversubscribed. Allocate more storage space from the pool \"\n                 \"to the storage resource.\"],\n    \"14:60c1c\": [\"INFO\", \"ALRT_COMPONENT_OK\",\n                 \"The component is operating normally. No action is \"\n                 \"required.\"],\n    \"14:60c1d\": [\"INFO\", \"ALRT_UNKNOWN\",\n                 \"The health of the component cannot be determined at this \"\n                 \"time. This may be an intermittent problem. Please wait to \"\n                 \"see if the problem resolves itself.\"],\n    \"14:60c1e\": [\"ERROR\", \"ALRT_SSC_REMOVED\",\n                 \"The system status card (SSC) has been removed and needs to\"\n                 \" be installed again. Removing the SSC may result in faults \"\n                 \"or removal of other components in this enclosure.\"],\n    \"14:60c1f\": [\"CRITICAL\", \"ALRT_SSC_FAULT\",\n                 \"The system status card has faulted and needs to be \"\n                 \"replaced.\"],\n    \"14:60c80\": [\"INFO\", \"ALRT_UNCOMMITTEDPORT_UNINITIALIZED\",\n                 \"The uncommitted port has not been initialized. It needs to\"\n                 \" be committed before it can be used.\"],\n    \"14:60c81\": [\"INFO\", \"ALRT_UNCOMMITTEDPORT_SFP_REMOVED\",\n                 \"The Small Form-factor Pluggable (SFP) module in this \"\n                 \"uncommitted port has been removed. Since the port is not\"\n                 \" in use, no action is required.\"],\n    \"14:60c82\": [\"WARNING\", \"ALRT_UNCOMMITTEDPORT_SFP_UNSUPPORT\",\n                 \"The Small Form-factor Pluggable (SFP) module inserted in\"\n                 \" this uncommitted port is not supported. Replace it with a\"\n                 \" supported SFP module.\"],\n    \"14:60c83\": [\"ERROR\", \"ALRT_UNCOMMITTEDPORT_SFP_FAULT\",\n                 \"The Small Form-factor Pluggable (SFP) module in this \"\n                 \"uncommitted port has faulted and needs to be replaced.\"],\n    \"14:60ce4\": [\"WARNING\", \"ALRT_BACKINGSTORE_ONE_CONNECTION\",\n                 \"Only one SP of the backing store is connected.\"],\n    \"14:60ce5\": [\"CRITICAL\", \"ALRT_BACKINGSTORE_CONNECTION_LOST\",\n                 \"The system has lost connection to the backing store. 
\"\n                 \"Contact your service provider.\"],\n    \"14:60ce6\": [\"INFO\", \"ALRT_BACKINGSTORE_BAD_CONNECTION\",\n                 \"The backing store has faulty connections. Contact your \"\n                 \"service provider.\"],\n    \"14:60ce7\": [\"INFO\", \"ALRT_BACKINGSTORE_OK\",\n                 \"The backing store is operating normally.\"],\n    \"14:60ce8\": [\"INFO\", \"ALRT_BACKINGSTORE_UNKNOWN\",\n                 \"The health of the backing store cannot be determined. \"\n                 \"Contact your service provider.\"],\n    \"14:60d48\": [\"INFO\", \"ALRT_VMWARE_PE_OK\",\n                 \"The protocol endpoint is operating normally. No action is\"\n                 \" required.\"],\n    \"14:60d49\": [\"INFO\", \"ALRT_VMWARE_PE_UNKNOWN\",\n                 \"The health of the protocol endpoint cannot be determined \"\n                 \"at this time.\"],\n    \"14:60d4a\": [\"WARNING\", \"ALRT_VMWARE_PE_DEGRADED\",\n                 \"There are issues detected on the protocol endpoint for the\"\n                 \" virtual volume and it is degraded.\"],\n    \"14:60d4b\": [\"CRITICAL\", \"ALRT_VMWARE_PE_OFFLINE\",\n                 \"The NAS protocol endpoint is offline. This may be caused by \"\n                 \"the NAS server being offline.\"],\n    \"14:60d4c\": [\"CRITICAL\", \"ALRT_VMWARE_PE_FAILURE\",\n                 \"The VMware Protocol Endpoint is offline. This can be caused \"\n                 \"by host access configuration failure.\"],\n    \"14:60dac\": [\"INFO\", \"ALRT_MIGRATION_SESSION_OK\",\n                 \"The import session is operating normally.\"],\n    \"14:60dad\": [\"CRITICAL\",\n                 \"ALRT_MIGRATION_SESSION_FAILED_CONNECTION_FAILURE\",\n                 \"The import session failed to import data in initial/\"\n                 \"incremental copy due to connection failure. Check the import\"\n                 \" connection between source and destination manually. After \"\n                 \"connection recovery, import will restart automatically. If \"\n                 \"the error persists, cancel the import session.\"],\n    \"14:60dae\": [\"ERROR\", \"ALART_MIGRATION_SESSION_COMMIT_FAILED\",\n                 \"The import session failed to commit. Commit the import \"\n                 \"session again.\"],\n    \"14:60daf\": [\"ERROR\", \"ALRT_MIGRATION_SESSION_START_FAILED\",\n                 \"The import session failed to provision the target resource. \"\n                 \"It may have failed during one of these steps: 'Validate \"\n                 \"before starting import', 'Create target NAS server','Create\"\n                 \" target file system(s)','Dump source file system quota(s)'\"\n                 \" or 'Add source export entries'. Check job and task status \"\n                 \"to get error details. After the error is fixed, resume the \"\n                 \"import session. If the error persists, cancel the import\"\n                 \" session.\"],\n    \"14:60db0\": [\"CRITICAL\",\n                 \"ALRT_MIGRATION_SESSION_FAILED_PAUSED_TARGET_IO_FAILURE\",\n                 \"The import session failed and paused importing data during \"\n                 \"initial/incremental copy due to target IO failure.\"],\n    \"14:60db1\": [\"CRITICAL\", \"ALRT_MIGRATION_SESSION_FAILED_UNRECOVERABLE\",\n                 \"The import session failed due to unrecoverable failure. 
\"\n                 \"Cancel the import for data integrity consideration.\"],\n    \"14:60db2\": [\"CRITICAL\",\n                 \"ALRT_MIGRATION_SESSION_FAILED_PAUSED_SOURCE_IO_FAILURE\",\n                 \"The import session failed and paused importing data during \"\n                 \"initial/incremental copy due to source IO failure.\"],\n    \"14:60db3\": [\"ERROR\", \"ALRT_MIGRATION_SESSION_CONFIGURATION_FAILED\",\n                 \"The import session has configuration failure. Resume the \"\n                 \"import session. If the error persists, cancel the import\"\n                 \" session.\"],\n    \"14:60db4\": [\"CRITICAL\", \"ALRT_MIGRATION_SESSION_CUTOVER_FAILED\",\n                 \"The import session failed to cutover. Check and fix the \"\n                 \"error reported in related job; otherwise, cancel the \"\n                 \"import.\"],\n    \"14:60db5\": [\"CRITICAL\", \"ALRT_MIGRATION_SESSION_FAILED_SOURCE_IO_FAILURE\",\n                 \"The import session failed importing data during initial/\"\n                 \"incremental copy due to source IO failure.\"],\n    \"14:60db6\": [\"CRITICAL\", \"ALRT_MIGRATION_SESSION_FAILED_TARGET_IO_FAILURE\",\n                 \"The import session failed importing data during initial/\"\n                 \"incremental copy due to target IO failure\"],\n    \"14:60db7\": [\"CRITICAL\",\n                 \"ALRT_MIGRATION_SESSION_FAILED_PAUSED_CONNECTION_FAILURE\",\n                 \"The import session failed and paused importing data during\"\n                 \" initial/incremental copy due to connection failure. Check \"\n                 \"the import connection between source and destination \"\n                 \"manually. After connection recovery, import will restart \"\n                 \"automatically. If the error persists, cancel the import \"\n                 \"session.\"],\n    \"14:60db8\": [\"CRITICAL\", \"ALRT_MIGRATION_SESSION_CANCELLING_FAILED\",\n                 \"The import session failed to cancel. Cancel the import for \"\n                 \"data integrity consideration.\"],\n    \"14:60dba\": [\"NOTICE\", \"ALRT_MIGRATION_SESSION_PAUSED\",\n                 \"The import session is paused. Resume the import session.\"],\n    \"14:60dbb\": [\"ERROR\", \"ALRT_MIGRATION_SESSION_FAULTED\",\n                 \"The import session is faulted. Cancel the import session.\"],\n    \"14:60dbc\": [\"ERROR\", \"ALRT_MIGRATION_SESSION_OFFLINE\",\n                 \"The import session is offline. Cancel the import session.\"],\n    \"14:60dbd\": [\"CRITICAL\", \"ALRT_MIGRATION_SESSION_FAILED_NON_RECOVERABLE\",\n                 \"The import session failed due to a non- recoverable error.\"\n                 \" Go to session properties dialog to check health details\"\n                 \" and resolution steps in GUI or use UECMCLI command ¡°\"\n                 \"import/session/elem ent show -detail¡± to check session \"\n                 \"health details and resolution steps.\"],\n    \"14:60e10\": [\"INFO\", \"ALRT_MOVE_SESSION_OK\",\n                 \"The specified storage resource move session is operating\"\n                 \" normally.\"],\n    \"14:60e11\": [\"INFO\", \"ALRT_MOVE_SESSION_UNKNOWN\",\n                 \"The health of storage resource move session cannot be\"\n                 \" determined.\"],\n    \"14:60e12\": [\"ERROR\", \"ALRT_MOVE_SESSION_FAILED\",\n                 \"The specified storage resource move session has failed. 
\"\n                 \"Contact your service provider.\"],\n    \"14:60e13\": [\"ERROR\", \"ALRT_MOVE_SESSION_STOP_POOL_OFFLINE\",\n                 \"The pool went offline and the specified storage resource \"\n                 \"move session cannot continue. Contact your service \"\n                 \"provider.\"],\n    \"14:60e14\": [\"ERROR\", \"ALRT_MOVE_SESSION_STOP_NO_SPACE\",\n                 \"The pool exhausted the space available and the specified \"\n                 \"storage resource move session cannot continue. Remove the\"\n                 \" move session, free up space on the destination pool, and \"\n                 \"restart the move session.\"],\n    \"14:60e15\": [\"ERROR\", \"ALRT_MOVE_SESSION_INTERNAL_ERR\",\n                 \"The specified storage resource move session encountered an \"\n                 \"internal error. Contact your service provider.\"],\n    \"14:60e74\": [\"INFO\", \"ALRT_ELEMENT_IMPORT_SESSION_OK\",\n                 \"The element import session is operating normally.\"],\n    \"14:60e75\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_NONRECOVERABLE\",\n                 \"The import session failed due to a non- recoverable failure.\"\n                 \" Cancel the import session and determine the integrity of \"\n                 \"the data.\"],\n    \"14:60e76\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_UNABLE_TO_LOCATE_DEVICE\",\n                 \"Element import session related to a Sancopy session ran into\"\n                 \" error, 0x712AC007, Unable to locate the device. Check that \"\n                 \"the device with this WWN exists. (WWN). This can be due to\"\n                 \" FC zoning or iSCSI connection configuration between the VNX\"\n                 \" and Unity systems. Follow documentation to configure \"\n                 \"connectivity between all SP pairs between the VNX and Unity \"\n                 \"systems. Once the FC/iSCSI connection configuration is \"\n                 \"validated, run the Verify and Update operation for the \"\n                 \"remote system connection to VNX. This will discover/update\"\n                 \" all.\"],\n    \"14:60e77\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_\"\n                 \"BAD_BLOCK_ON_SOURCE_DEVICE\",\n                 \"Element import session related to a Sancopy session ran \"\n                 \"into error: 0x712AC015: A bad block was encountered on the\"\n                 \" source device. (WWN). This is a non- recoverable error.\"\n                 \" Cancel the session. The resource cannot be imported.\"],\n    \"14:60e78\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_UNABLE_TO_ACCESS_DEVICE\",\n                 \"Unable to access the device. (WWN). Check cables and FC\"\n                 \" zoning or iSCSI connection configuration between the VNX \"\n                 \"and Unity systems. Ensure connectivity between all SP pairs\"\n                 \" between the VNX and Unity systems. 
Once the FC/iSCSI \"\n                 \"connection configuration is validated, run the Verify and \"\n                 \"Update operation for the remote system connection to the \"\n                 \"VNX, which will discover/update all configuration changes.\"\n                 \" Run the Resume operation on the import session to recover \"\n                 \"the session from the error state.\"],\n    \"14:60e79\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_LU_TRESPASSED\",\n                 \"This LUN will need to be manually trespassed over to the SP\"\n                 \" that started the session. (WWN). This is due to LUN \"\n                 \"trespassed state. To resolve this issue, trespass over the \"\n                 \"LUN to the same SP on which the SAN Copy session was \"\n                 \"created. Once resolved, log in to the Unity system and run\"\n                 \" the Resume operation for this import session to recover\"\n                 \" from the error.\"],\n    \"14:60e7a\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_SOURCE_DEVICE_\"\n                 \"INACCESSIBLE\",\n                 \"Transfer failed because the source device is inaccessible\"\n                 \" from the peer SP. This is probably due to incorrect \"\n                 \"FC zoning on the switch or the device is not configured \"\n                 \"in the correct storage group. (WWN). Configure connectivity\"\n                 \" between all SP pairs between the VNX and Unity systems.\"\n                 \" Once the FC or iSCSI connection configuration is validated,\"\n                 \" run Verify and Update operation for the Remote System\"\n                 \" connection to the VNX to discover/update all configuration\"\n                 \" changes; then, run Resume operation on the import \"\n                 \"session.\"],\n    \"14:60e7b\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_LOW_USER_LINK_BANDWIDTH\",\n                 \"The User Link Bandwidth must be >= 16 kilobits. The error \"\n                 \"occurred due to bandwidth setting changes made through the \"\n                 \"VNX UI. Reset the link bandwidth to the default value.\"],\n    \"14:60e7c\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_CONCURRENT_SANCOPY_\"\n                 \"SESSION_DESTINATIONS\",\n                 \"The command failed because one or more failed destinations\"\n                 \" exist on this SAN Copy Session due to concurrent sancopy\"\n                 \" sync to different targets. Do not add any new targets to\"\n                 \" the SAN Copy session created by the Unity system. 
Remove\"\n                 \" any non-Unity targets added to the SAN Copy session to \"\n                 \"recover from the error.\"],\n    \"14:60e7d\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_COMMUNICATING_WITH\"\n                 \"_SNAPVIEW\",\n                 \"A non-recoverable error occurred: An error occured \"\n                 \"communicating with SnapView.\"],\n    \"14:60e7e\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_INCONSISTENT_STATE\",\n                 \"The session has completed successfully but is in an \"\n                 \"inconsistent state.\"],\n    \"14:60e7f\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_DESTINATION_IN_\"\n                 \"INCONSISTENT_STATE\",\n                 \"A non-recoverable error occurred: The session has completed\"\n                 \" successfully but is in an inconsistent state.\"],\n    \"14:60e80\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_RESUME_ON_AUTO_RECOVERY\",\n                 \"A non-recoverable error occurred: Resume of copy session %2\"\n                 \" failed on auto-recovery.\"],\n    \"14:60e81\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_DUE_TO_ALL_PATHS_FAILURE\",\n                 \"A non-recoverable error occurred: Copy session %2 failed due\"\n                 \" to all paths failure on device with WWN %3.\"],\n    \"14:60e82\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_ACCESS_DENIED_TO_DEVICE\",\n                 \"A non-recoverable error occurred: Access denied to the \"\n                 \"device. (WWN).\"],\n    \"14:60e83\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_NOT_ENOUGH_MEMORY\",\n                 \"A non-recoverable error occurred: Not enough memory \"\n                 \"resources exist to complete the request.\"],\n    \"14:60e84\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_SOURCE_DEVICE_FAILED\",\n                 \"The source device specified in the session failed. (WWN). \"\n                 \"This can be due to either a Raidgroup or Storage Pool being\"\n                 \" offline or corruption on source LUN on VNX. Verify that the\"\n                 \" source LUN is in a good state. Once the resource is in a\"\n                 \" good state, run Resume of session from Unity UI.\"],\n    \"14:60e85\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_DESTINATION_DEVICE\"\n                 \"_FAILED\",\n                 \"The following target device specified in the session failed.\"\n                 \" (WWN). This can be due to the storage pool being offline or\"\n                 \" corruption of the target LUN. Verify that the target LUN is\"\n                 \" in a good state. Once the resource is in a good state, run\"\n                 \" Resume operation of session from Unity UI.\"],\n    \"14:60e86\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_DESTINATION_DEVICE_NOT_\"\n                 \"FOUND\",\n                 \"The destination device could not be found due to either\"\n                 \" incorrect zoning on the switch or the device is not in the \"\n                 \"correct storage group. (WWN). This can be due to FC Zoning \"\n                 \"or iSCSI Connection configuration between VNX and Unity \"\n                 \"arrays. 
Configure connectivity between all SP pairs between\"\n                 \" the VNX and Unity systems. Once the FC or iSCSI connection\"\n                 \" configuration is validated, run the Verify and Update \"\n                 \"operation for the Remote System connection to the VNX to \"\n                 \"discover/update all.\"],\n    \"14:60e87\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_TARGET_LU_NOT_\"\n                 \"INITIALIZED\",\n                 \"A non-recoverable error occurred: Target LUN list has not\"\n                 \" been initialized yet.\"],\n    \"14:60e88\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_COMMAND_TIMED_OUT\",\n                 \"A non-recoverable error occurred: The command timed out \"\n                 \"waiting on another SAN Copy operation to complete.\"],\n    \"14:60e89\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_VERIFYING_FRONT_END_\"\n                 \"DEVICE_TIMEDOUT\",\n                 \"A non-recoverable error occurred: Verifying front end devic\"\n                 \"e timed out.\"],\n    \"14:60e8a\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_VERIFYING_FRONT_END_\"\n                 \"DEVICE_TIMEDOUT_ANOTHER_OPERATION\",\n                 \"A non-recoverable error occurred: Verifying front end device\"\n                 \" timed out waiting for another front end operation to \"\n                 \"complete.\"],\n    \"14:60e8b\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_VERIFYING_SOURCE_\"\n                 \"CONNECTIVITY_TIMEDOUT\",\n                 \"A non-recoverable error occurred: Operation timed out \"\n                 \"trying to verify the connectivity to the source device.\"],\n    \"14:60e8c\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_VERIFYING_DESTINATION\"\n                 \"_CONNECTIVITY_TIMEDOUT\",\n                 \"A non-recoverable error occurred: Operation timed out \"\n                 \"trying to verify the connectivity to the target device.\"],\n    \"14:60e8d\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_RLP_LUN_IO_FAILURE\",\n                 \"A non-recoverable error occurred: Operation failed due to \"\n                 \"an unrecoverable I/O failure of a reserved LUN.\"],\n    \"14:60e8e\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_LIMIT_OF_TOTAL_\"\n                 \"SESSIONS_FOR_SANCOPYE_REACHED\",\n                 \"A non-recoverable error occurred: This copy session could \"\n                 \"not be created because the limit of total sessions for \"\n                 \"SAN Copy/E has been reached.\"],\n    \"14:60e8f\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_LIMIT_OF_INCREMENTAL\"\n                 \"_SESSIONS_FOR_SANCOPYE_REACHED\",\n                 \"This copy session could not be created because the limit \"\n                 \"of incremental sessions for SAN Copy/E has been reached. \"\n                 \"Resolve the limit issue by deleting an existing incremental \"\n                 \"session related to systems other than the Unity system or \"\n                 \"remove some MirrorView/A sessions from the system. 
Once the\"\n                 \" limit issue is resolved, run the resume operation on teh \"\n                 \"import session from the Unity system.\"],\n    \"14:60e90\": [\"INFO\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_COPY_COMMAND_QUEUED\",\n                 \"Copy command is queued due to SAN Copy concurrent sync \"\n                 \"limits interference from a VNX admininstrator scheduled\"\n                 \" start and the Unity scheduled start. Stop or abort any SAN\"\n                 \" Copy starts issued on VNX systems on imports happening to\"\n                 \" non- Unity systems.\"],\n    \"14:60e91\": [\"INFO\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_ON_SOURCE_OR\"\n                 \"_DESTINATIONS\",\n                 \"The session failed because either the source or all targets\"\n                 \" have failed due to failure status on the source or target\"\n                 \" device of the SAN Copy session. Log in to the VNX system \"\n                 \"and resolve the SAN Copy error reported for this element \"\n                 \"session and resume the SAN Copy session from the VNX UI.\"],\n    \"14:60e92\": [\"INFO\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_DEVICE_CANNOT_BE_LOCATED\",\n                 \"Element import session related Sancopy session ran into \"\n                 \"error: 0x712A0030: Unable to locate the device. Check that \"\n                 \"the device with this WWN exists Session ran into an non- \"\n                 \"recoverable error. Please collect support materials from \"\n                 \"both VNX and Unity system. Report an issue with EMC Support\"\n                 \" for resolution. Please cancel the session.\"],\n    \"14:60e93\": [\"WARNING\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_NO_UNUSED_RLP_LUNS\",\n                 \"There are no unused LUNs available in the reserved LUN pool\"\n                 \" (RPL) for session create or start. 
Add LUNs to the RLP\"\n                 \" pool, then resume the import session operation.\"],\n    \"14:60e94\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_RLP_LUN_NOT_SUPPORT_\"\n                 \"INCREMENTAL_SESSIONS\",\n                 \"A non-recoverable error occurred: Existing reserved LUN does\"\n                 \" not support incremental sessions.\"],\n    \"14:60e95\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_SNAPVIEW_RESERVED_\"\n                 \"LUN_NOT_ENOUGH_SPACE\",\n                 \"A non-recoverable error occurred: A SnapView reserved LUN \"\n                 \"did not have sufficient space for the minimum map regions.\"],\n    \"14:60e96\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_TOO_MANY_SNAPSHOTS_ON_\"\n                 \"SINGLE_LU\",\n                 \"A non-recoverable error occurred: Too many snapshots have\"\n                 \" been created on a single source LUN.\"],\n    \"14:60e97\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_CANNOT_OPEN_RESERVED_LUN\",\n                 \"A non-recoverable error occurred: The reserved LUN cannot\"\n                 \" be opened.\"],\n    \"14:60e98\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_CANNOT_GET_RESERVED_\"\n                 \"LUN_INFO\",\n                 \"A non-recoverable error has occurred: Unable to get the \"\n                 \"geometry information for reserved LUN.\"],\n    \"14:60e99\": [\"ERROR\", \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_NO_SPACE_ON_RLP\",\n                 \"No more room exists in the reserved LUN pool (RLP). An RLP\"\n                 \" LUN or space is unavailable to create or start a session.\"\n                 \" Add LUNs to the RLP pool, then resume the operation.\"],\n    \"14:60e9a\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_TOTAL_NUMBER_SUPPORTED_\"\n                 \"INCREMENTAL_SESSIONS_REACHED\",\n                 \"This incremental copy session could not be created because \"\n                 \"the maximum incremental SAN Copy sessions limit on the VNX\"\n                 \" has been reached. The limit is shared with the MirrorView \"\n                 \"Async feature. Resolve the limit issue by removing an\"\n                 \" unwanted or unused SAN Copy session related to systems \"\n                 \"other than the Unity system or remove some MirrorView/A \"\n                 \"sessions from the system. Once the limit issue is resolved,\"\n                 \" run the Resume operation on the import session from the \"\n                 \"Unity system.\"],\n    \"14:60e9b\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_LIMIT_OF_TOTAL_SANCOPY\"\n                 \"_SESSIONS_REACHED\",\n                 \"This incremental copy session could not be created because\"\n                 \" the maximum incremental SAN Copy sessions limit on the\"\n                 \" VNX has been reached. The limit is shared with the \"\n                 \"MirrorView Async feature. Resolve the limit issue by\"\n                 \" removing an unwanted or unused SAN Copy session related \"\n                 \"to systems other than the Unity system or remove some \"\n                 \"MirrorView/A sessions from the system. 
Once the limit \"\n                 \"issue is resolved, run the Resume operation on the import \"\n                 \"session from the Unity system.\"],\n    \"14:60e9c\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_LIMIT_OF_TOTAL_I\"\n                 \"NCREMENTAL_SANCOPY_SESSIONS_REACHED\",\n                 \"This incremental copy session could not be created \"\n                 \"because the maximum incremental SAN Copy sessions limit \"\n                 \"on the VNX has been reached. The limit is shared with the \"\n                 \"MirrorView Async feature. Resolve the limit issue by \"\n                 \"removing an unwanted or unused SAN Copy session related to \"\n                 \"systems other than the Unity system or remove some\"\n                 \" MirrorView/A sessions from the system. Once the limit \"\n                 \"issue is resolved, run the Resume operation on the import\"\n                 \" session from the Unity system.\"],\n    \"14:60e9d\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_LOST_COMMUNICATION\",\n                 \"Communication with the source array has been lost. On the \"\n                 \"Remote System Connection page, click Verify and Update\"\n                 \" Connection. If that does not correct the issue, verify \"\n                 \"that the physical network is operational.\"],\n    \"14:60e9e\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_RLP_MAXIMUM_DEVICES\",\n                 \"The reserved LUN pool (RLP) has its maximum number of \"\n                 \"devices. An RLP LUN or space is unavailable to create or\"\n                 \" start a session. Add LUNs to the RLP pool, then resume \"\n                 \"the operation.\"],\n    \"14:60e9f\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_NO_CACHE_DEVICES\",\n                 \"The user attempted to start a session without cache devices.\"\n                 \" A reserved LUN pool (RLP) LUN or space is unavailable to \"\n                 \"create or start a session. Add LUNs to the RLP pool, then \"\n                 \"resume the operation.\"],\n    \"14:60ea0\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_TARGET_\"\n                 \"INSUFFICIENT_SPACE\",\n                 \"Failed to write to target device due to insufficient \"\n                 \"storage space, which can be caused by a pool out of space or\"\n                 \" target device error state on the Unity system. Verify the \"\n                 \"condition of the target device, or pool, or both. Add or\"\n                 \" free storage space in the pool, or correct the resource \"\n                 \"state, or both and then resume the operation.\"],\n    \"14:60ea1\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_DEVICE_NOT_READY\",\n                 \"Element import session related to a SAN Copy session failed\"\n                 \" because the device is not ready. One cause can be a reboot\"\n                 \" of the VNX system, which would cause the SAN Copy session\"\n                 \" to go to the paused state. Resolve the VNX reboot issue \"\n                 \"and verify that the source LUN or LUNs are completely \"\n                 \"recovered. 
Then from the Unity console, run the Resume \"\n                 \"operation on the import session to recover.\"],\n    \"14:60ea2\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_COMMUNICATING_WITH_\"\n                 \"SNAPVIEW_1\",\n                 \"A non-recoverable error occurred: An error occurred \"\n                 \"communicating with SnapView.\"],\n    \"14:60ea3\": [\"CRITICAL\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_SOURCE_DEVICE\"\n                 \"_UNAVAILABLE\",\n                 \"Element import session related Sancopy session failed \"\n                 \"because the source device is unavailable for IO operations.\"\n                 \" Ensure that the device is not a MirrorView secondary image,\"\n                 \" a SnapView Clone, an inactive Snapshot, or a detached or\"\n                 \" offline VNX Snapshot Mount Point. If the session still\"\n                 \" fails, gather SPcollects and contact your service \"\n                 \"provider.\"],\n    \"14:60ea4\": [\"WARNING\", \"ALRT_IMPORT_REMOTE_SYSTEM_DEGRADED\",\n                 \"Connection to one of the remote VNX system management \"\n                 \"IP addresses cannot be made. Check and restore connectivity \"\n                 \"to both management IP addresses on the VNX system.\"],\n    \"14:60ea5\": [\"ERROR\",\n                 \"ALRT_ELEMENT_IMPORT_SESSION_FAILED_SOURCE_IN_IMPORT_SESSION\",\n                 \"The import session could not be created because the source\"\n                 \" resource is already in an import session. Resolve the issue\"\n                 \" by removing the SAN Copy session for this resource on the\"\n                 \" VNX. Once the issue is resolved, wait for a few minutes and\"\n                 \" run the Resume operation on the import session from the\"\n                 \" Unity system.\"],\n    \"14:60f00\": [\"INFO\", \"ALRT_ROUTE_OK\",\n                 \"The component is operating normally.\"],\n    \"14:60f01\": [\"ERROR\", \"ALRT_ROUTE_INVALID_IP_VERSION\",\n                 \"There is an IPv4/IPv6 mismatch between the network route's\"\n                 \" destination and/or gateway, and the source IP interface.\"\n                 \" Edit the destination and/or gateway attributes of the\"\n                 \" route.\"],\n    \"14:60f02\": [\"ERROR\", \"ALRT_ROUTE_SRC_IP_NOT_FOUND\",\n                 \"The source IP interface of the network route does not \"\n                 \"exist.\"],\n    \"14:60f03\": [\"ERROR\", \"ALRT_ROUTE_DIFF_SUBNET\",\n                 \"The gateway of the network route is inaccessible, because it\"\n                 \" is not on the same subnet as the source interface. Modify\"\n                 \" the attributes of the network route or source Interface to \"\n                 \"associate them with the same subnet.\"],\n    \"14:60f04\": [\"ERROR\", \"ALRT_ROUTE_NOT_OPERATIONAL\",\n                 \"The network route is not operational. 
Delete the route and\"\n                 \" create a new one, if necessary.\"],\n    \"14:61008c\": [\"NOTICE\",\n                  \"ALRT_MIGRATION_SESSION_CUTOVER_THRESHOLD_PERCENTAGE\"\n                  \"_REMAINING\",\n                  \"Import session reached cutover threshold and is cutover \"\n                  \"ready.\"],\n    \"14:62001f\": [\"WARNING\", \"ALRT_QOS_MAX_PERF_CLASSES\",\n                  \"Maximum number of I/O limit resources has been reached.\"],\n    \"14:640001\": [\"CRITICAL\", \"ALRT_UDOCTOR_FAIL_TO_START\",\n                  \"An error has occurred that is preventing the UDoctor \"\n                  \"service from starting up. Contact your service provider.\"],\n    \"14:640002\": [\"ERROR\", \"ALRT_UDOCTOR_GENERAL_ALERT\",\n                  \"The UDoctor service has detected an error and generated\"\n                  \" this alert. For more information, refer to the relevant\"\n                  \" knowledgebase article on the support website or contact \"\n                  \"your service provider.\"],\n    \"14:640003\": [\"CRITICAL\", \"ALRT_UDOCTOR_CRITICAL_ALERT\",\n                  \"The UDoctor service has detected an error and generated \"\n                  \"this alert. For more information, refer to the relevant \"\n                  \"knowledgebase article on the support website or contact \"\n                  \"your service provider.\"],\n    \"14:70001\": [\"ERROR\", \"ALRT_SEND_FAILED\",\n                 \"The storage system failed to communicate an event message \"\n                 \"via the Email server, SNMP servers, or ESRS gateway or \"\n                 \"servers. Resolve the problem with the Email, ESRS, or \"\n                 \"SNMP servers.\"],\n    \"14:70003\": [\"ERROR\", \"ALRT_BAD_PRVCY\",\n                 \"Set the SNMP privacy protocol to one of the valid values: \"\n                 \"DES or AES.\"],\n    \"14:70004\": [\"ERROR\", \"ALRT_BAD_AUTH\",\n                 \"Set the SNMP authentication protocol to one of the valid \"\n                 \"values: MD5 or SHA.\"],\n    \"14:80001\": [\"INFO\", \"DESC_TEST_UI_ALERT\",\n                 \"This is a test message to be shown in a UI pop-up.\"],\n    \"14:90001\": [\"INFO\", \"DESC_TEST_PHONE_HOME_ALERT\",\n                 \"This is a test message to be sent via ConnectHome.\"],\n    \"201:20000\": [\"INFO\", \"ALRT_NTP_OK\",\n                  \"The system can now reach the NTP server.\"],\n    \"201:20001\": [\"WARNING\", \"ALRT_NTP_PART_NO_CONNECT\",\n                  \"The system has a partial connection to the NTP server.\"],\n    \"201:20002\": [\"ERROR\", \"ALRT_NTP_NO_CONNECT\",\n                  \"The system could not connect to the Time Server (NTP).\"\n                  \" Check your NTP settings.\"],\n    \"301:24001\": [\"WARNING\", \"ALRT_STORAGE_SERVER_RESTART\",\n                  \"The NAS servers that are configured to run on this Storage\"\n                  \" Processor (SP) have stopped and will be automatically \"\n                  \"restarted. This may affect host connections, which may\"\n                  \" need to be reconnected to your storage resources. If the \"\n                  \"problem persists, contact your service provider.\"],\n    \"301:30000\": [\"INFO\", \"ALRT_CONTROLLED_REBOOT_START\",\n                  \"This Storage Processor (SP) is currently rebooting. 
No \"\n                  \"action is required.\"],\n    \"301:30001\": [\"INFO\", \"ALRT_CONTROLLED_REBOOT_FINISHED\",\n                  \"The Storage Processor (SP) has finished rebooting. No \"\n                  \"action is required.\"],\n    \"301:30002\": [\"INFO\", \"ALRT_CONTROLLED_SERVICE_START\",\n                  \"This Storage Processor (SP) is currently rebooting into \"\n                  \"Service Mode. No action is required.\"],\n    \"301:3000e\": [\"INFO\", \"ALRT_CONTROLLED_SYSTEMSHUTDOWN_START\",\n                  \"The Storage Processor (SP) is shutting down. The shut down \"\n                  \"and power up procedure must be performed in a particular\"\n                  \" order. If you have not already printed the power up\"\n                  \" instructions, go to the EMC Support website to locate \"\n                  \"product documentation.\"],\n    \"301:30010\": [\"NOTICE\", \"PLATFORM_HARDWARE_PERSIST_STARTED\",\n                  \"A hardware commit operation has started. The system may \"\n                  \"reboot multiple  times. Please do not interrupt this\"\n                  \" process.\"],\n    \"301:30011\": [\"NOTICE\", \"PLATFORM_HARDWARE_COMMIT_COMPLETE\",\n                  \"Your new hardware configuration has been committed and is \"\n                  \"now ready for use.\"],\n    \"301:3001a\": [\"INFO\", \"ALRT_VVNX_CONTROLLED_SYSTEMSHUTDOWN_START\",\n                  \"The Storage Processor (SP) is shutting down.\"],\n    \"301:31004\": [\"ERROR\", \"ALRT_DEBUG_PROCESS_CRASH\",\n                  \"A process crashed, and it might impact the whole system.\"\n                  \" Please check the core dump by svc_dc -lcd after data \"\n                  \"collection is complete (it usually needs several\"\n                  \" minutes).\"],\n    \"301:32001\": [\"ERROR\", \"PLATFORM_HARDWARE_COMMIT_FAILED\",\n                  \"The hardware configuration could not be committed. Please\"\n                  \" try again.\"],\n    \"301:40001\": [\"INFO\", \"ALRT_METRICS_DB_RECOVERED\",\n                  \"Performance metrics are available now. 
No action is \"\n                  \"required.\"],\n    \"301:48000\": [\"ERROR\", \"ALRT_METRICS_DB_FAIL\",\n                  \"Performance metrics are unavailable due to a system error.\"\n                  \" Contact your service provider.\"]\n}\nIOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Input/output operations per second\"\n}\nREAD_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Read input/output operations per second\"\n}\nWRITE_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Write input/output operations per second\"\n}\nTHROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data is \"\n                   \"successfully transferred in MB/s\"\n}\nREAD_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data read is \"\n                   \"successfully transferred in MB/s\"\n}\nWRITE_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data write is \"\n                   \"successfully transferred in MB/s\"\n}\nRESPONSE_TIME_DESCRIPTION = {\n    \"unit\": \"ms\",\n    \"description\": \"Average time taken for an IO \"\n                   \"operation in ms\"\n}\nCACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of IO operations that are cache hits\"\n}\nREAD_CACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of read ops that are cache hits\"\n}\nWRITE_CACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of write ops that are cache hits\"\n}\nIO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of IO requests in KB\"\n}\nREAD_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of read IO requests in KB\"\n}\nWRITE_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of write IO requests in KB\"\n}\nCPU_USAGE_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of CPU usage\"\n}\nMEMORY_USAGE_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of disk memory usage\"\n}\nSERVICE_TIME = {\n    \"unit\": \"ms\",\n    \"description\": \"Service time of the resource in ms\"\n}\nVOLUME_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n    \"ioSize\": IO_SIZE_DESCRIPTION,\n    \"readIoSize\": READ_IO_SIZE_DESCRIPTION,\n    \"writeIoSize\": WRITE_IO_SIZE_DESCRIPTION\n}\nPORT_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION\n}\nDISK_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION\n}\nFILESYSTEM_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": 
READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"ioSize\": IO_SIZE_DESCRIPTION,\n    \"readIoSize\": READ_IO_SIZE_DESCRIPTION,\n    \"writeIoSize\": WRITE_IO_SIZE_DESCRIPTION\n}\n"
  },
  {
    "path": "delfin/drivers/dell_emc/unity/rest_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport threading\n\nimport requests\nimport six\nfrom oslo_log import log as logging\n\nfrom delfin import cryptor\nfrom delfin import exception\nfrom delfin.drivers.dell_emc.unity import consts\nfrom delfin.drivers.utils.rest_client import RestClient\n\nLOG = logging.getLogger(__name__)\n\n\nclass RestHandler(RestClient):\n    REST_AUTH_URL = '/api/types/loginSessionInfo/instances'\n    REST_STORAGE_URL = '/api/types/system/instances'\n    REST_CAPACITY_URL = '/api/types/systemCapacity/instances'\n    REST_SOFT_VERSION_URL = '/api/types/installedSoftwareVersion/instances'\n    REST_LUNS_URL = '/api/types/lun/instances'\n    REST_POOLS_URL = '/api/types/pool/instances'\n    REST_ALERTS_URL = '/api/types/alert/instances'\n    REST_DEL_ALERTS_URL = '/api/instances/alert/'\n    REST_LOGOUT_URL = '/api/types/loginSessionInfo/action/logout'\n    AUTH_KEY = 'EMC-CSRF-TOKEN'\n    REST_CONTROLLER_URL = '/api/types/storageProcessor/instances'\n    REST_DISK_URL = '/api/types/disk/instances'\n    REST_FCPORT_URL = '/api/types/fcPort/instances'\n    REST_ETHPORT_URL = '/api/types/ethernetPort/instances'\n    REST_IP_URL = '/api/types/ipInterface/instances'\n    REST_FILESYSTEM_URL = '/api/types/filesystem/instances'\n    REST_NFSSHARE_URL = '/api/types/nfsShare/instances'\n    REST_CIFSSHARE_URL = '/api/types/cifsShare/instances'\n    REST_QTREE_URL = '/api/types/treeQuota/instances'\n    REST_USERQUOTA_URL = '/api/types/userQuota/instances'\n    REST_QUOTACONFIG_URL = '/api/types/quotaConfig/instances'\n    REST_VIRTUAL_DISK_URL = '/api/types/virtualDisk/instances'\n    STATE_SOLVED = 2\n\n    def __init__(self, **kwargs):\n        super(RestHandler, self).__init__(**kwargs)\n        self.session_lock = threading.Lock()\n\n    def login(self):\n        \"\"\"Login dell_emc unity storage array.\"\"\"\n        try:\n            with self.session_lock:\n                if self.session is None:\n                    self.init_http_head()\n                self.session.headers.update({\"X-EMC-REST-CLIENT\": \"true\"})\n                self.session.auth = requests.auth.HTTPBasicAuth(\n                    self.rest_username, cryptor.decode(self.rest_password))\n                res = self.call_with_token(RestHandler.REST_AUTH_URL)\n                if res.status_code == 200:\n                    self.session.headers[RestHandler.AUTH_KEY] = \\\n                        cryptor.encode(res.headers[RestHandler.AUTH_KEY])\n                else:\n                    LOG.error(\"Login error.URL: %s,Reason: %s.\",\n                              RestHandler.REST_AUTH_URL, res.text)\n                    if 'Unauthorized' in res.text:\n                        raise exception.InvalidUsernameOrPassword()\n                    elif 'Forbidden' in res.text:\n                        raise exception.InvalidIpOrPort()\n                    else:\n                        raise exception.StorageBackendException(\n                     
       six.text_type(res.text))\n        except Exception as e:\n            LOG.error(\"Login error: %s\", six.text_type(e))\n            raise e\n\n    def call_with_token(self, url, data=None, method='GET',\n                        calltimeout=consts.DEFAULT_TIMEOUT):\n        auth_key = None\n        if self.session:\n            auth_key = self.session.headers.get(RestHandler.AUTH_KEY, None)\n            if auth_key:\n                self.session.headers[RestHandler.AUTH_KEY] \\\n                    = cryptor.decode(auth_key)\n        res = self.do_call(url, data, method, calltimeout)\n        if auth_key:\n            self.session.headers[RestHandler.AUTH_KEY] = auth_key\n        return res\n\n    def logout(self):\n        try:\n            if self.san_address:\n                self.call(RestHandler.REST_LOGOUT_URL, None, 'POST')\n            if self.session:\n                self.session.close()\n        except Exception as e:\n            err_msg = \"Logout error: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n    def get_rest_info(self, url, data=None, method='GET',\n                      calltimeout=consts.DEFAULT_TIMEOUT):\n        retry_times = consts.REST_RETRY_TIMES\n        while retry_times >= 0:\n            try:\n                res = self.call(url, data, method, calltimeout)\n                if res.status_code == 200:\n                    return res.json()\n                err_msg = \"REST response abnormal, status_code: %s, body: %s\" \\\n                          % (res.status_code, res.json())\n                LOG.error(err_msg)\n            except Exception as e:\n                LOG.error(e)\n            retry_times -= 1\n        return None\n\n    def call(self, url, data=None, method='GET',\n             calltimeout=consts.DEFAULT_TIMEOUT):\n        try:\n            res = self.call_with_token(url, data, method, calltimeout)\n            if res.status_code == 401:\n                LOG.error(\"Failed to get token, status_code: %s, error_msg: %s\" %\n                          (res.status_code, res.text))\n                self.login()\n                res = self.call_with_token(url, data, method, calltimeout)\n            elif res.status_code == 503:\n                raise exception.InvalidResults(res.text)\n            return res\n        except Exception as e:\n            LOG.error(\"Method: %s, url: %s failed: %s\" % (method, url,\n                                                       six.text_type(e)))\n            raise e\n\n    def get_all_pools(self):\n        url = '%s?%s' % (RestHandler.REST_POOLS_URL,\n                         'fields=id,name,health,type,sizeFree,'\n                         'sizeTotal,sizeUsed,sizeSubscribed')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_storage(self):\n        url = '%s?%s' % (RestHandler.REST_STORAGE_URL,\n                         'fields=name,model,serialNumber,health')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_capacity(self):\n        url = '%s?%s' % (RestHandler.REST_CAPACITY_URL,\n                         'fields=sizeFree,sizeTotal,sizeUsed,'\n                         'sizeSubscribed,totalLogicalSize')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_soft_version(self):\n        url = '%s?%s' % (RestHandler.REST_SOFT_VERSION_URL,\n                         'fields=version')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    
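# The collection endpoints below are paginated: callers pass an\n    # incrementing page number and stop once a response contains no\n    # 'entries' (see UnityStorDriver.list_volumes for the loop).\n    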
def get_all_luns(self, page_number):\n        url = '%s?%s&page=%s' % (RestHandler.REST_LUNS_URL,\n                                 'fields=id,name,health,type,sizeAllocated,'\n                                 'sizeTotal,sizeUsed,pool,wwn,isThinEnabled',\n                                 page_number)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_alerts(self, page_number):\n        url = '%s?%s&page=%s' % (RestHandler.REST_ALERTS_URL,\n                                 'fields=id,timestamp,severity,component,'\n                                 'messageId,message,description,'\n                                 'descriptionId,state',\n                                 page_number)\n        result_json = self.get_rest_info(\n            url, None, 'GET', consts.ALERT_TIMEOUT)\n        return result_json\n\n    def get_all_alerts_without_state(self, page_number):\n        url = '%s?%s&page=%s' % (RestHandler.REST_ALERTS_URL,\n                                 'fields=id,timestamp,severity,component,'\n                                 'messageId,message,description,'\n                                 'descriptionId',\n                                 page_number)\n        result_json = self.get_rest_info(\n            url, None, 'GET', consts.ALERT_TIMEOUT)\n        return result_json\n\n    def remove_alert(self, alert_id):\n        data = {\"state\": RestHandler.STATE_SOLVED}\n        url = '%s%s/action/modify' % (RestHandler.REST_DEL_ALERTS_URL,\n                                      alert_id)\n        result_json = self.get_rest_info(url, data, method='POST')\n        return result_json\n\n    def get_all_controllers(self):\n        url = '%s?%s' % (RestHandler.REST_CONTROLLER_URL,\n                         'fields=id,name,health,model,slotNumber,'\n                         'manufacturer,memorySize')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_disks(self):\n        url = '%s?%s' % (RestHandler.REST_DISK_URL,\n                         'fields=id,name,health,model,slotNumber,'\n                         'manufacturer,version,emcSerialNumber,wwn,'\n                         'rpm,size,diskGroup,diskTechnology')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_fcports(self):\n        url = '%s?%s' % (RestHandler.REST_FCPORT_URL,\n                         'fields=id,name,health,slotNumber,storageProcessor,'\n                         'currentSpeed,wwn')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_ethports(self):\n        url = '%s?%s' % (RestHandler.REST_ETHPORT_URL,\n                         'fields=id,name,health,portNumber,storageProcessor,'\n                         'speed,isLinkUp,macAddress')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_port_interface(self):\n        url = '%s?%s' % (RestHandler.REST_IP_URL,\n                         'fields=id,ipPort,ipProtocolVersion,'\n                         'ipAddress,netmask')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_filesystems(self):\n        url = '%s?%s' % (RestHandler.REST_FILESYSTEM_URL,\n                         'fields=id,name,health,sizeAllocated,accessPolicy,'\n                         'sizeTotal,sizeUsed,isThinEnabled,pool,flrVersion')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_filesystems_without_flr(self):\n  
      url = '%s?%s' % (RestHandler.REST_FILESYSTEM_URL,\n                         'fields=id,name,health,sizeAllocated,accessPolicy,'\n                         'sizeTotal,sizeUsed,isThinEnabled,pool')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_nfsshares(self):\n        url = '%s?%s' % (RestHandler.REST_NFSSHARE_URL,\n                         'fields=id,filesystem,name,path')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_cifsshares(self):\n        url = '%s?%s' % (RestHandler.REST_CIFSSHARE_URL,\n                         'fields=id,filesystem,name,path')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_qtrees(self):\n        url = '%s?%s' % (RestHandler.REST_QTREE_URL,\n                         'fields=id,filesystem,description,path,hardLimit,'\n                         'softLimit,sizeUsed,quotaConfig')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_userquotas(self):\n        url = '%s?%s' % (RestHandler.REST_USERQUOTA_URL,\n                         'fields=id,filesystem,hardLimit,softLimit,'\n                         'sizeUsed,treeQuota,uid')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_quota_configs(self):\n        url = '%s?%s' % (RestHandler.REST_QUOTACONFIG_URL,\n                         'fields=id,filesystem,treeQuota,quotaPolicy')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_host_initiators(self, page):\n        url = '/api/types/hostInitiator/instances?%s&page=%s' % \\\n              ('fields=id,health,type,parentHost,initiatorId', page)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_hosts(self, page):\n        url = '/api/types/host/instances?%s&page=%s' \\\n              % ('fields=id,health,name,description,osType', page)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_host_ip(self):\n        url = '/api/types/hostIPPort/instances?%s' % \\\n              ('fields=id,name,address,netmask,host')\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_host_lun(self, page):\n        url = '/api/types/hostLUN/instances?%s&page=%s' % \\\n              ('fields=id,host,lun', page)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_history_metrics(self, path, page):\n        url = '/api/types/metricValue/instances?filter=path EQ \"%s\"&page=%s'\\\n              % (path, page)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_virtual_disks(self):\n        url = '%s?%s' % (RestHandler.REST_VIRTUAL_DISK_URL,\n                         'fields=health,name,spaScsiId,tierType,sizeTotal,'\n                         'id,model,manufacturer,wwn')\n        result_json = self.get_rest_info(url)\n        return result_json\n"
  },
  {
    "path": "delfin/drivers/dell_emc/unity/unity.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\n\nimport six\nfrom oslo_log import log\nfrom oslo_utils import units\n\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom delfin.drivers.dell_emc.unity import rest_handler, alert_handler, consts\nfrom delfin.drivers.dell_emc.unity.alert_handler import AlertHandler\n\nLOG = log.getLogger(__name__)\n\n\nclass UnityStorDriver(driver.StorageDriver):\n    \"\"\"UnityStorDriver implement the DELL EMC Storage driver\"\"\"\n    HEALTH_OK = (5, 7)\n    STORAGE_STATUS_MAP = {5: constants.StorageStatus.NORMAL,\n                          7: constants.StorageStatus.NORMAL,\n                          15: constants.StorageStatus.NORMAL,\n                          20: constants.StorageStatus.NORMAL,\n                          10: constants.StorageStatus.DEGRADED\n                          }\n    FILESYSTEM_FLR_MAP = {0: constants.WORMType.NON_WORM,\n                          1: constants.WORMType.ENTERPRISE,\n                          2: constants.WORMType.COMPLIANCE\n                          }\n    FILESYSTEM_SECURITY_MAP = {0: constants.NASSecurityMode.NATIVE,\n                               1: constants.NASSecurityMode.UNIX,\n                               2: constants.NASSecurityMode.NTFS\n                               }\n    CONTROLLER_STATUS_MAP = {5: constants.ControllerStatus.NORMAL,\n                             7: constants.ControllerStatus.NORMAL,\n                             10: constants.ControllerStatus.DEGRADED\n                             }\n    DISK_TYPE_MAP = {1: constants.DiskPhysicalType.SAS,\n                     2: constants.DiskPhysicalType.NL_SAS,\n                     6: constants.DiskPhysicalType.FLASH,\n                     7: constants.DiskPhysicalType.FLASH,\n                     8: constants.DiskPhysicalType.FLASH,\n                     9: constants.DiskPhysicalType.FLASH,\n                     99: constants.DiskPhysicalType.VMDISK\n                     }\n    VOLUME_PERF_METRICS = {\n        'readIops': 'sp.*.storage.lun.*.readsRate',\n        'writeIops': 'sp.*.storage.lun.*.writesRate',\n        'readThroughput': 'sp.*.storage.lun.*.readBytesRate',\n        'writeThroughput': 'sp.*.storage.lun.*.writeBytesRate',\n        'responseTime': 'sp.*.storage.lun.*.responseTime',\n        'readIoSize': 'sp.*.storage.lun.*.avgReadSize',\n        'writeIoSize': 'sp.*.storage.lun.*.avgWriteSize'\n    }\n    DISK_PERF_METRICS = {\n        'readIops': 'sp.*.physical.disk.*.readsRate',\n        'writeIops': 'sp.*.physical.disk.*.writesRate',\n        'readThroughput': 'sp.*.physical.disk.*.readBytesRate',\n        'writeThroughput': 'sp.*.physical.disk.*.writeBytesRate',\n        'responseTime': 'sp.*.physical.disk.*.responseTime'\n    }\n    ETHERNET_PORT_METRICS = {\n        'readThroughput': 'sp.*.net.device.*.bytesInRate',\n        'writeThroughput': 'sp.*.net.device.*.bytesOutRate',\n        'readIops': 
'sp.*.net.device.*.pktsInRate',\n        'writeIops': 'sp.*.net.device.*.pktsOutRate',\n    }\n    FC_PORT_METRICS = {\n        'readIops': 'sp.*.fibreChannel.fePort.*.readsRate',\n        'writeIops': 'sp.*.fibreChannel.fePort.*.writesRate',\n        'readThroughput': 'sp.*.fibreChannel.fePort.*.readBytesRate',\n        'writeThroughput': 'sp.*.fibreChannel.fePort.*.writeBytesRate'\n    }\n    ISCSI_PORT_METRICS = {\n        'readIops': 'sp.*.iscsi.fePort.*.readsRate',\n        'writeIops': 'sp.*.iscsi.fePort.*.writesRate',\n        'readThroughput': 'sp.*.iscsi.fePort.*.readBytesRate',\n        'writeThroughput': 'sp.*.iscsi.fePort.*.writeBytesRate'\n    }\n    FILESYSTEM_PERF_METRICS = {\n        'readIops': 'sp.*.storage.filesystem.*.readsRate',\n        'writeIops': 'sp.*.storage.filesystem.*.writesRate',\n        'readThroughput': 'sp.*.storage.filesystem.*.readBytesRate',\n        'writeThroughput': 'sp.*.storage.filesystem.*.writeBytesRate',\n        'readIoSize': 'sp.*.storage.filesystem.*.readSizeAvg',\n        'writeIoSize': 'sp.*.storage.filesystem.*.writeSizeAvg'\n    }\n    PERF_TYPE_MAP = {\n        'readIops': {'write': 'writeIops',\n                     'total': 'iops'},\n        'readThroughput': {'write': 'writeThroughput',\n                           'total': 'throughput'},\n        'readIoSize': {'write': 'writeIoSize',\n                       'total': 'ioSize'},\n    }\n    MS_PER_HOUR = 60 * 60 * 1000\n\n    OS_TYPE_MAP = {'AIX': constants.HostOSTypes.AIX,\n                   'Citrix XenServer': constants.HostOSTypes.XEN_SERVER,\n                   'HP-UX': constants.HostOSTypes.HP_UX,\n                   'IBM VIOS': constants.HostOSTypes.UNKNOWN,\n                   'Linux': constants.HostOSTypes.LINUX,\n                   'Mac OS': constants.HostOSTypes.MAC_OS,\n                   'Solaris': constants.HostOSTypes.SOLARIS,\n                   'VMware ESXi': constants.HostOSTypes.VMWARE_ESX,\n                   'Windows Client': constants.HostOSTypes.WINDOWS,\n                   'Windows Server': constants.HostOSTypes.WINDOWS\n                   }\n    INITIATOR_STATUS_MAP = {5: constants.InitiatorStatus.ONLINE,\n                            7: constants.InitiatorStatus.ONLINE,\n                            15: constants.InitiatorStatus.ONLINE,\n                            20: constants.InitiatorStatus.ONLINE,\n                            10: constants.InitiatorStatus.OFFLINE\n                            }\n    HOST_STATUS_MAP = {5: constants.HostStatus.NORMAL,\n                       7: constants.HostStatus.NORMAL,\n                       15: constants.HostStatus.NORMAL,\n                       20: constants.HostStatus.NORMAL,\n                       10: constants.HostStatus.DEGRADED\n                       }\n    INITIATOR_TYPE_MAP = {0: constants.InitiatorType.UNKNOWN,\n                          1: constants.InitiatorType.FC,\n                          2: constants.InitiatorType.ISCSI\n                          }\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.rest_handler = rest_handler.RestHandler(**kwargs)\n        self.rest_handler.login()\n\n    def reset_connection(self, context, **kwargs):\n        self.rest_handler.logout()\n        self.rest_handler.verify = kwargs.get('verify', False)\n        self.rest_handler.login()\n\n    def close_connection(self):\n        self.rest_handler.logout()\n\n    def get_disk_capacity(self, context):\n        raw_capacity = 0\n        try:\n            disk_info = 
self.list_disks(context)\n            if disk_info:\n                for disk in disk_info:\n                    raw_capacity += disk.get('capacity')\n        except Exception:\n            LOG.info(\"Failed to get disk info in get_disk_capacity\")\n        return raw_capacity\n\n    def get_storage(self, context):\n        system_info = self.rest_handler.get_storage()\n        capacity = self.rest_handler.get_capacity()\n        version_info = self.rest_handler.get_soft_version()\n        if not system_info or not capacity:\n            err_msg = \"Unity get system or capacity info failed\"\n            LOG.error(err_msg)\n            raise exception.StorageBackendException(err_msg)\n        system_entries = system_info.get('entries')\n        for system in system_entries:\n            content = system.get('content', {})\n            name = content.get('name')\n            model = content.get('model')\n            serial_number = content.get('serialNumber')\n            health_value = content.get('health', {}).get('value')\n            status = UnityStorDriver.STORAGE_STATUS_MAP.get(\n                health_value, constants.StorageStatus.ABNORMAL)\n            break\n        capacity_info = capacity.get('entries')\n        for per_capacity in capacity_info:\n            content = per_capacity.get('content', {})\n            free = content.get('sizeFree')\n            total = content.get('sizeTotal')\n            used = content.get('sizeUsed')\n            subs = content.get('sizeSubscribed')\n            break\n        if version_info:\n            soft_version = version_info.get('entries')\n            for soft_info in soft_version:\n                content = soft_info.get('content', {})\n                if content:\n                    version = content.get('id')\n                    break\n        raw_capacity = self.get_disk_capacity(context)\n        raw_capacity = raw_capacity if raw_capacity else int(total)\n        system_result = {\n            'name': name,\n            'vendor': 'DELL EMC',\n            'model': model,\n            'status': status,\n            'serial_number': serial_number,\n            'firmware_version': version,\n            'location': '',\n            'subscribed_capacity': int(subs),\n            'total_capacity': int(total),\n            'raw_capacity': raw_capacity,\n            'used_capacity': int(used),\n            'free_capacity': int(free)\n        }\n        return system_result\n\n    def list_storage_pools(self, context):\n        pool_info = self.rest_handler.get_all_pools()\n        pool_list = []\n        pool_type = constants.StorageType.UNIFIED\n        if pool_info is not None:\n            pool_entries = pool_info.get('entries')\n            for pool in pool_entries:\n                content = pool.get('content', {})\n                health_value = content.get('health').get('value')\n                if health_value in UnityStorDriver.HEALTH_OK:\n                    status = constants.StorageStatus.NORMAL\n                else:\n                    status = constants.StorageStatus.ABNORMAL\n                pool_result = {\n                    'name': content.get('name'),\n                    'storage_id': self.storage_id,\n                    'native_storage_pool_id': str(content.get('id')),\n                    'description': content.get('description'),\n                    'status': status,\n                    'storage_type': pool_type,\n                    'total_capacity': int(content.get('sizeTotal')),\n                    
'subscribed_capacity': int(content.get('sizeSubscribed')),\n                    'used_capacity': int(content.get('sizeUsed')),\n                    'free_capacity': int(content.get('sizeFree'))\n                }\n                pool_list.append(pool_result)\n        return pool_list\n\n    def volume_handler(self, volumes, volume_list):\n        if volumes is not None:\n            vol_entries = volumes.get('entries')\n            for volume in vol_entries:\n                content = volume.get('content', {})\n                total = content.get('sizeTotal')\n                used = content.get('sizeAllocated')\n                vol_type = constants.VolumeType.THICK\n                if content.get('isThinEnabled') is True:\n                    vol_type = constants.VolumeType.THIN\n                health_value = content.get('health').get('value')\n                if health_value in UnityStorDriver.HEALTH_OK:\n                    status = constants.StorageStatus.NORMAL\n                else:\n                    status = constants.StorageStatus.ABNORMAL\n                volume_result = {\n                    'name': content.get('name'),\n                    'storage_id': self.storage_id,\n                    'description': content.get('description'),\n                    'status': status,\n                    'native_volume_id': str(content.get('id')),\n                    'native_storage_pool_id': content.get('pool').get('id'),\n                    'wwn': content.get('wwn'),\n                    'type': vol_type,\n                    'total_capacity': int(total),\n                    'used_capacity': int(used),\n                    'free_capacity': int(total - used)\n                }\n                volume_list.append(volume_result)\n\n    def list_volumes(self, context):\n        page_number = 1\n        volume_list = []\n        while True:\n            luns = self.rest_handler.get_all_luns(page_number)\n            if luns is None:\n                break\n            if 'entries' not in luns:\n                break\n            if len(luns['entries']) < 1:\n                break\n            self.volume_handler(luns, volume_list)\n            page_number = page_number + 1\n\n        return volume_list\n\n    def list_alerts(self, context, query_para=None):\n        page_number = 1\n        alert_model_list = []\n        while True:\n            alert_list = self.rest_handler.get_all_alerts(page_number)\n            if not alert_list:\n                alert_list = self.rest_handler.get_all_alerts_without_state(\n                    page_number)\n            if alert_list is None:\n                break\n            if 'entries' not in alert_list:\n                break\n            if len(alert_list['entries']) < 1:\n                break\n            alert_handler.AlertHandler() \\\n                .parse_queried_alerts(alert_model_list, alert_list, query_para)\n            page_number = page_number + 1\n\n        return alert_model_list\n\n    def list_controllers(self, context):\n        try:\n            controller_list = []\n            controller_info = self.rest_handler.get_all_controllers()\n            if controller_info is not None:\n                pool_entries = controller_info.get('entries')\n                for pool in pool_entries:\n                    content = pool.get('content')\n                    if not content:\n                        continue\n                    health_value = content.get('health', {}).get('value')\n                    status = 
UnityStorDriver.CONTROLLER_STATUS_MAP.get(\n                        health_value,\n                        constants.ControllerStatus.FAULT\n                    )\n                    controller_result = {\n                        'name': content.get('name'),\n                        'storage_id': self.storage_id,\n                        'native_controller_id': content.get('id'),\n                        'status': status,\n                        'location': content.get('slotNumber'),\n                        'memory_size':\n                            int(content.get('memorySize')) * units.Mi\n                    }\n                    controller_list.append(controller_result)\n            return controller_list\n        except Exception as err:\n            err_msg = \"Failed to get controller attributes from Unity: %s\" %\\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    @staticmethod\n    def handle_port_ip(ip, result):\n        if ip is None:\n            ip = result\n        else:\n            ip = '%s;%s' % (ip, result)\n        return ip\n\n    def get_eth_ports(self):\n        port_list = []\n        ports = self.rest_handler.get_all_ethports()\n        ip_interfaces = self.rest_handler.get_port_interface()\n        if ports:\n            port_entries = ports.get('entries')\n            for port in port_entries:\n                content = port.get('content')\n                if not content:\n                    continue\n                health_value = content.get('health', {}).get('value')\n                if health_value in UnityStorDriver.HEALTH_OK:\n                    status = constants.PortHealthStatus.NORMAL\n                else:\n                    status = constants.PortHealthStatus.ABNORMAL\n                conn_status = constants.PortConnectionStatus.CONNECTED if \\\n                    content.get('isLinkUp') is True \\\n                    else constants.PortConnectionStatus.DISCONNECTED\n                ipv4 = None\n                ipv4_mask = None\n                ipv6 = None\n                ipv6_mask = None\n                if ip_interfaces:\n                    for ip_info in ip_interfaces.get('entries'):\n                        ip_content = ip_info.get('content')\n                        if not ip_content:\n                            continue\n                        if content.get('id') == ip_content.get(\n                                'ipPort').get('id'):\n                            if ip_content.get('ipProtocolVersion') == 4:\n                                ipv4 = UnityStorDriver.handle_port_ip(\n                                    ipv4, ip_content.get('ipAddress'))\n                                ipv4_mask = UnityStorDriver.handle_port_ip(\n                                    ipv4_mask, ip_content.get('netmask'))\n                            else:\n                                ipv6 = UnityStorDriver.handle_port_ip(\n                                    ipv6, ip_content.get('ipAddress'))\n                                ipv6_mask = UnityStorDriver.handle_port_ip(\n                                    ipv6_mask, ip_content.get('netmask'))\n                port_result = {\n                    'name': content.get('name'),\n                    'storage_id': self.storage_id,\n                    'native_port_id': content.get('id'),\n                    'location': content.get('name'),\n                    'connection_status': conn_status,\n                    
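# Unity reports Ethernet port 'speed' in Mb/s; the speed\n                    # fields below scale it to bit/s using decimal units.M.\n                    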
'health_status': status,\n                    'type': constants.PortType.ETH,\n                    'logical_type': '',\n                    'speed': int(content.get('speed')) * units.M\n                    if content.get('speed') is not None else None,\n                    'max_speed': int(content.get('speed')) * units.M\n                    if content.get('speed') is not None else None,\n                    'native_parent_id':\n                        content.get('storageProcessor', {}).get('id'),\n                    'wwn': '',\n                    'mac_address': content.get('macAddress'),\n                    'ipv4': ipv4,\n                    'ipv4_mask': ipv4_mask,\n                    'ipv6': ipv6,\n                    'ipv6_mask': ipv6_mask\n                }\n                port_list.append(port_result)\n        return port_list\n\n    def get_fc_ports(self):\n        port_list = []\n        ports = self.rest_handler.get_all_fcports()\n        if ports:\n            port_entries = ports.get('entries')\n            for port in port_entries:\n                content = port.get('content')\n                if not content:\n                    continue\n                health_value = content.get('health', {}).get('value')\n                connect_value = \\\n                    content.get('health', {}).get('descriptionIds', [])\n                if 'ALRT_PORT_LINK_DOWN_NOT_IN_USE' in connect_value:\n                    conn_status = constants.PortConnectionStatus.DISCONNECTED\n                elif 'ALRT_PORT_LINK_UP' in connect_value:\n                    conn_status = constants.PortConnectionStatus.CONNECTED\n                else:\n                    conn_status = constants.PortConnectionStatus.UNKNOWN\n                if health_value in UnityStorDriver.HEALTH_OK:\n                    status = constants.PortHealthStatus.NORMAL\n                else:\n                    status = constants.PortHealthStatus.ABNORMAL\n                port_result = {\n                    'name': content.get('name'),\n                    'storage_id': self.storage_id,\n                    'native_port_id': content.get('id'),\n                    'location': content.get('name'),\n                    'connection_status': conn_status,\n                    'health_status': status,\n                    'type': constants.PortType.FC,\n                    'logical_type': '',\n                    'speed': int(content.get('currentSpeed')) * units.G\n                    if content.get('currentSpeed') is not None else None,\n                    'max_speed': int(content.get('currentSpeed')) * units.G\n                    if content.get('currentSpeed') is not None else None,\n                    'native_parent_id':\n                        content.get('storageProcessor', {}).get('id'),\n                    'wwn': content.get('wwn')\n                }\n                port_list.append(port_result)\n        return port_list\n\n    def list_ports(self, context):\n        try:\n            port_list = []\n            port_list.extend(self.get_eth_ports())\n            port_list.extend(self.get_fc_ports())\n            return port_list\n        except Exception as err:\n            err_msg = \"Failed to get ports attributes from Unity: %s\" % \\\n                      (six.text_type(err))\n            raise exception.InvalidResults(err_msg)\n\n    def list_disks(self, context):\n        try:\n            disks = self.rest_handler.get_all_disks()\n            disk_list = []\n            if disks and disks.get('entries'):\n  
              disk_entries = disks.get('entries')\n                for disk in disk_entries:\n                    content = disk.get('content')\n                    if not content:\n                        continue\n                    health_value = content.get('health', {}).get('value')\n                    slot_info = \\\n                        content.get('health', {}).get('descriptionIds', [])\n                    if 'ALRT_DISK_SLOT_EMPTY' in slot_info:\n                        continue\n                    if health_value in UnityStorDriver.HEALTH_OK:\n                        status = constants.DiskStatus.NORMAL\n                    else:\n                        status = constants.DiskStatus.ABNORMAL\n                    physical_type = UnityStorDriver.DISK_TYPE_MAP.get(\n                        content.get('diskTechnology'),\n                        constants.DiskPhysicalType.UNKNOWN)\n                    disk_result = {\n                        'name': content.get('name'),\n                        'storage_id': self.storage_id,\n                        'native_disk_id': content.get('id'),\n                        'serial_number': content.get('emcSerialNumber'),\n                        'manufacturer': content.get('manufacturer'),\n                        'model': content.get('model'),\n                        'firmware': content.get('version'),\n                        'speed': int(content.get('rpm')),\n                        'capacity': int(content.get('size')),\n                        'status': status,\n                        'physical_type': physical_type,\n                        'logical_type': '',\n                        'native_disk_group_id':\n                            content.get('diskGroup', {}).get('id'),\n                        'location': content.get('name')\n                    }\n                    disk_list.append(disk_result)\n            else:\n                disk_list = self.get_virtual_disk()\n            return disk_list\n\n        except Exception as err:\n            err_msg = \"Failed to get disk attributes from Unity: %s\" % \\\n                      (six.text_type(err))\n            raise exception.InvalidResults(err_msg)\n\n    def list_filesystems(self, context):\n        try:\n            files = self.rest_handler.get_all_filesystems()\n            if not files:\n                files = self.rest_handler.get_all_filesystems_without_flr()\n            fs_list = []\n            if files is not None:\n                fs_entries = files.get('entries')\n                for file in fs_entries:\n                    content = file.get('content')\n                    if not content:\n                        continue\n                    health_value = content.get('health', {}).get('value')\n                    if health_value in UnityStorDriver.HEALTH_OK:\n                        status = constants.FilesystemStatus.NORMAL\n                    else:\n                        status = constants.FilesystemStatus.FAULTY\n                    fs_type = constants.VolumeType.THICK\n                    if content.get('isThinEnabled') is True:\n                        fs_type = constants.VolumeType.THIN\n                    worm = UnityStorDriver.FILESYSTEM_FLR_MAP.get(\n                        content.get('flrVersion'),\n                        constants.WORMType.NON_WORM)\n                    security_model = \\\n                        UnityStorDriver.FILESYSTEM_SECURITY_MAP.get(\n                            content.get('accessPolicy'),\n                      
      constants.NASSecurityMode.NATIVE\n                        )\n                    fs = {\n                        'name': content.get('name'),\n                        'storage_id': self.storage_id,\n                        'native_filesystem_id': content.get('id'),\n                        'native_pool_id': content.get('pool', {}).get('id'),\n                        'status': status,\n                        'type': fs_type,\n                        'total_capacity': int(content.get('sizeTotal')),\n                        'used_capacity': int(content.get('sizeUsed')),\n                        'free_capacity': int(content.get('sizeTotal')) - int(\n                            content.get('sizeUsed')),\n                        'worm': worm,\n                        'security_mode': security_model\n                    }\n                    fs_list.append(fs)\n            return fs_list\n        except Exception as err:\n            err_msg = \"Failed to get filesystem attributes from Unity: %s\"\\\n                      % (six.text_type(err))\n            raise exception.InvalidResults(err_msg)\n\n    def list_qtrees(self, context):\n        try:\n            qts = self.rest_handler.get_all_qtrees()\n            qt_list = []\n            if qts is not None:\n                qts_entries = qts.get('entries')\n                for qtree in qts_entries:\n                    content = qtree.get('content')\n                    if not content:\n                        continue\n                    qt = {\n                        'name': content.get('path'),\n                        'storage_id': self.storage_id,\n                        'native_qtree_id': content.get('id'),\n                        'native_filesystem_id':\n                            content.get('filesystem', {}).get('id'),\n                        'path': content.get('path')\n                    }\n                    qt_list.append(qt)\n            return qt_list\n        except Exception as err:\n            err_msg = \"Failed to get qtree attributes from Unity: %s\"\\\n                      % (six.text_type(err))\n            raise exception.InvalidResults(err_msg)\n\n    def get_share_qtree(self, path, qtree_list):\n        qtree_id = None\n        if not qtree_list:\n            return qtree_id\n        qts_entries = qtree_list.get('entries')\n        for qtree in qts_entries:\n            content = qtree.get('content')\n            if not content:\n                continue\n            if content.get('path') == path:\n                qtree_id = content.get('id')\n                break\n        return qtree_id\n\n    def get_share(self, protocol, qtree_list, filesystems):\n        try:\n            share_list = []\n            if protocol == 'cifs':\n                shares = self.rest_handler.get_all_cifsshares()\n                protocol = constants.ShareProtocol.CIFS\n            else:\n                shares = self.rest_handler.get_all_nfsshares()\n                protocol = constants.ShareProtocol.NFS\n            if shares is not None:\n                share_entries = shares.get('entries')\n                for share in share_entries:\n                    content = share.get('content')\n                    if not content:\n                        continue\n                    file_name = ''\n                    if filesystems:\n                        file_entries = filesystems.get('entries')\n                        for file in file_entries:\n                            file_content = file.get('content')\n            
                if not file_content:\n                                continue\n                            if file_content.get('id') == content.get(\n                                    'filesystem', {}).get('id'):\n                                file_name = file_content.get('name')\n                                break\n                    path = '/%s%s' % (file_name, content.get('path')) if \\\n                        file_name != '' else content.get('path')\n                    fs = {\n                        'name': content.get('name'),\n                        'storage_id': self.storage_id,\n                        'native_share_id': content.get('id'),\n                        'native_qtree_id': self.get_share_qtree(\n                            content.get('path'), qtree_list),\n                        'native_filesystem_id':\n                            content.get('filesystem', {}).get('id'),\n                        'path': path,\n                        'protocol': protocol\n                    }\n                    share_list.append(fs)\n            return share_list\n        except Exception as err:\n            err_msg = \"Failed to get share attributes from Unity: %s\"\\\n                      % (six.text_type(err))\n            raise exception.InvalidResults(err_msg)\n\n    def list_shares(self, context):\n        try:\n            share_list = []\n            qtrees = self.rest_handler.get_all_qtrees()\n            filesystems = self.rest_handler.get_all_filesystems()\n            if not filesystems:\n                filesystems = \\\n                    self.rest_handler.get_all_filesystems_without_flr()\n            share_list.extend(self.get_share('cifs', qtrees, filesystems))\n            share_list.extend(self.get_share('nfs', qtrees, filesystems))\n            return share_list\n        except Exception as err:\n            err_msg = \"Failed to get shares attributes from Unity: %s\"\\\n                      % (six.text_type(err))\n            raise exception.InvalidResults(err_msg)\n\n    def get_tree_quotas(self):\n        quotas_list = []\n        qts = self.rest_handler.get_all_qtrees()\n        if qts is None:\n            return quotas_list\n        qt_entries = qts.get('entries')\n        for quota in qt_entries:\n            content = quota.get('content')\n            if not content:\n                continue\n            qt = {\n                \"native_quota_id\": content.get('id'),\n                \"type\": constants.QuotaType.TREE,\n                \"storage_id\": self.storage_id,\n                \"native_filesystem_id\":\n                    content.get('filesystem', {}).get('id'),\n                \"native_qtree_id\": content.get('id'),\n                \"capacity_hard_limit\": content.get('hardLimit'),\n                \"capacity_soft_limit\": content.get('softLimit'),\n                \"used_capacity\": int(content.get('sizeUsed'))\n            }\n            quotas_list.append(qt)\n        return quotas_list\n\n    def get_user_quotas(self):\n        quotas_list = []\n        user_qts = self.rest_handler.get_all_userquotas()\n        if user_qts is None:\n            return quotas_list\n        user_entries = user_qts.get('entries')\n        for user_quota in user_entries:\n            content = user_quota.get('content')\n            if not content:\n                continue\n            qt = {\n                \"native_quota_id\": content.get('id'),\n                \"type\": constants.QuotaType.USER,\n                \"storage_id\": 
self.storage_id,\n                \"native_filesystem_id\":\n                    content.get('filesystem', {}).get('id'),\n                \"native_qtree_id\": content.get('treeQuota', {}).get('id'),\n                \"capacity_hard_limit\": content.get('hardLimit'),\n                \"capacity_soft_limit\": content.get('softLimit'),\n                \"used_capacity\": int(content.get('sizeUsed')),\n                \"user_group_name\": str(content.get('uid'))\n            }\n            quotas_list.append(qt)\n        return quotas_list\n\n    def list_quotas(self, context):\n        try:\n            quotas_list = []\n            quotas_list.extend(self.get_tree_quotas())\n            quotas_list.extend(self.get_user_quotas())\n            return quotas_list\n        except Exception as err:\n            err_msg = \"Failed to get quotas attributes from Unity: %s\"\\\n                      % (six.text_type(err))\n            raise exception.InvalidResults(err_msg)\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return AlertHandler.parse_alert(context, alert)\n\n    def clear_alert(self, context, alert):\n        return self.rest_handler.remove_alert(alert)\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}'\n\n    def list_storage_host_initiators(self, context):\n        try:\n            initiator_list = []\n            page = 1\n            while True:\n                initiators = self.rest_handler.get_host_initiators(page)\n                if not initiators:\n                    return initiator_list\n                if 'entries' not in initiators or \\\n                        len(initiators['entries']) < 1:\n                    break\n                init_entries = initiators.get('entries')\n                for initiator in init_entries:\n                    content = initiator.get('content')\n                    if not content:\n                        continue\n                    health_value = content.get('health', {}).get('value')\n                    status = UnityStorDriver.INITIATOR_STATUS_MAP.get(\n                        health_value,\n                        constants.InitiatorStatus.UNKNOWN\n                    )\n                    init_result = {\n                        \"name\": content.get('initiatorId'),\n                        \"storage_id\": self.storage_id,\n                        \"native_storage_host_initiator_id\": content.get('id'),\n                        \"wwn\": content.get('initiatorId'),\n                        \"status\": status,\n                        \"type\": UnityStorDriver.INITIATOR_TYPE_MAP.get(\n                            content.get('type')),\n                        \"native_storage_host_id\": content.get(\n                            'parentHost', {}).get('id')\n                    }\n                    initiator_list.append(init_result)\n                page += 1\n            return initiator_list\n        except Exception as e:\n            LOG.error(\"Failed to get initiators from unity\")\n            raise e\n\n    def list_storage_hosts(self, context):\n        try:\n            host_list = []\n            page = 1\n            while True:\n                hosts = self.rest_handler.get_all_hosts(page)\n                if not hosts:\n                    return host_list\n                if 'entries' not in hosts or len(hosts['entries']) < 1:\n 
                   break\n                ips = self.rest_handler.get_host_ip()\n                host_entries = hosts.get('entries')\n                for host in host_entries:\n                    host_ip = None\n                    content = host.get('content')\n                    if not content:\n                        continue\n                    health_value = content.get('health', {}).get('value')\n                    status = UnityStorDriver.HOST_STATUS_MAP.get(\n                        health_value,\n                        constants.HostStatus.OFFLINE\n                    )\n                    if ips:\n                        ip_entries = ips.get('entries')\n                        for ip in ip_entries:\n                            ip_content = ip.get('content')\n                            if not ip_content:\n                                continue\n                            if ip_content.get('host', {}).get('id') \\\n                                    == content.get('id'):\n                                host_ip = ip_content.get('address')\n                                break\n                    if content.get('osType'):\n                        if 'VMware ESXi' in content.get('osType'):\n                            os_type = constants.HostOSTypes.VMWARE_ESX\n                        else:\n                            os_type = UnityStorDriver.OS_TYPE_MAP.get(\n                                content.get('osType'),\n                                constants.HostOSTypes.UNKNOWN)\n                    else:\n                        os_type = None\n                    host_result = {\n                        \"name\": content.get('name'),\n                        \"description\": content.get('description'),\n                        \"storage_id\": self.storage_id,\n                        \"native_storage_host_id\": content.get('id'),\n                        \"os_type\": os_type,\n                        \"status\": status,\n                        \"ip_address\": host_ip\n                    }\n                    host_list.append(host_result)\n                page += 1\n            return host_list\n        except Exception as e:\n            LOG.error(\"Failed to get hosts from Unity\")\n            raise e\n\n    def list_masking_views(self, context):\n        try:\n            view_list = []\n            page = 1\n            while True:\n                views = self.rest_handler.get_host_lun(page)\n                if not views:\n                    return view_list\n                if 'entries' not in views or len(views['entries']) < 1:\n                    break\n                view_entries = views.get('entries')\n                for view in view_entries:\n                    content = view.get('content')\n                    if not content:\n                        continue\n                    view_result = {\n                        \"name\": content.get('id'),\n                        \"native_storage_host_id\":\n                            content.get('host', {}).get('id'),\n                        \"storage_id\": self.storage_id,\n                        \"native_volume_id\": content.get('lun', {}).get('id'),\n                        \"native_masking_view_id\": content.get('id'),\n                    }\n                    view_list.append(view_result)\n                page += 1\n            return view_list\n\n        except Exception as e:\n            LOG.error(\"Failed to get masking views from Unity\")\n            raise e\n\n    def get_metrics_loop(self, target, start_time,\n                         end_time, 
metrics, path):\n        page = 1\n        bend = False\n        time_map = {'latest_time': 0}\n        if not path:\n            return\n        while True:\n            if bend is True:\n                break\n            results = self.rest_handler.get_history_metrics(path, page)\n            if not results:\n                break\n            if 'entries' not in results:\n                break\n            if len(results['entries']) < 1:\n                break\n            bend = UnityStorDriver.get_metric_value(\n                target, start_time, end_time, metrics, results, time_map)\n            page += 1\n\n    def get_history_metrics(self, resource_type, targets,\n                            start_time, end_time):\n        metrics = []\n        for target in targets:\n            path = None\n            if resource_type == constants.ResourceType.VOLUME:\n                path = self.VOLUME_PERF_METRICS.get(target)\n            elif resource_type == constants.ResourceType.DISK:\n                path = self.DISK_PERF_METRICS.get(target)\n            elif resource_type == constants.ResourceType.FILESYSTEM:\n                path = self.FILESYSTEM_PERF_METRICS.get(target)\n            elif resource_type == constants.ResourceType.PORT:\n                self.get_metrics_loop(target, start_time, end_time, metrics,\n                                      self.ETHERNET_PORT_METRICS.get(target))\n                self.get_metrics_loop(target, start_time, end_time, metrics,\n                                      self.FC_PORT_METRICS.get(target))\n                continue\n            if path:\n                self.get_metrics_loop(target, start_time, end_time,\n                                      metrics, path)\n        return metrics\n\n    @staticmethod\n    def get_metric_value(target, start_time, end_time, metrics,\n                         results, time_map):\n        try:\n            if results is None:\n                return True\n            entries = results.get('entries')\n            for entry in entries:\n                content = entry.get('content')\n                if not content or not content.get('values'):\n                    continue\n                occur_time = int(time.mktime(time.strptime(\n                    content.get('timestamp'),\n                    AlertHandler.TIME_PATTERN))\n                ) * AlertHandler.SECONDS_TO_MS\n                hour_offset = (time.mktime(time.localtime()) - time.mktime(\n                    time.gmtime())) / AlertHandler.SECONDS_PER_HOUR\n                occur_time = occur_time + (int(hour_offset) *\n                                           UnityStorDriver.MS_PER_HOUR)\n                if occur_time < start_time:\n                    return True\n                if time_map.get('latest_time') <= occur_time \\\n                        and time_map.get('latest_time') != 0:\n                    continue\n                time_map['latest_time'] = occur_time\n                if start_time <= occur_time <= end_time:\n                    for sp_value in content.get('values'):\n                        perf_value = content.get('values').get(sp_value)\n                        for key, value in perf_value.items():\n                            bfind = False\n                            value = float(value)\n                            for metric in metrics:\n                                if metric.get('resource_id') == key and \\\n                                        metric.get('type') == target:\n                                    
# Merge this sample into the matched series: response time\n                                    # keeps the per-interval maximum, other counters are\n                                    # summed across storage processors/components.\n                                    if metric.get('values').get(\n                                            occur_time):\n                                        if target == 'responseTime':\n                                            metric.get(\n                                                'values')[occur_time] = \\\n                                                max(value, metric.get(\n                                                    'values').get(\n                                                    occur_time))\n                                        else:\n                                            metric.get('values')[\n                                                occur_time] += value\n                                    else:\n                                        metric.get('values')[occur_time] \\\n                                            = value\n                                    bfind = True\n                                    break\n                            if bfind is False:\n                                metric_value = {\n                                    'type': target,\n                                    'resource_id': key,\n                                    'values': {occur_time: value}\n                                }\n                                metrics.append(metric_value)\n        except Exception as err:\n            err_msg = \"Failed to collect history metrics from Unity: %s, \" \\\n                      \"target:%s\" % (six.text_type(err), target)\n            LOG.error(err_msg)\n        return False\n\n    @staticmethod\n    def count_total_perf(metrics):\n        \"\"\"Derive 'total' series by summing matched read and write values.\"\"\"\n        if metrics is None:\n            return\n        for metric in metrics:\n            write_type = None\n            total_type = None\n            if UnityStorDriver.PERF_TYPE_MAP.get(metric.get('type')):\n                write_type = UnityStorDriver.PERF_TYPE_MAP.get(\n                    metric.get('type')).get('write')\n                total_type = UnityStorDriver.PERF_TYPE_MAP.get(\n                    metric.get('type')).get('total')\n            else:\n                continue\n            for metric_write in metrics:\n                if metric_write.get('resource_id') == \\\n                        metric.get('resource_id') \\\n                        and metric_write.get('type') == write_type:\n                    total = {\n                        'type': total_type,\n                        'resource_id': metric.get('resource_id')\n                    }\n                    bfind_total = False\n                    for tr, read in metric.get('values').items():\n                        for tw, write in metric_write.get(\n                                'values').items():\n                            if tr == tw:\n                                value = read + write\n                                if total.get('values'):\n                                    total['values'][tr] = value\n                                else:\n                                    total['values'] = {tr: value}\n                                bfind_total = True\n                                break\n                    if bfind_total:\n                        metrics.append(total)\n                    break\n\n    @staticmethod\n    def package_metrics(storage_id, resource_type, metrics, metrics_list):\n        for metric in metrics_list:\n            unit = None\n            if resource_type == constants.ResourceType.PORT:\n                unit = consts.PORT_CAP[metric.get('type')]['unit']\n            elif 
resource_type == constants.ResourceType.VOLUME:\n                unit = consts.VOLUME_CAP[metric.get('type')]['unit']\n            elif resource_type == constants.ResourceType.DISK:\n                unit = consts.DISK_CAP[metric.get('type')]['unit']\n            elif resource_type == constants.ResourceType.FILESYSTEM:\n                unit = consts.FILESYSTEM_CAP[metric.get('type')]['unit']\n            labels = {\n                'storage_id': storage_id,\n                'resource_type': resource_type,\n                'resource_id': metric.get('resource_id'),\n                'type': 'RAW',\n                'unit': unit\n            }\n            if 'THROUGHPUT' in metric.get('type').upper() or \\\n                    'RESPONSETIME' in metric.get('type').upper():\n                for tm in metric.get('values'):\n                    metric['values'][tm] = metric['values'][tm] / units.k\n            value = constants.metric_struct(name=metric.get('type'),\n                                            labels=labels,\n                                            values=metric.get('values'))\n            metrics.append(value)\n\n    def collect_perf_metrics(self, context, storage_id,\n                             resource_metrics, start_time,\n                             end_time):\n        metrics = []\n        try:\n            if resource_metrics.get(constants.ResourceType.VOLUME):\n                volume_metrics = self.get_history_metrics(\n                    constants.ResourceType.VOLUME,\n                    resource_metrics.get(constants.ResourceType.VOLUME),\n                    start_time,\n                    end_time)\n                UnityStorDriver.count_total_perf(volume_metrics)\n                UnityStorDriver.package_metrics(storage_id,\n                                                constants.ResourceType.VOLUME,\n                                                metrics, volume_metrics)\n            if resource_metrics.get(constants.ResourceType.DISK):\n                disk_metrics = self.get_history_metrics(\n                    constants.ResourceType.DISK,\n                    resource_metrics.get(constants.ResourceType.DISK),\n                    start_time,\n                    end_time)\n                UnityStorDriver.count_total_perf(disk_metrics)\n                UnityStorDriver.package_metrics(storage_id,\n                                                constants.ResourceType.DISK,\n                                                metrics, disk_metrics)\n            if resource_metrics.get(constants.ResourceType.PORT):\n                port_metrics = self.get_history_metrics(\n                    constants.ResourceType.PORT,\n                    resource_metrics.get(constants.ResourceType.PORT),\n                    start_time,\n                    end_time)\n                UnityStorDriver.count_total_perf(port_metrics)\n                UnityStorDriver.package_metrics(storage_id,\n                                                constants.ResourceType.PORT,\n                                                metrics, port_metrics)\n            if resource_metrics.get(constants.ResourceType.FILESYSTEM):\n                file_metrics = self.get_history_metrics(\n                    constants.ResourceType.FILESYSTEM,\n                    resource_metrics.get(constants.ResourceType.FILESYSTEM),\n                    start_time,\n                    end_time)\n                UnityStorDriver.count_total_perf(file_metrics)\n                UnityStorDriver.package_metrics(\n  
                  storage_id, constants.ResourceType.FILESYSTEM,\n                    metrics, file_metrics)\n        except Exception as err:\n            err_msg = \"Failed to collect metrics from Unity: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return metrics\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        \"\"\"Get capability of supported driver\"\"\"\n        return {\n            'is_historic': True,\n            'resource_metrics': {\n                constants.ResourceType.VOLUME: consts.VOLUME_CAP,\n                constants.ResourceType.PORT: consts.PORT_CAP,\n                constants.ResourceType.DISK: consts.DISK_CAP,\n                constants.ResourceType.FILESYSTEM: consts.FILESYSTEM_CAP\n            }\n        }\n\n    def get_latest_perf_timestamp(self, context):\n        latest_time = 0\n        page = 1\n        results = self.rest_handler.get_history_metrics(\n            UnityStorDriver.VOLUME_PERF_METRICS.get('readIops'), page)\n        if not results:\n            results = self.rest_handler.get_history_metrics(\n                UnityStorDriver.ETHERNET_PORT_METRICS.get('readIops'), page)\n        if results:\n            if 'entries' in results:\n                entries = results.get('entries')\n                for entry in entries:\n                    content = entry.get('content')\n                    if not content:\n                        continue\n                    occur_time = int(time.mktime(time.strptime(\n                        content.get('timestamp'),\n                        AlertHandler.TIME_PATTERN))\n                    ) * AlertHandler.SECONDS_TO_MS\n                    hour_offset = \\\n                        (time.mktime(time.localtime()) -\n                         time.mktime(time.gmtime()))\\\n                        / AlertHandler.SECONDS_PER_HOUR\n                    occur_time = occur_time + (int(hour_offset) *\n                                               UnityStorDriver.MS_PER_HOUR)\n                    latest_time = occur_time\n                    break\n        return latest_time\n\n    def get_virtual_disk(self):\n        try:\n            disks = self.rest_handler.get_virtual_disks()\n            disk_list = []\n            if disks is not None:\n                disk_entries = disks.get('entries')\n                for disk in disk_entries:\n                    content = disk.get('content')\n                    if not content:\n                        continue\n                    health_value = content.get('health', {}).get('value')\n                    slot_info = \\\n                        content.get('health', {}).get('descriptionIds', [])\n                    if 'ALRT_DISK_SLOT_EMPTY' in slot_info:\n                        continue\n                    if health_value in UnityStorDriver.HEALTH_OK:\n                        status = constants.DiskStatus.NORMAL\n                    else:\n                        status = constants.DiskStatus.ABNORMAL\n                    disk_result = {\n                        'name': content.get('name'),\n                        'storage_id': self.storage_id,\n                        'native_disk_id': content.get('id'),\n                        'capacity': int(content.get('sizeTotal')),\n                        'status': status,\n                        'manufacturer': content.get('manufacturer'),\n                        'model': content.get('model'),\n   
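                     # Virtual disks (e.g. on UnityVSA) are backed by VM\n                        # disks; report them with the VMDISK physical type.\n   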
                     'physical_type': constants.DiskPhysicalType.VMDISK\n                    }\n                    disk_list.append(disk_result)\n            return disk_list\n        except Exception as err:\n            err_msg = \"Failed to get virtual disk from Unity: %s\" % \\\n                      (six.text_type(err))\n            raise exception.InvalidResults(err_msg)\n"
  },
  {
    "path": "delfin/drivers/dell_emc/vmax/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/dell_emc/vmax/alert_handler/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/dell_emc/vmax/alert_handler/alert_mapper.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# This file contains mapping of alert information to vmax specific description\n\n# component type to descriptive info\ncomponent_type_mapping = {\n    '1024': 'Symmetrix',\n    '1025': 'Service Processor',\n    '1026': 'Device',\n    '1027': 'Physical Disk',\n    '1028': 'Director',\n    '1029': 'Port',\n    '1030': 'SRDF sub-system',\n    '1031': 'SRDF group',\n    '1032': 'Snap Save Device Pool',\n    '1033': 'Cache / Memory',\n    '1034': 'Power or Battery subsystem',\n    '1035': 'Environmental (e.g.: Temperature, Smoke)',\n    '1036': 'Diagnostics',\n    '1037': 'Communications sub-system',\n    '1038': 'External Lock',\n    '1039': 'Fan',\n    '1040': 'Link Controller Card',\n    '1041': 'Enclosure, Enclosure-Slot or MIBE',\n    '1042': 'SRDF/A DSE Device Pool',\n    '1043': 'Thin Device Data Pool',\n    '1044': 'Solutions Enabler DG group',\n    '1045': 'Solutions Enabler CG group',\n    '1046': 'Management Module',\n    '1047': 'IO Module Carrier',\n    '1048': 'Director - Environmental',\n    '1049': 'Storage Group',\n    '1050': 'Migration Session',\n    '1051': 'Symmetrix Disk Group'\n}\n\n# Alarm id to alarm name mapping\n# Currently this contains limited list, to be extended\nalarm_id_name_mapping = {\n    '1': 'SYMAPI_AEVENT2_UID_EVT_RESTARTED',\n    '2': 'SYMAPI_AEVENT2_UID_EVT_EVENTS_LOST',\n    '3': 'SYMAPI_AEVENT2_UID_EVT_EVENTS_OVERFLOW',\n    '1050': 'SYMAPI_AEVENT2_UID_MOD_DIAG_TRACE_TRIG',\n    '1051': 'SYMAPI_AEVENT2_UID_MOD_DIAG_TRACE_TRIG_REM'\n}\n"
  },
  {
    "path": "delfin/drivers/dell_emc/vmax/alert_handler/oid_mapper.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass OidMapper(object):\n    \"\"\"Functions/attributes for oid to alert info mapper\"\"\"\n\n    # Map to translate trap oid strings to oid names\n    OID_MAP = {\"1.3.6.1.3.94.1.11.1.3\": \"connUnitEventId\",\n               \"1.3.6.1.3.94.1.11.1.6\": \"connUnitEventSeverity\",\n               \"1.3.6.1.3.94.1.11.1.7\": \"connUnitEventType\",\n               \"1.3.6.1.3.94.1.11.1.8\": \"connUnitEventObject\",\n               \"1.3.6.1.3.94.1.11.1.9\": \"connUnitEventDescr\",\n               \"1.3.6.1.3.94.1.6.1.20\": \"connUnitName\",\n               \"1.3.6.1.3.94.1.6.1.3\": \"connUnitType\",\n               \"1.3.6.1.4.1.1139.3.8888.1.0\": \"emcAsyncEventSource\",\n               \"1.3.6.1.4.1.1139.3.8888.2.0\": \"emcAsyncEventCode\",\n               \"1.3.6.1.4.1.1139.3.8888.3.0\": \"emcAsyncEventComponentType\",\n               \"1.3.6.1.4.1.1139.3.8888.4.0\": \"emcAsyncEventComponentName\"}\n\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def map_oids(alert):\n        \"\"\"Translate oids using static map.\"\"\"\n        alert_model = dict()\n\n        for attr in alert:\n            # Remove the instance number at the end of oid before mapping\n            oid_str = attr.rsplit('.', 1)[0]\n            key = OidMapper.OID_MAP.get(oid_str, None)\n            alert_model[key] = alert[attr]\n\n        return alert_model\n"
  },
  {
    "path": "delfin/drivers/dell_emc/vmax/alert_handler/snmp_alerts.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\nfrom time import gmtime, strftime\n\nfrom oslo_log import log\n\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.drivers.dell_emc.vmax.alert_handler import alert_mapper\nfrom delfin.drivers.dell_emc.vmax.alert_handler import oid_mapper\n\nLOG = log.getLogger(__name__)\n\n\nclass AlertHandler(object):\n    \"\"\"Alert handling functions for vmax snmp traps\"\"\"\n\n    # Translation of trap severity to alert model severity\n    # Values are:\n    # unknown 1, emergency 2, alert 3, critical 4, error 5,\n    # warning 6, alert 3, notify 7, info 8, debug 9, mark 10\n    SEVERITY_MAP = {\"2\": constants.Severity.FATAL,\n                    \"3\": constants.Severity.CRITICAL,\n                    \"4\": constants.Severity.CRITICAL,\n                    \"5\": constants.Severity.MAJOR,\n                    \"6\": constants.Severity.WARNING,\n                    \"7\": constants.Severity.WARNING,\n                    \"8\": constants.Severity.INFORMATIONAL,\n                    \"9\": constants.Severity.INFORMATIONAL,\n                    \"10\": constants.Severity.INFORMATIONAL}\n\n    # Attributes mandatory in alert info to proceed with model filling\n    _mandatory_alert_attributes = ('emcAsyncEventCode',\n                                   'connUnitEventSeverity',\n                                   'connUnitEventType', 'connUnitEventDescr',\n                                   'connUnitType',\n                                   'emcAsyncEventComponentType',\n                                   'emcAsyncEventComponentName',\n                                   'emcAsyncEventSource')\n\n    @staticmethod\n    def parse_alert(context, alert):\n        \"\"\"Parse alert data got from alert manager and fill the alert model.\"\"\"\n\n        alert = oid_mapper.OidMapper.map_oids(alert)\n        # Check for mandatory alert attributes\n        for attr in AlertHandler._mandatory_alert_attributes:\n            if not alert.get(attr):\n                msg = \"Mandatory information %s missing in alert message. 
\" \\\n                      % attr\n                raise exception.InvalidInput(msg)\n\n        alert_model = {}\n\n        # Fill alarm id and fill alarm_name with corresponding mapping names\n        alert_model['alert_id'] = alert['emcAsyncEventCode']\n        alert_model['alert_name'] = alert_mapper.alarm_id_name_mapping.get(\n            alert_model['alert_id'], alert_model['alert_id'])\n\n        alert_model['severity'] = AlertHandler.SEVERITY_MAP.get(\n            alert['connUnitEventSeverity'],\n            constants.Severity.INFORMATIONAL)\n\n        alert_model['category'] = constants.Category.NOT_SPECIFIED\n        alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n\n        alert_model['sequence_number'] = alert['connUnitEventId']\n\n        # trap info do not contain occur time, update with received time\n        # Get date and time and convert to epoch format\n        pattern = '%Y-%m-%d %H:%M:%S'\n        curr_time = strftime(pattern, gmtime())\n\n        alert_model['occur_time'] = int(time.mktime(time.strptime(curr_time,\n                                                                  pattern)))\n        alert_model['description'] = alert['connUnitEventDescr']\n        alert_model['recovery_advice'] = 'None'\n        alert_model['resource_type'] = alert['connUnitType']\n\n        # Location is name-value pair having component type and component name\n        component_type = alert_mapper.component_type_mapping.get(\n            alert.get('emcAsyncEventComponentType'), \"\")\n        alert_model['location'] = 'Array id=' \\\n                                  + alert['connUnitName'] \\\n                                  + ',Component type=' \\\n                                  + component_type \\\n                                  + ',Component name=' \\\n                                  + alert['emcAsyncEventComponentName'] \\\n                                  + ',Event source=' \\\n                                  + alert['emcAsyncEventSource']\n        if alert['connUnitName']:\n            alert_model['serial_number'] = alert['connUnitName']\n        return alert_model\n\n    def add_trap_config(self, context, storage_id, trap_config):\n        \"\"\"Config the trap receiver in storage system.\"\"\"\n        pass\n\n    def remove_trap_config(self, context, storage_id, trap_config):\n        \"\"\"Remove trap receiver configuration from storage system.\"\"\"\n        pass\n\n    def clear_alert(self, context, storage_id, alert):\n        \"\"\"Clear alert from storage system.\"\"\"\n        pass\n"
  },
  {
    "path": "delfin/drivers/dell_emc/vmax/alert_handler/unisphere_alerts.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_log import log\n\nfrom delfin import exception\nfrom delfin.common import constants\n\nLOG = log.getLogger(__name__)\n\n\nclass AlertHandler(object):\n    \"\"\"Alert handling functions for unisphere alerts\"\"\"\n\n    # Alert Id and name are not part of queried alerts\n    DEFAULT_UNISPHERE_ALERT_NAME = \"Unisphere alert about vmax\"\n    DEFAULT_UNISPHERE_ALERT_ID = 0xFFFFFFFF\n\n    # Translation of queried alert severity to alert model severity\n    SEVERITY_MAP = {\"FATAL\": constants.Severity.FATAL,\n                    \"CRITICAL\": constants.Severity.CRITICAL,\n                    \"WARNING\": constants.Severity.WARNING,\n                    \"NORMAL\": constants.Severity.INFORMATIONAL,\n                    \"INFORMATION\": constants.Severity.INFORMATIONAL}\n\n    def __init__(self):\n        pass\n\n    def parse_queried_alerts(self, alert_list):\n        \"\"\"Parse queried alerts and convert to alert model.\"\"\"\n\n        alert_model_list = []\n        for alert in alert_list:\n            try:\n                alert_model = dict()\n                alert_model['alert_id'] = self.DEFAULT_UNISPHERE_ALERT_ID\n                alert_model['alert_name'] = self.DEFAULT_UNISPHERE_ALERT_NAME\n\n                alert_model['severity'] = self.SEVERITY_MAP.get(\n                    alert['severity'], constants.Severity.NOT_SPECIFIED)\n\n                # category and type are not part of queried alerts\n                alert_model['category'] = constants.Category.FAULT\n                alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n\n                alert_model['sequence_number'] = alert['alertId']\n                alert_model['occur_time'] = alert['created_date_milliseconds']\n                alert_model['description'] = alert['description']\n                alert_model['recovery_advice'] = 'None'\n                alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n\n                # Location is name-value pair\n                alert_model['location'] = 'type=' + alert['type']\n                alert_model_list.append(alert_model)\n            except Exception as e:\n                LOG.error(e)\n                msg = (\"Failed to build alert model as some attributes \"\n                       \"missing in alert message\")\n                raise exception.InvalidResults(msg)\n        return alert_model_list\n\n    def add_trap_config(self, context, storage_id, trap_config):\n        \"\"\"Config the trap receiver in storage system.\"\"\"\n        pass\n\n    def remove_trap_config(self, context, storage_id, trap_config):\n        \"\"\"Remove trap receiver configuration from storage system.\"\"\"\n        pass\n\n    def clear_alert(self, context, storage_id, alert):\n        \"\"\"Clear alert from storage system.\"\"\"\n        pass\n"
  },
  {
    "path": "delfin/drivers/dell_emc/vmax/client.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_log import log\nfrom oslo_utils import units\n\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.drivers.dell_emc.vmax import constants as consts\nfrom delfin.drivers.dell_emc.vmax import rest, perf_utils\n\nLOG = log.getLogger(__name__)\n\nEMBEDDED_UNISPHERE_ARRAY_COUNT = 1\n\n\nclass VMAXClient(object):\n    \"\"\" Client class for communicating with VMAX storage \"\"\"\n\n    def __init__(self, **kwargs):\n        self.uni_version = None\n        self.array_id = {}\n        rest_access = kwargs.get('rest')\n        if rest_access is None:\n            raise exception.InvalidInput('Input rest_access is missing')\n        self.rest = rest.VMaxRest()\n        self.rest.set_rest_credentials(rest_access)\n        self.reset_connection(**kwargs)\n\n    def reset_connection(self, **kwargs):\n        \"\"\" Reset connection to VMAX storage with new configs \"\"\"\n        self.rest.verify = kwargs.get('verify', False)\n        self.rest.establish_rest_session()\n\n    def init_connection(self, access_info):\n        \"\"\" Given the access_info get a connection to VMAX storage \"\"\"\n        try:\n            ver, self.uni_version = self.rest.get_uni_version()\n            LOG.info('Connected to Unisphere Version: {0}'.format(ver))\n        except exception.InvalidUsernameOrPassword as e:\n            msg = \"Failed to connect VMAX. Reason: {}\".format(e.msg)\n            LOG.error(msg)\n            raise e\n        except (exception.SSLCertificateFailed,\n                exception.SSLHandshakeFailed) as e:\n            msg = (\"Failed to connect to VMAX: {}\".format(e))\n            LOG.error(msg)\n            raise\n        except Exception as err:\n            msg = (\"Failed to connect to VMAX. Host or Port is not correct: \"\n                   \"{}\".format(err))\n            LOG.error(msg)\n            raise exception.InvalidIpOrPort()\n\n        if not self.uni_version:\n            msg = \"Invalid input. Failed to get vmax unisphere version\"\n            raise exception.InvalidInput(msg)\n\n    def add_storage(self, access_info):\n        storage_name = access_info.get('storage_name')\n\n        try:\n            # Get array details from unisphere\n            array = self.rest.get_array_detail(version=self.uni_version)\n            if not array:\n                msg = \"Failed to get array details\"\n                raise exception.InvalidInput(msg)\n\n            if len(array['symmetrixId']) == EMBEDDED_UNISPHERE_ARRAY_COUNT:\n                if not storage_name:\n                    storage_name = array['symmetrixId'][0]\n                elif storage_name != array['symmetrixId'][0]:\n                    msg = \"Invalid storage_name. Expected: {}\". 
\\\n                        format(array['symmetrixId'])\n                    raise exception.InvalidInput(msg)\n            else:\n                if not storage_name:\n                    msg = \"Input storage_name is missing. Supported ids: {}\". \\\n                        format(array['symmetrixId'])\n                    raise exception.InvalidInput(msg)\n\n                array_ids = array.get('symmetrixId', list())\n                if storage_name not in array_ids:\n                    msg = \"Failed to get VMAX array id from Unisphere\"\n                    raise exception.InvalidInput(msg)\n\n            self.array_id[access_info['storage_id']] = storage_name\n\n        except Exception:\n            LOG.error(\"Failed to add storage from VMAX\")\n            raise\n\n    def get_array_details(self, storage_id):\n        try:\n            array_id = self.array_id.get(storage_id)\n            # Get the VMAX array properties\n            return self.rest.get_vmax_array_details(version=self.uni_version,\n                                                    array=array_id)\n        except Exception:\n            LOG.error(\"Failed to get array details from VMAX\")\n            raise\n\n    def get_storage_capacity(self, storage_id):\n        try:\n            storage_info = self.rest.get_system_capacity(\n                self.array_id[storage_id], self.uni_version)\n\n            total_capacity = 0\n            used_capacity = 0\n            free_capacity = 0\n            raw_capacity = 0\n            subscribed_capacity = 0\n            if int(self.uni_version) < 90:\n                physical_capacity = storage_info.get('physicalCapacity')\n                total_cap = storage_info.get('total_usable_cap_gb')\n                used_cap = storage_info.get('total_allocated_cap_gb')\n                subscribed_cap = storage_info.get('total_subscribed_cap_gb')\n                total_raw = physical_capacity.get('total_capacity_gb')\n                free_cap = total_cap - used_cap\n\n                total_capacity = int(total_cap * units.Gi)\n                used_capacity = int(used_cap * units.Gi)\n                free_capacity = int(free_cap * units.Gi)\n                raw_capacity = int(total_raw * units.Gi)\n                subscribed_capacity = int(subscribed_cap * units.Gi)\n\n            else:\n                system_capacity = storage_info['system_capacity']\n                physical_capacity = storage_info.get('physicalCapacity')\n                total_cap = system_capacity.get('usable_total_tb')\n                used_cap = system_capacity.get('usable_used_tb')\n                subscribed_cap = system_capacity.get('subscribed_total_tb')\n                total_raw = physical_capacity.get('total_capacity_gb')\n                free_cap = total_cap - used_cap\n\n                total_capacity = int(total_cap * units.Ti)\n                used_capacity = int(used_cap * units.Ti)\n                free_capacity = int(free_cap * units.Ti)\n                raw_capacity = int(total_raw * units.Gi)\n                subscribed_capacity = int(subscribed_cap * units.Ti)\n\n            return total_capacity, used_capacity, free_capacity,\\\n                raw_capacity, subscribed_capacity\n\n        except Exception:\n            LOG.error(\"Failed to get capacity from VMAX\")\n            raise\n\n    def list_storage_pools(self, storage_id):\n        try:\n            # Get list of SRP pool names\n            pools = self.rest.get_srp_by_name(\n                self.array_id[storage_id],\n         
       self.uni_version, srp='')['srpId']\n\n            pool_list = []\n            for pool in pools:\n                pool_info = self.rest.get_srp_by_name(\n                    self.array_id[storage_id],\n                    self.uni_version, srp=pool)\n\n                total_cap = 0\n                used_cap = 0\n                subscribed_cap = 0\n                if int(self.uni_version) < 90:\n                    total_cap = pool_info['total_usable_cap_gb'] * units.Gi\n                    used_cap = pool_info['total_allocated_cap_gb'] * units.Gi\n                    subscribed_cap = \\\n                        pool_info['total_subscribed_cap_gb'] * units.Gi\n                else:\n                    srp_cap = pool_info['srp_capacity']\n                    total_cap = srp_cap['usable_total_tb'] * units.Ti\n                    used_cap = srp_cap['usable_used_tb'] * units.Ti\n                    subscribed_cap = srp_cap['subscribed_total_tb'] * units.Ti\n\n                p = {\n                    \"name\": pool,\n                    \"storage_id\": storage_id,\n                    \"native_storage_pool_id\": pool_info[\"srpId\"],\n                    \"description\": \"Dell EMC VMAX Pool\",\n                    \"status\": constants.StoragePoolStatus.NORMAL,\n                    \"storage_type\": constants.StorageType.BLOCK,\n                    \"total_capacity\": int(total_cap),\n                    \"used_capacity\": int(used_cap),\n                    \"free_capacity\": int(total_cap - used_cap),\n                    \"subscribed_capacity\": int(subscribed_cap),\n                }\n\n                pool_list.append(p)\n\n            return pool_list\n\n        except Exception:\n            LOG.error(\"Failed to get pool metrics from VMAX\")\n            raise\n\n    def list_volumes(self, storage_id):\n\n        try:\n            # Get default SRPs assigned for the array\n            default_srps = self.rest.get_default_srps(\n                self.array_id[storage_id], version=self.uni_version)\n            # List all volumes except data volumes\n            volumes = self.rest.get_volume_list(\n                self.array_id[storage_id], version=self.uni_version,\n                params={'data_volume': 'false'})\n\n            # TODO: Update constants.VolumeStatus to make mapping more precise\n            switcher = {\n                'Ready': constants.VolumeStatus.AVAILABLE,\n                'Not Ready': constants.VolumeStatus.AVAILABLE,\n                'Mixed': constants.VolumeStatus.AVAILABLE,\n                'Write Disabled': constants.VolumeStatus.AVAILABLE,\n                'N/A': constants.VolumeStatus.ERROR,\n            }\n\n            volume_list = []\n            for volume in volumes:\n                # Get volume details\n                vol = self.rest.get_volume(self.array_id[storage_id],\n                                           self.uni_version, volume)\n\n                emulation_type = vol['emulation']\n                total_cap = vol['cap_mb'] * units.Mi\n                used_cap = (total_cap * vol['allocated_percent']) / 100.0\n                free_cap = total_cap - used_cap\n\n                status = switcher.get(vol.get('status'),\n                                      constants.VolumeStatus.AVAILABLE)\n\n                description = \"Dell EMC VMAX volume\"\n                if vol['type'] == 'TDEV':\n                    description = \"Dell EMC VMAX 'thin device' volume\"\n\n                name = volume\n                if 
vol.get('volume_identifier'):\n                    name = volume + ':' + vol['volume_identifier']\n\n                v = {\n                    \"name\": name,\n                    \"storage_id\": storage_id,\n                    \"description\": description,\n                    \"status\": status,\n                    \"native_volume_id\": vol['volumeId'],\n                    \"wwn\": vol['wwn'],\n                    \"type\": constants.VolumeType.THIN,\n                    \"total_capacity\": int(total_cap),\n                    \"used_capacity\": int(used_cap),\n                    \"free_capacity\": int(free_cap),\n                }\n\n                if vol['num_of_storage_groups'] == 1:\n                    sg = vol['storageGroupId'][0]\n                    sg_info = self.rest.get_storage_group(\n                        self.array_id[storage_id], self.uni_version, sg)\n                    v['native_storage_pool_id'] = \\\n                        sg_info.get('srp', default_srps[emulation_type])\n                    v['compressed'] = sg_info.get('compression', False)\n                else:\n                    v['native_storage_pool_id'] = default_srps[emulation_type]\n\n                volume_list.append(v)\n\n            return volume_list\n\n        except Exception:\n            LOG.error(\"Failed to get volume list from VMAX\")\n            raise\n\n    def list_controllers(self, storage_id):\n        try:\n            # Get list of Directors\n            directors = self.rest.get_director_list(self.array_id[storage_id],\n                                                    self.uni_version)\n            controller_list = []\n            for director in directors:\n                director_info = self.rest.get_director(\n                    self.array_id[storage_id], self.uni_version, director)\n\n                status = constants.ControllerStatus.NORMAL\n                if \"OFF\" in director_info.get('availability', '').upper():\n                    status = constants.ControllerStatus.OFFLINE\n\n                controller = {\n                    'name': director_info['directorId'],\n                    'storage_id': storage_id,\n                    'native_controller_id': director_info['directorId'],\n                    'status': status,\n                    'location':\n                        'slot_' +\n                        str(director_info.get('director_slot_number')),\n                    'soft_version': None,\n                    'cpu_info': 'Cores-'\n                                + str(director_info.get('num_of_cores')),\n                    'memory_size': None\n\n                }\n                controller_list.append(controller)\n            return controller_list\n\n        except Exception:\n            LOG.error(\"Failed to get controller metrics from VMAX\")\n            raise\n\n    def list_ports(self, storage_id):\n        try:\n            # Get list of Directors\n            directors = self.rest.get_director_list(self.array_id[storage_id],\n                                                    self.uni_version)\n        except Exception:\n            LOG.error(\"Failed to get director list,\"\n                      \" while getting port metrics from VMAX\")\n            raise\n        switcher = {\n            'A': constants.PortLogicalType.MANAGEMENT,\n            'B': constants.PortLogicalType.SERVICE,\n            'C': constants.PortLogicalType.BACKEND,\n        }\n        port_list = []\n        for director in directors:\n            try:\n      
          port_keys = self.rest.get_port_list(\n                    self.array_id[storage_id], self.uni_version, director)\n                for port_key in port_keys:\n                    port_info = self.rest.get_port(\n                        self.array_id[storage_id], self.uni_version,\n                        director, port_key['portId'])['symmetrixPort']\n\n                    connection_status = \\\n                        constants.PortConnectionStatus.CONNECTED\n                    if port_info.get('port_status',\n                                     '').upper().find('OFF') != -1:\n                        connection_status = \\\n                            constants.PortConnectionStatus.DISCONNECTED\n\n                    port_type = constants.PortType.OTHER\n                    if port_info.get('type', '').upper().find('FIBRE') != -1:\n                        port_type = constants.PortType.FC\n                    if port_info.get('type', '').upper().find('ETH') != -1:\n                        port_type = constants.PortType.ETH\n\n                    name = \"{0}:{1}\".format(port_key['directorId'],\n                                            port_key['portId'])\n\n                    director_emulation = port_key['directorId'][4]\n                    logical_type = switcher.get(\n                        director_emulation, constants.PortLogicalType.OTHER)\n                    if logical_type == constants.PortLogicalType.OTHER:\n                        port_prefix = port_key['directorId'][:2]\n                        if port_prefix in ['FA', 'FE', 'EA', 'EF', 'SE']:\n                            logical_type = constants.PortLogicalType.FRONTEND\n                        if port_prefix in ['DA', 'DF', 'DX']:\n                            logical_type = constants.PortLogicalType.BACKEND\n\n                    speed = int(port_info.get('negotiated_speed',\n                                              '0')) * units.Gi\n                    max_speed = int(port_info.get('max_speed',\n                                                  '0')) * units.Gi\n                    port_dict = {\n                        'name': name,\n                        'storage_id': storage_id,\n                        'native_port_id': port_key['portId'],\n                        'location': 'director_' + port_key['directorId'],\n                        'connection_status': connection_status,\n                        'health_status': constants.PortHealthStatus.NORMAL,\n                        'type': port_type,\n                        'logical_type': logical_type,\n                        'speed': speed,\n                        'max_speed': max_speed,\n                        'native_parent_id': port_key['directorId'],\n                        'wwn': port_info.get('identifier', None),\n                        'mac_address': None,\n                        'ipv4': port_info.get('ipv4_address'),\n                        'ipv4_mask': port_info.get('ipv4_netmask'),\n                        'ipv6': port_info.get('ipv6_address'),\n                        'ipv6_mask': None,\n                    }\n                    port_list.append(port_dict)\n\n            except Exception:\n                LOG.error(\"Failed to get port list for director: {}\"\n                          .format(director))\n\n        # Return only after ports of every director have been collected\n        return port_list\n\n    def list_disks(self, storage_id):\n        if int(self.uni_version) < 91:\n            return []\n        try:\n            # Get list of Disks\n            disks = 
self.rest.get_disk_list(self.array_id[storage_id],\n                                            self.uni_version)\n            disk_list = []\n            for disk in disks:\n                disk_info = self.rest.get_disk(\n                    self.array_id[storage_id], self.uni_version, disk)\n\n                disk_item = {\n                    'name': disk,\n                    'storage_id': storage_id,\n                    'native_disk_id': disk,\n                    'manufacturer': disk_info['vendor'],\n                    'capacity': int(disk_info['capacity']) * units.Gi,\n                }\n                disk_list.append(disk_item)\n            return disk_list\n\n        except Exception:\n            LOG.error(\"Failed to get disk details from VMAX\")\n            raise\n\n    def list_storage_host_initiators(self, storage_id):\n        try:\n            # Get list of initiators\n            initiators = self.rest.get_initiator_list(\n                self.array_id[storage_id], self.uni_version)\n\n            initiator_list = []\n            for initiator in initiators:\n                initiator_info = self.rest.get_initiator(\n                    self.array_id[storage_id], self.uni_version, initiator)\n                type_string = initiator_info.get('type', '').upper()\n                initiator_type = constants.InitiatorType.UNKNOWN\n                if 'FIBRE' in type_string:\n                    initiator_type = constants.InitiatorType.FC\n                if 'ISCSI' in type_string:\n                    initiator_type = constants.InitiatorType.ISCSI\n\n                initiator_status = constants.InitiatorStatus.ONLINE\n                if not initiator_info.get('on_fabric', False):\n                    initiator_status = constants.InitiatorStatus.OFFLINE\n\n                initiator_item = {\n                    'name': initiator,\n                    'storage_id': storage_id,\n                    'native_storage_host_initiator_id': initiator,\n                    'alias': initiator_info.get('alias'),\n                    'wwn': initiator_info.get('initiatorId'),\n                    'type': initiator_type,\n                    'status': initiator_status,\n                    'native_storage_host_id': initiator_info.get('host'),\n                }\n                initiator_list.append(initiator_item)\n            return initiator_list\n\n        except Exception:\n            LOG.error(\"Failed to get host initiator details from VMAX\")\n            raise\n\n    def list_storage_hosts(self, storage_id):\n        try:\n            # Get list of storage hosts\n            hosts = self.rest.get_host_list(self.array_id[storage_id],\n                                            self.uni_version)\n            host_list = []\n            for host in hosts:\n                host_info = self.rest.get_host(\n                    self.array_id[storage_id], self.uni_version, host)\n\n                host_item = {\n                    'storage_id': storage_id,\n                    'native_storage_host_id': host_info.get('hostId'),\n                    'name': host_info.get('hostId'),\n                    'os_type': constants.HostOSTypes.UNKNOWN,\n                    'status': constants.HostStatus.NORMAL,\n                }\n                host_list.append(host_item)\n            return host_list\n\n        except Exception:\n            LOG.error(\"Failed to get storage host details from VMAX\")\n            raise\n\n    def list_storage_host_groups(self, storage_id):\n        try:\n     
       # Get list of storage host groups\n            host_groups = self.rest.get_host_group_list(\n                self.array_id[storage_id], self.uni_version)\n            host_group_list = []\n            storage_host_grp_relation_list = []\n            for host_group in host_groups:\n                host_group_info = self.rest.get_host_group(\n                    self.array_id[storage_id], self.uni_version, host_group)\n                host_group_item = {\n                    'name': host_group,\n                    'storage_id': storage_id,\n                    'native_storage_host_group_id': host_group,\n                }\n                host_group_list.append(host_group_item)\n\n                for storage_host in host_group_info['host']:\n                    storage_host_group_relation = {\n                        'storage_id': storage_id,\n                        'native_storage_host_group_id': host_group,\n                        'native_storage_host_id': storage_host.get('hostId')\n                    }\n                    storage_host_grp_relation_list \\\n                        .append(storage_host_group_relation)\n\n            result = {\n                'storage_host_groups': host_group_list,\n                'storage_host_grp_host_rels': storage_host_grp_relation_list\n            }\n\n            return result\n\n        except Exception:\n            LOG.error(\"Failed to get storage host group details from VMAX\")\n            raise\n\n    def list_port_groups(self, storage_id):\n        try:\n            # Get list of port groups\n            port_groups = self.rest.get_port_group_list(\n                self.array_id[storage_id], self.uni_version)\n            port_group_list = []\n            port_group_relation_list = []\n            for port_group in port_groups:\n                port_group_info = self.rest.get_port_group(\n                    self.array_id[storage_id], self.uni_version, port_group)\n                port_group_item = {\n                    'name': port_group,\n                    'storage_id': storage_id,\n                    'native_port_group_id': port_group,\n                }\n                port_group_list.append(port_group_item)\n\n                for port in port_group_info['symmetrixPortKey']:\n                    port_name = port['directorId'] + ':' + port['portId']\n                    port_group_relation = {\n                        'storage_id': storage_id,\n                        'native_port_group_id': port_group,\n                        'native_port_id': port_name\n                    }\n                    port_group_relation_list.append(port_group_relation)\n            result = {\n                'port_groups': port_group_list,\n                'port_grp_port_rels': port_group_relation_list\n            }\n            return result\n\n        except Exception:\n            LOG.error(\"Failed to get port group details from VMAX\")\n            raise\n\n    def list_volume_groups(self, storage_id):\n        try:\n            # Get list of volume groups\n            volume_groups = self.rest.get_volume_group_list(\n                self.array_id[storage_id], self.uni_version)\n            volume_group_list = []\n            volume_group_relation_list = []\n            for volume_group in volume_groups:\n                # volume_group_info = self.rest.get_volume_group(\n                #     self.array_id, self.uni_version, volume_group)\n\n                volume_group_item = {\n                    'name': volume_group,\n          
          'storage_id': storage_id,\n                    'native_volume_group_id': volume_group,\n                }\n                volume_group_list.append(volume_group_item)\n\n                # List all volumes except data volumes\n                volumes = self.rest.get_volume_list(\n                    self.array_id[storage_id], version=self.uni_version,\n                    params={'data_volume': 'false',\n                            'storageGroupId': volume_group})\n                if not volumes:\n                    continue\n                for volume in volumes:\n                    volume_group_relation = {\n                        'storage_id': storage_id,\n                        'native_volume_group_id': volume_group,\n                        'native_volume_id': volume\n                    }\n                    volume_group_relation_list.append(volume_group_relation)\n\n            result = {\n                'volume_groups': volume_group_list,\n                'vol_grp_vol_rels': volume_group_relation_list\n            }\n            return result\n\n        except Exception:\n            LOG.error(\"Failed to get volume group details from VMAX\")\n            raise\n\n    def list_masking_views(self, storage_id):\n        try:\n            # Get list of masking_views\n            masking_views = self.rest.get_masking_view_list(\n                self.array_id[storage_id], self.uni_version)\n            masking_view_list = []\n            for masking_view in masking_views:\n                mv_info = self.rest.get_masking_view(\n                    self.array_id[storage_id], self.uni_version, masking_view)\n\n                masking_view_item = {\n                    'name': masking_view,\n                    'storage_id': storage_id,\n                    'native_masking_view_id': mv_info['maskingViewId'],\n                    'native_storage_host_id': mv_info.get('hostId'),\n                    'native_storage_host_group_id': mv_info.get(\n                        'hostGroupId'),\n                    'native_volume_group_id': mv_info.get('storageGroupId'),\n                    'native_port_group_id': mv_info.get('portGroupId'),\n                }\n                masking_view_list.append(masking_view_item)\n            return masking_view_list\n\n        except Exception:\n            LOG.error(\"Failed to get masking views details from VMAX\")\n            raise\n\n    def list_alerts(self, storage_id, query_para):\n        \"\"\"Get all alerts from an array.\"\"\"\n        return self.rest.get_alerts(query_para, version=self.uni_version,\n                                    array=self.array_id[storage_id])\n\n    def clear_alert(self, storage_id, sequence_number):\n        \"\"\"Clear alert for given sequence number.\"\"\"\n        return self.rest.clear_alert(sequence_number,\n                                     version=self.uni_version,\n                                     array=self.array_id[storage_id])\n\n    def get_storage_metrics(self, storage_id, metrics, start_time, end_time):\n        \"\"\"Get performance metrics.\"\"\"\n        try:\n            perf_list = self.rest.get_storage_metrics(\n                self.array_id[storage_id], metrics, start_time, end_time)\n\n            return perf_utils.construct_metrics(storage_id,\n                                                consts.STORAGE_METRICS,\n                                                consts.STORAGE_CAP,\n                                                perf_list)\n        except Exception:\n         
   LOG.error(\"Failed to get STORAGE metrics for VMAX\")\n            raise\n\n    def get_pool_metrics(self, storage_id, metrics, start_time, end_time):\n        \"\"\"Get performance metrics.\"\"\"\n        try:\n            perf_list = self.rest.get_pool_metrics(\n                self.array_id[storage_id], metrics, start_time, end_time)\n\n            metrics_array = perf_utils.construct_metrics(\n                storage_id, consts.POOL_METRICS, consts.POOL_CAP, perf_list)\n\n            return metrics_array\n        except Exception:\n            LOG.error(\"Failed to get STORAGE POOL metrics for VMAX\")\n            raise\n\n    def get_port_metrics(self, storage_id, metrics, start_time, end_time):\n        \"\"\"Get performance metrics.\"\"\"\n        try:\n            be_perf_list, fe_perf_list, rdf_perf_list = \\\n                self.rest.get_port_metrics(self.array_id[storage_id],\n                                           metrics, start_time, end_time)\n\n            metrics_array = []\n            metrics_list = perf_utils.construct_metrics(\n                storage_id, consts.BEPORT_METRICS,\n                consts.PORT_CAP, be_perf_list)\n            metrics_array.extend(metrics_list)\n\n            metrics_list = perf_utils.construct_metrics(\n                storage_id, consts.FEPORT_METRICS,\n                consts.PORT_CAP, fe_perf_list)\n            metrics_array.extend(metrics_list)\n\n            metrics_list = perf_utils.construct_metrics(\n                storage_id, consts.RDFPORT_METRICS,\n                consts.PORT_CAP, rdf_perf_list)\n            metrics_array.extend(metrics_list)\n            return metrics_array\n        except Exception:\n            LOG.error(\"Failed to get PORT metrics for VMAX\")\n            raise\n\n    def get_controller_metrics(self, storage_id,\n                               metrics, start_time, end_time):\n        \"\"\"Get performance metrics.\"\"\"\n        try:\n            be_perf_list, fe_perf_list, rdf_perf_list = self.rest.\\\n                get_controller_metrics(self.array_id[storage_id],\n                                       metrics, start_time, end_time)\n\n            metrics_array = []\n            metrics_list = perf_utils.construct_metrics(\n                storage_id, consts.BEDIRECTOR_METRICS,\n                consts.CONTROLLER_CAP, be_perf_list)\n            metrics_array.extend(metrics_list)\n\n            metrics_list = perf_utils.construct_metrics(\n                storage_id, consts.FEDIRECTOR_METRICS,\n                consts.CONTROLLER_CAP, fe_perf_list)\n            metrics_array.extend(metrics_list)\n\n            metrics_list = perf_utils.construct_metrics(\n                storage_id, consts.RDFDIRECTOR_METRICS,\n                consts.CONTROLLER_CAP, rdf_perf_list)\n            metrics_array.extend(metrics_list)\n\n            return metrics_array\n        except Exception:\n            LOG.error(\"Failed to get CONTROLLER metrics for VMAX\")\n            raise\n\n    def get_disk_metrics(self, storage_id, metrics, start_time, end_time):\n        \"\"\"Get disk performance metrics.\"\"\"\n        if int(self.uni_version) < 91:\n            return []\n\n        try:\n            perf_list = self.rest.get_disk_metrics(\n                self.array_id[storage_id], metrics, start_time, end_time)\n\n            metrics_array = perf_utils.construct_metrics(\n                storage_id, consts.DISK_METRICS, consts.DISK_CAP, perf_list)\n\n            return metrics_array\n        except Exception:\n           
 LOG.error(\"Failed to get DISK metrics for VMAX\")\n            raise\n"
  },
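  {
    "path": "delfin/drivers/dell_emc/vmax/examples/capacity_units_sketch.py",
    "content": "# NOTE: Hypothetical example file, not part of the driver. A minimal\n# sketch of how get_storage_capacity() in vmax.py converts Unisphere\n# capacity payloads to bytes: Unisphere versions below 90 report\n# top-level GB fields, while 90+ nests TB fields under\n# 'system_capacity'. The payloads in __main__ are made up.\n\nfrom oslo_utils import units\n\n\ndef capacity_to_bytes(storage_info, uni_version):\n    \"\"\"Convert a simplified Unisphere capacity payload to bytes.\"\"\"\n    if int(uni_version) < 90:\n        total = storage_info['total_usable_cap_gb'] * units.Gi\n        used = storage_info['total_allocated_cap_gb'] * units.Gi\n    else:\n        system_capacity = storage_info['system_capacity']\n        total = system_capacity['usable_total_tb'] * units.Ti\n        used = system_capacity['usable_used_tb'] * units.Ti\n    return int(total), int(used), int(total - used)\n\n\nif __name__ == '__main__':\n    # Pre-9.0 payload: GB fields at the top level.\n    print(capacity_to_bytes({'total_usable_cap_gb': 10.0,\n                             'total_allocated_cap_gb': 4.0}, '84'))\n    # 9.0+ payload: TB fields nested under 'system_capacity'.\n    print(capacity_to_bytes({'system_capacity': {'usable_total_tb': 1.0,\n                                                 'usable_used_tb': 0.25}},\n                            '92'))\n"
  },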
  {
    "path": "delfin/drivers/dell_emc/vmax/constants.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n# minimum interval supported by VMAX\nVMAX_PERF_MIN_INTERVAL = 5\n\nBEDIRECTOR_METRICS = {\n    'iops': 'IOs',\n    'throughput': 'MBs',\n    'readThroughput': 'MBRead',\n    'writeThroughput': 'MBWritten',\n}\nFEDIRECTOR_METRICS = {\n    'iops': 'HostIOs',\n    'throughput': 'HostMBs',\n}\nRDFDIRECTOR_METRICS = {\n    'iops': 'IOs',\n    'throughput': 'MBSentAndReceived',\n    'readThroughput': 'MBRead',\n    'writeThroughput': 'MBWritten',\n    'responseTime': 'AverageIOServiceTime',\n}\nBEPORT_METRICS = {\n    'iops': 'IOs',\n    'throughput': 'MBs',\n    'readThroughput': 'MBRead',\n    'writeThroughput': 'MBWritten',\n}\nFEPORT_METRICS = {\n    'iops': 'IOs',\n    'throughput': 'MBs',\n    'readThroughput': 'MBRead',\n    'writeThroughput': 'MBWritten',\n    'responseTime': 'ResponseTime',\n}\nRDFPORT_METRICS = {\n    'iops': 'IOs',\n    'throughput': 'MBs',\n    'readThroughput': 'MBRead',\n    'writeThroughput': 'MBWritten',\n}\nDISK_METRICS = {\n    'iops': 'IOs',\n    'throughput': 'MBs',\n    'readThroughput': 'MBReads',\n    'writeThroughput': 'MBWritten',\n    'responseTime': 'AvgResponseTime',\n}\nPOOL_METRICS = {\n    'iops': 'HostIOs',\n    'readIops': 'HostReads',\n    'writeIops': 'HostWrites',\n    'throughput': 'HostMBs',\n    'readThroughput': 'HostMBReads',\n    'writeThroughput': 'HostMBWritten',\n    'responseTime': 'ResponseTime',\n}\nSTORAGE_METRICS = {\n    'iops': 'HostIOs',\n    'readIops': 'HostReads',\n    'writeIops': 'HostWrites',\n    'throughput': 'HostMBs',\n    'readThroughput': 'HostMBReads',\n    'writeThroughput': 'HostMBWritten',\n}\n\nIOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Input/output operations per second\"\n}\nREAD_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Read input/output operations per second\"\n}\nWRITE_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Write input/output operations per second\"\n}\nTHROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data is \"\n                   \"successfully transferred in MB/s\"\n}\nREAD_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data read is \"\n                   \"successfully transferred in MB/s\"\n}\nWRITE_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data write is \"\n                   \"successfully transferred in MB/s\"\n}\nRESPONSE_TIME_DESCRIPTION = {\n    \"unit\": \"ms\",\n    \"description\": \"Average time taken for an IO \"\n                   \"operation in ms\"\n}\nIO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of IO requests in KB\"\n}\nREAD_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of read IO requests in 
KB\"\n}\nWRITE_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of write IO requests in KB\"\n}\nSTORAGE_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\nPOOL_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\nCONTROLLER_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\nPORT_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\nDISK_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\n"
  },
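  {
    "path": "delfin/drivers/dell_emc/vmax/examples/metric_name_map_sketch.py",
    "content": "# NOTE: Hypothetical example file, not part of the driver. It sketches\n# how the metric maps in constants.py are used: delfin metric names\n# (e.g. 'iops') map to Unisphere performance counter names (e.g.\n# 'HostIOs') when a performance query is built. Only a subset of\n# POOL_METRICS is copied here and the helper below is illustrative.\n\nPOOL_METRICS = {\n    'iops': 'HostIOs',\n    'readIops': 'HostReads',\n    'writeIops': 'HostWrites',\n    'throughput': 'HostMBs',\n}\n\n\ndef unisphere_counters(resource_metrics, requested):\n    \"\"\"Map requested delfin metric names to Unisphere counter names.\"\"\"\n    return [resource_metrics[name] for name in requested\n            if name in resource_metrics]\n\n\nif __name__ == '__main__':\n    # Asking for pool 'iops' and 'throughput' means querying the\n    # 'HostIOs' and 'HostMBs' counters; unknown names are skipped.\n    print(unisphere_counters(POOL_METRICS, ['iops', 'throughput', 'dummy']))\n"
  },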
  {
    "path": "delfin/drivers/dell_emc/vmax/perf_utils.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.common import constants\n\n\ndef parse_performance_data(metrics):\n    \"\"\"Parse metrics response to a map\n    :param metrics: metrics from Unisphere REST API\n    :returns: map with key as metric name and value as dictionary\n        containing {timestamp: value} for the timestamps available\n    \"\"\"\n    metrics_map = {}\n    timestamp = metrics[\"timestamp\"]\n    for key, value in metrics.items():\n        metrics_map[key] = metrics_map.get(key, {})\n        metrics_map[key][timestamp] = value\n    return metrics_map\n\n\ndef construct_metrics(storage_id, resource_metrics, unit_map, perf_list):\n    metrics_list = []\n    for perf in perf_list:\n        # Collect values per resource so that one resource's samples do\n        # not leak into the metrics reported for the next resource.\n        metrics_values = {}\n        collected_metrics_list = perf.get('metrics')\n        for collected_metrics in collected_metrics_list:\n            metrics_map = parse_performance_data(collected_metrics)\n\n            for key, value in resource_metrics.items():\n                metrics_map_value = metrics_map.get(value)\n                if metrics_map_value:\n                    metrics_values[key] = metrics_values.get(key, {})\n                    for k, v in metrics_map_value.items():\n                        metrics_values[key][k] = v\n\n        for resource_key, resource_value in metrics_values.items():\n            labels = {\n                'storage_id': storage_id,\n                'resource_type': perf.get('resource_type'),\n                'resource_id': perf.get('resource_id'),\n                'resource_name': perf.get('resource_name'),\n                'type': 'RAW',\n                'unit': unit_map[resource_key]['unit']\n            }\n            metrics_res = constants.metric_struct(name=resource_key,\n                                                  labels=labels,\n                                                  values=resource_value)\n            metrics_list.append(metrics_res)\n    return metrics_list\n"
  },
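  {
    "path": "delfin/drivers/dell_emc/vmax/examples/perf_parse_sketch.py",
    "content": "# NOTE: Hypothetical example file, not part of the driver. It\n# demonstrates what perf_utils.parse_performance_data() produces for a\n# single Unisphere metrics sample: every counter in the sample is keyed\n# by the sample's timestamp, so repeated samples can be merged into a\n# {metric_name: {timestamp: value}} map. The sample payload is made up\n# and the function mirrors the logic in perf_utils.py.\n\n\ndef parse_performance_data(metrics):\n    \"\"\"Parse one metrics sample into {metric_name: {timestamp: value}}.\"\"\"\n    metrics_map = {}\n    timestamp = metrics['timestamp']\n    for key, value in metrics.items():\n        metrics_map.setdefault(key, {})[timestamp] = value\n    return metrics_map\n\n\nif __name__ == '__main__':\n    sample = {'timestamp': 1600000000000, 'HostIOs': 123.0, 'HostMBs': 4.5}\n    # The 'timestamp' key itself is carried through as well, exactly as\n    # in the driver implementation.\n    print(parse_performance_data(sample))\n"
  },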
  {
    "path": "delfin/drivers/dell_emc/vmax/rest.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2020 Dell Inc. or its subsidiaries.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport json\nimport sys\n\nimport requests\nimport requests.auth\nimport requests.exceptions as r_exc\nimport six\nimport urllib3\nfrom oslo_log import log as logging\n\nfrom delfin import cryptor\nfrom delfin import exception\nfrom delfin import ssl_utils\nfrom delfin.common import alert_util\nfrom delfin.common import constants as delfin_const\nfrom delfin.drivers.dell_emc.vmax import constants\nfrom delfin.i18n import _\n\nLOG = logging.getLogger(__name__)\nPERFORMANCE = 'performance'\nSLOPROVISIONING = 'sloprovisioning'\nSYSTEM = 'system'\nSYMMETRIX = 'symmetrix'\nDIRECTOR = 'director'\nPORT = 'port'\nU4V_VERSION = '92'\nUCODE_5978 = '5978'\n# HTTP constants\nGET = 'GET'\nPOST = 'POST'\nPUT = 'PUT'\nDELETE = 'DELETE'\nSTATUS_200 = 200\nSTATUS_201 = 201\nSTATUS_202 = 202\nSTATUS_204 = 204\nSTATUS_401 = 401\n\n# Default expiration time(in sec) for vmax connect request\nVERSION_GET_TIME_OUT = 10\n\n\nclass VMaxRest(object):\n    \"\"\"Rest class based on Unisphere for VMax Rest API.\"\"\"\n\n    def __init__(self):\n        self.session = None\n        self.base_uri = None\n        self.user = None\n        self.passwd = None\n        self.verify = None\n        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n    def set_rest_credentials(self, array_info):\n        \"\"\"Given the array record set the rest server credentials.\n        :param array_info: record\n        \"\"\"\n        ip = array_info['host']\n        port = array_info['port']\n        self.user = array_info['username']\n        self.passwd = array_info['password']\n        ip_port = \"%(ip)s:%(port)d\" % {'ip': ip, 'port': port}\n        self.base_uri = (\"https://%(ip_port)s/univmax/restapi\" % {\n            'ip_port': ip_port})\n\n    def establish_rest_session(self):\n        \"\"\"Establish the rest session.\n        :returns: requests.session() -- session, the rest session\n        \"\"\"\n        LOG.info(\"Establishing REST session with %(base_uri)s\",\n                 {'base_uri': self.base_uri})\n        if self.session:\n            self.session.close()\n        session = requests.session()\n        session.headers = {'content-type': 'application/json',\n                           'accept': 'application/json',\n                           'Application-Type': 'delfin'}\n        session.auth = requests.auth.HTTPBasicAuth(\n            self.user, cryptor.decode(self.passwd))\n\n        if not self.verify:\n            session.verify = False\n        else:\n            LOG.debug(\"Enable certificate verification, ca_path: {0}\".format(\n                self.verify))\n            session.verify = self.verify\n        session.mount(\"https://\", ssl_utils.get_host_name_ignore_adapter())\n\n        self.session = session\n        return session\n\n    def request(self, target_uri, method, 
params=None, request_object=None,\n                timeout=None):\n        \"\"\"Sends a request (GET, POST, PUT, DELETE) to the target API.\n        :param target_uri: target uri (string)\n        :param method: The method (GET, POST, PUT, or DELETE)\n        :param params: Additional URL parameters\n        :param request_object: request payload (dict)\n        :param timeout: expiration timeout (in sec)\n        :returns: server response object (dict)\n        :raises: StorageBackendException, Timeout, ConnectionError,\n                 HTTPError, SSLError\n        \"\"\"\n        url, message, status_code, response = None, None, None, None\n        if not self.session:\n            self.establish_rest_session()\n\n        try:\n            url = (\"%(self.base_uri)s%(target_uri)s\" % {\n                'self.base_uri': self.base_uri,\n                'target_uri': target_uri})\n\n            if request_object:\n                response = self.session.request(\n                    method=method, url=url,\n                    data=json.dumps(request_object, sort_keys=True,\n                                    indent=4), timeout=timeout)\n            elif params:\n                response = self.session.request(\n                    method=method, url=url, params=params, timeout=timeout)\n            else:\n                response = self.session.request(\n                    method=method, url=url, timeout=timeout)\n\n            status_code = response.status_code\n\n            try:\n                message = response.json()\n            except ValueError:\n                LOG.debug(\"No response received from API. Status code \"\n                          \"received is: %(status_code)s\", {\n                              'status_code': status_code})\n                message = None\n\n            LOG.debug(\"%(method)s request to %(url)s has returned with \"\n                      \"a status code of: %(status_code)s.\", {\n                          'method': method, 'url': url,\n                          'status_code': status_code})\n\n        except r_exc.SSLError as e:\n            msg = _(\"The connection to %(base_uri)s has encountered an \"\n                    \"SSL error. Please check your SSL config or supplied \"\n                    \"SSL cert in Delfin configuration. SSL Exception \"\n                    \"message: %(e)s\") % {'base_uri': self.base_uri, 'e': e}\n            LOG.error(msg)\n            err_str = six.text_type(e)\n            if 'certificate verify failed' in err_str:\n                raise exception.SSLCertificateFailed()\n            else:\n                raise exception.SSLHandshakeFailed()\n\n        except (r_exc.Timeout, r_exc.ConnectionError,\n                r_exc.HTTPError) as e:\n            exc_class, __, __ = sys.exc_info()\n            msg = _(\"The %(method)s to Unisphere server %(base)s has \"\n                    \"experienced a %(error)s error. Please check your \"\n                    \"Unisphere server connection/availability. 
\"\n                    \"Exception message: %(exc_msg)s\")\n            raise exc_class(msg % {'method': method,\n                                   'base': self.base_uri,\n                                   'error': e.__class__.__name__,\n                                   'exc_msg': e})\n\n        except Exception as e:\n            msg = _(\"The %(method)s request to URL %(url)s failed with \"\n                    \"exception %(e)s\")\n            LOG.error(msg, {'method': method, 'url': url,\n                            'e': six.text_type(e)})\n            raise exception.StorageBackendException(\n                message=(msg, {'method': method, 'url': url,\n                               'e': six.text_type(e)}))\n\n        return status_code, message\n\n    @staticmethod\n    def check_status_code_success(operation, status_code, message):\n        \"\"\"Check if a status code indicates success.\n        :param operation: the operation\n        :param status_code: the status code\n        :param message: the server response\n        :raises: StorageBackendException\n        \"\"\"\n        if status_code not in [STATUS_200, STATUS_201,\n                               STATUS_202, STATUS_204]:\n            exception_message = (\n                _(\"Error %(operation)s. The status code received is %(sc)s \"\n                  \"and the message is %(message)s.\") % {\n                    'operation': operation, 'sc': str(status_code),\n                    'message': message})\n            raise exception.StorageBackendException(\n                message=exception_message)\n\n    def build_uri(self, *args, **kwargs):\n        \"\"\"Build the target url.\n        :param args: input args, see _build_uri_legacy_args() for input\n                     breakdown\n        :param kwargs: input keyword args, see _build_uri_kwargs() for input\n                       breakdown\n        :return: target uri -- str\n        \"\"\"\n        if args:\n            target_uri = self._build_uri_legacy_args(*args, **kwargs)\n        else:\n            target_uri = self._build_uri_kwargs(**kwargs)\n\n        return target_uri\n\n    @staticmethod\n    def _build_uri_legacy_args(*args, **kwargs):\n        \"\"\"Build the target URI using legacy args & kwargs.\n        Expected format:\n            arg[0]: the array serial number -- str\n            arg[1]: the resource category e.g. 'sloprovisioning' -- str\n            arg[2]: the resource type e.g. 
'maskingview' -- str\n            kwarg resource_name: the name of a specific resource -- str\n            kwarg private: if endpoint is private -- bool\n            kwarg version: U4V REST endpoint version -- int/str\n            kwarg no_version: if endpoint should be versionless -- bool\n        :param args: input args -- see above\n        :param kwargs: input keyword args -- see above\n        :return: target URI -- str\n        \"\"\"\n        # Extract args following legacy _build_uri() format\n        array_id, category, resource_type = args[0], args[1], args[2]\n        # Extract keyword args following legacy _build_uri() format\n        resource_name = kwargs.get('resource_name')\n        private = kwargs.get('private')\n        version = kwargs.get('version', U4V_VERSION)\n        if kwargs.get('no_version'):\n            version = None\n\n        # Build URI\n        target_uri = ''\n        if private:\n            target_uri += '/private'\n        if version:\n            target_uri += '/%(version)s' % {'version': version}\n        target_uri += (\n            '/{cat}/symmetrix/{array_id}/{res_type}'.format(\n                cat=category, array_id=array_id, res_type=resource_type))\n        if resource_name:\n            target_uri += '/{resource_name}'.format(\n                resource_name=kwargs.get('resource_name'))\n\n        return target_uri\n\n    @staticmethod\n    def _build_uri_kwargs(**kwargs):\n        \"\"\"Build the target URI using kwargs.\n        Expected kwargs:\n            private: if endpoint is private (optional) -- bool\n            version: U4P REST endpoint version (optional) -- int/None\n            no_version: if endpoint should be versionless (optional) -- bool\n            category: U4P REST category eg. 'common', 'replication'-- str\n            resource_level: U4P REST resource level eg. 'symmetrix'\n                            (optional) -- str\n            resource_level_id: U4P REST resource level id (optional) -- str\n            resource_type: U4P REST resource type eg. 'rdf_director', 'host'\n                           (optional) -- str\n            resource_type_id: U4P REST resource type id (optional) -- str\n            resource: U4P REST resource eg. 'port' (optional) -- str\n            resource_id: U4P REST resource id (optional) -- str\n            object_type: U4P REST resource eg. 
'rdf_group' (optional) -- str\n            object_type_id: U4P REST resource id (optional) -- str\n        :param kwargs: input keyword args -- see above\n        :return: target URI -- str\n        \"\"\"\n        version = kwargs.get('version', U4V_VERSION)\n        if kwargs.get('no_version'):\n            version = None\n\n        target_uri = ''\n\n        if kwargs.get('private'):\n            target_uri += '/private'\n\n        if version:\n            target_uri += '/%(ver)s' % {'ver': version}\n\n        target_uri += '/%(cat)s' % {'cat': kwargs.get('category')}\n\n        if kwargs.get('resource_level'):\n            target_uri += '/%(res_level)s' % {\n                'res_level': kwargs.get('resource_level')}\n\n        if kwargs.get('resource_level_id'):\n            target_uri += '/%(res_level_id)s' % {\n                'res_level_id': kwargs.get('resource_level_id')}\n\n        if kwargs.get('resource_type'):\n            target_uri += '/%(res_type)s' % {\n                'res_type': kwargs.get('resource_type')}\n            if kwargs.get('resource_type_id'):\n                target_uri += '/%(res_type_id)s' % {\n                    'res_type_id': kwargs.get('resource_type_id')}\n\n        if kwargs.get('resource'):\n            target_uri += '/%(res)s' % {\n                'res': kwargs.get('resource')}\n            if kwargs.get('resource_id'):\n                target_uri += '/%(res_id)s' % {\n                    'res_id': kwargs.get('resource_id')}\n\n        if kwargs.get('object_type'):\n            target_uri += '/%(object_type)s' % {\n                'object_type': kwargs.get('object_type')}\n            if kwargs.get('object_type_id'):\n                target_uri += '/%(object_type_id)s' % {\n                    'object_type_id': kwargs.get('object_type_id')}\n\n        return target_uri\n\n    def get_request(self, target_uri, resource_type, params=None):\n        \"\"\"Send a GET request to the array.\n        :param target_uri: the target uri\n        :param resource_type: the resource type, e.g. maskingview\n        :param params: optional dict of filter params\n        :returns: resource_object -- dict or None\n        \"\"\"\n        resource_object = None\n        sc, message = self.request(target_uri, GET, params=params)\n        operation = 'get %(res)s' % {'res': resource_type}\n        try:\n            self.check_status_code_success(operation, sc, message)\n        except Exception as e:\n            LOG.debug(\"Get resource failed with %(e)s\",\n                      {'e': e})\n        if sc == STATUS_200:\n            resource_object = message\n            resource_object = self.list_pagination(resource_object)\n        return resource_object\n\n    def get_alert_request(self, target_uri):\n        \"\"\"Send a GET request to the array.\n        :param target_uri: the target uri\n        :returns: resource_object -- dict or None\n        \"\"\"\n        sc, message = self.request(target_uri, GET, params=None)\n        if sc != STATUS_200:\n            raise exception.StorageListAlertFailed(message)\n        resource_object = message\n        resource_object = self.list_pagination(resource_object)\n        return resource_object\n\n    def get_resource(self, array, category, resource_type,\n                     resource_name=None, params=None, private=False,\n                     version=U4V_VERSION):\n        \"\"\"Get resource details from array.\n        :param array: the array serial number\n        :param category: the resource category e.g. 
sloprovisioning\n        :param resource_type: the resource type e.g. maskingview\n        :param resource_name: the name of a specific resource\n        :param params: query parameters\n        :param private: empty string or '/private' if private url\n        :param version: None or specific version number if required\n        :returns: resource object -- dict or None\n        \"\"\"\n        target_uri = self.build_uri(\n            array, category, resource_type, resource_name=resource_name,\n            private=private, version=version)\n        return self.get_request(target_uri, resource_type, params)\n\n    def get_resource_kwargs(self, *args, **kwargs):\n        \"\"\"Get resource details from the array.\n\n        :key version: Unisphere version -- int\n        :key no_version: if versionless uri -- bool\n        :key category: the resource category e.g. sloprovisioning\n        :key resource_level: resource level e.g. storagegroup\n        :key resource_level_id: resource level id\n        :key resource_type: optional resource type e.g. maskingview\n        :key resource_type_id: optional resource type id\n        :key resource: the name of a specific resource\n        :key resource_id: the name of a specific resource\n        :key object_type: optional name of resource\n        :key object_type_id: optional name of resource\n        :key params: query parameters  -- dict\n        :key private: empty string or '/private' if private url\n        :returns: resource object -- dict or None\n        \"\"\"\n        resource_type = None\n        if args:\n            resource_type = args[2]\n        elif kwargs:\n            resource_type = kwargs.get('resource_level')\n        target_uri = self.build_uri(*args, **kwargs)\n        return self.get_request(\n            target_uri, resource_type, kwargs.get('params'))\n\n    def get_array_detail(self, version=U4V_VERSION, array=''):\n        \"\"\"Get an array from its serial number.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :returns: array_details -- dict or None\n        \"\"\"\n        target_uri = '/%s/system/symmetrix/%s' % (version, array)\n        array_details = self.get_request(target_uri, 'system')\n        if not array_details:\n            LOG.error(\"Cannot connect to array %(array)s.\",\n                      {'array': array})\n        return array_details\n\n    def get_uni_version(self):\n        \"\"\"Get the unisphere version from the server.\n        :returns: version and major_version(e.g. 
(\"V8.4.0.16\", \"84\"))\n        \"\"\"\n        version, major_version = None, None\n        response = self.get_unisphere_version()\n        if response and response.get('version'):\n            version = response['version']\n            version_list = version.split('.')\n            major_version = version_list[0][1] + version_list[1]\n        return version, major_version\n\n    def get_unisphere_version(self):\n        \"\"\"Get the unisphere version from the server.\n        :returns: version dict\n        \"\"\"\n        post_90_endpoint = '/version'\n        pre_91_endpoint = '/system/version'\n\n        status_code, version_dict = self.request(\n            post_90_endpoint, GET, timeout=VERSION_GET_TIME_OUT)\n        if status_code != STATUS_200:\n            status_code, version_dict = self.request(\n                pre_91_endpoint, GET, timeout=VERSION_GET_TIME_OUT)\n\n        if status_code == STATUS_401:\n            raise exception.InvalidUsernameOrPassword()\n\n        if not version_dict:\n            LOG.error(\"Unisphere version info not found.\")\n        return version_dict\n\n    def get_srp_by_name(self, array, version, srp=None):\n        \"\"\"Returns the details of a storage pool.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param srp: the storage resource pool name\n        :returns: SRP_details -- dict or None\n        \"\"\"\n        LOG.debug(\"storagePoolName: %(srp)s, array: %(array)s.\",\n                  {'srp': srp, 'array': array})\n        srp_details = self.get_resource(array, SLOPROVISIONING, 'srp',\n                                        resource_name=srp, version=version,\n                                        params=None)\n        return srp_details\n\n    def get_vmax_array_details(self, version=U4V_VERSION, array=''):\n        \"\"\"Get the VMax array properties.\n        :param version: the unisphere version\n        :param array: the array serial number\n        :returns: dict with the VMax model, ucode and display name\n        \"\"\"\n        system_info = self.get_array_detail(version, array)\n        vmax_model = system_info.get('model', 'VMAX')\n        vmax_ucode = system_info.get('ucode')\n        vmax_display_name = system_info.get('display_name', vmax_model)\n        array_details = {\"model\": vmax_model,\n                         \"ucode\": vmax_ucode,\n                         \"display_name\": vmax_display_name}\n        return array_details\n\n    def get_array_model_info(self, version=U4V_VERSION, array=''):\n        \"\"\"Get the VMax model.\n        :param version: the unisphere version\n        :param array: the array serial number\n        :returns: the VMax model and whether it is next generation -- tuple\n        \"\"\"\n        is_next_gen = False\n        system_info = self.get_array_detail(version, array)\n        array_model = system_info.get('model', None)\n        ucode_version = system_info['ucode'].split('.')[0]\n        if int(ucode_version) >= int(UCODE_5978):\n            is_next_gen = True\n        return array_model, is_next_gen\n\n    def get_storage_group(self, array, version, storage_group_name):\n        \"\"\"Given a name, return storage group details.\n        :param version: the unisphere version\n        :param array: the array serial number\n        :param storage_group_name: the name of the storage group\n        :returns: storage group dict or None\n        \"\"\"\n        return self.get_resource(\n            array, SLOPROVISIONING, 'storagegroup',\n            version=version,\n            
resource_name=storage_group_name)\n\n    def get_system_capacity(self, array, version):\n        \"\"\"Get the capacity summary of an array.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :returns: capacity details -- dict or None\n        \"\"\"\n        target_uri = '/%s/sloprovisioning/symmetrix/%s' % (version, array)\n        capacity_details = self.get_request(target_uri, None)\n        if not capacity_details:\n            LOG.error(\"Cannot connect to array %(array)s.\",\n                      {'array': array})\n        return capacity_details\n\n    def get_default_srps(self, array, version=U4V_VERSION):\n        \"\"\"Get the VMax array default SRPs.\n        :param version: the unisphere version\n        :param array: the array serial number\n        :returns: dictionary default SRPs\n        \"\"\"\n        symmetrix_info = self.get_system_capacity(array, version)\n        default_fba_srp = symmetrix_info.get('default_fba_srp', None)\n        default_ckd_srp = symmetrix_info.get('default_ckd_srp', None)\n        default_srps = {\"FBA\": default_fba_srp,\n                        \"CKD\": default_ckd_srp}\n        return default_srps\n\n    def get_volume(self, array, version, device_id):\n        \"\"\"Get a VMax volume from array.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param device_id: the volume device id\n        :returns: volume dict\n        :raises: StorageBackendException\n        \"\"\"\n        volume_dict = self.get_resource(\n            array, SLOPROVISIONING, 'volume', resource_name=device_id,\n            version=version)\n        if not volume_dict:\n            exception_message = (_(\"Volume %(deviceID)s not found.\")\n                                 % {'deviceID': device_id})\n            LOG.error(exception_message)\n            raise exception.StorageBackendException(\n                message=exception_message)\n        return volume_dict\n\n    def get_volume_list(self, array, version, params):\n        \"\"\"Get a filtered list of VMax volumes from array.\n        Filter parameters are required as the unfiltered volume list could be\n        very large and could affect performance if called often.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param params: filter parameters\n        :returns: device_ids -- list\n        \"\"\"\n        device_ids = []\n        volume_dict_list = self.get_resource(\n            array, SLOPROVISIONING, 'volume', version=version, params=params)\n        try:\n            for vol_dict in volume_dict_list:\n                device_id = vol_dict['volumeId']\n                device_ids.append(device_id)\n        except (KeyError, TypeError):\n            pass\n        return device_ids\n\n    def get_director(self, array, version, device_id):\n        \"\"\"Get a VMAX director from array.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param device_id: the director id\n        :returns: director dict\n        :raises: ControllerNotFound\n        \"\"\"\n        director_dict = None\n        # Unisphere versions 90 and above\n        if int(version) > 84:\n            director_dict = self.get_resource(\n                array, SYSTEM, 'director', resource_name=device_id,\n                version=version)\n\n        # Unisphere versions 84\n        if int(version) == 84:\n            director_dict = self.get_resource(\n                array, SLOPROVISIONING, 'director', resource_name=device_id,\n                version=version)\n\n        if int(version) < 84:\n            LOG.error(\"Director is not supported in Unisphere version < 8.4\")\n            
    def get_director(self, array, version, device_id):\n        \"\"\"Get a VMAX director from array.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param device_id: the director device id\n        :returns: director dict\n        :raises: ControllerNotFound\n        \"\"\"\n        director_dict = None\n        # Unisphere versions 9.0 and above\n        if int(version) > 84:\n            director_dict = self.get_resource(\n                array, SYSTEM, 'director', resource_name=device_id,\n                version=version)\n\n        # Unisphere version 8.4\n        if int(version) == 84:\n            director_dict = self.get_resource(\n                array, SLOPROVISIONING, 'director', resource_name=device_id,\n                version=version)\n\n        if int(version) < 84:\n            LOG.error(\"Director is not supported in Unisphere version < 8.4\")\n            return None\n\n        if not director_dict:\n            exception_message = (_(\"Director %(deviceID)s not found.\")\n                                 % {'deviceID': device_id})\n            LOG.error(exception_message)\n            raise exception.ControllerNotFound(device_id)\n        return director_dict\n\n    def get_director_list(self, array, version, params=None):\n        \"\"\"Get a filtered list of VMAX directors from array.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param params: filter parameters\n        :returns: directors -- list\n        \"\"\"\n        response = None\n        # Unisphere versions 9.0 and above\n        if int(version) > 84:\n            response = self.get_resource(\n                array, SYSTEM, 'director',\n                version=version, params=params)\n\n        # Unisphere version 8.4\n        if int(version) == 84:\n            response = self.get_resource(\n                array, SLOPROVISIONING, 'director',\n                version=version, params=params)\n\n        if int(version) < 84:\n            LOG.error(\"Director not supported in Unisphere version < 8.4\")\n            return []\n\n        if not response:\n            exception_message = (_(\"Get director list for array \"\n                                   \"%(array)s failed.\")\n                                 % {'array': array})\n            LOG.error(exception_message)\n            raise exception.ControllerListNotFound(array)\n\n        return response.get('directorId', [])\n\n    def get_port(self, array, version, director_id, port_id):\n        \"\"\"Get a VMAX port from array.\n        :param array: the array serial number\n        :param version: the unisphere version  -- int\n        :param director_id: the director id\n        :param port_id: the port id\n        :returns: port dict\n        :raises: PortNotFound\n        \"\"\"\n        port_dict = None\n        # Unisphere versions 9.0 and above\n        if int(version) > 84:\n            port_dict = self.get_resource_kwargs(\n                category=SYSTEM, version=version,\n                resource_level=SYMMETRIX, resource_level_id=array,\n                resource_type=DIRECTOR, resource_type_id=director_id,\n                resource=PORT, resource_id=port_id)\n\n        # Unisphere version 8.4\n        if int(version) == 84:\n            port_dict = self.get_resource_kwargs(\n                category=SLOPROVISIONING, version=version,\n                resource_level=SYMMETRIX, resource_level_id=array,\n                resource_type=DIRECTOR, resource_type_id=director_id,\n                resource=PORT, resource_id=port_id)\n\n        if int(version) < 84:\n            LOG.error(\"Port get is not supported in Unisphere version < 8.4\")\n            return None\n\n        if not port_dict:\n            exception_message = (_(\"Port %(deviceID)s not found.\")\n                                 % {'deviceID': port_id})\n            LOG.error(exception_message)\n            raise exception.PortNotFound(port_id)\n        return port_dict\n\n    def get_port_list(self, array, version, director_id, params=None):\n        \"\"\"Get a filtered list of VMAX ports from array.\n        :param array: the array serial number\n        :param version: the unisphere version  -- int\n        :param params: filter parameters\n        :param director_id: director id\n        :returns: port keys -- list\n        \"\"\"\n        response = None\n        # Unisphere versions 9.0 and above\n
        if int(version) > 84:\n            response = self.get_resource_kwargs(\n                category=SYSTEM, version=version,\n                resource_level=SYMMETRIX, resource_level_id=array,\n                resource_type=DIRECTOR, resource_type_id=director_id,\n                resource=PORT, params=params)\n\n        # Unisphere version 8.4\n        if int(version) == 84:\n            response = self.get_resource_kwargs(\n                category=SLOPROVISIONING, version=version,\n                resource_level=SYMMETRIX, resource_level_id=array,\n                resource_type=DIRECTOR, resource_type_id=director_id,\n                resource=PORT, params=params)\n\n        if int(version) < 84:\n            LOG.error(\"Port list not supported in Unisphere version < 8.4\")\n            return []\n\n        if not response:\n            exception_message = (_(\"Get port list for array \"\n                                   \"%(array)s failed.\")\n                                 % {'array': array})\n            LOG.error(exception_message)\n            raise exception.PortListNotFound(array)\n\n        return response.get('symmetrixPortKey', [])\n\n    def get_disk(self, array, version, device_id):\n        \"\"\"Get a VMax disk from array.\n        :param array: the array serial number\n        :param version: the unisphere version  -- int\n        :param device_id: the disk device id\n        :returns: disk dict\n        :raises: DiskNotFound\n        \"\"\"\n        disk_dict = self.get_resource(\n            array, SYSTEM, 'disk', resource_name=device_id,\n            version=version)\n        if not disk_dict:\n            exception_message = (_(\"Disk %(deviceID)s not found.\")\n                                 % {'deviceID': device_id})\n            LOG.error(exception_message)\n            raise exception.DiskNotFound(device_id)\n        return disk_dict\n\n    def get_disk_list(self, array, version, params=None):\n        \"\"\"Get a filtered list of VMax disks from array.\n        Filter parameters are required as the unfiltered disk list could be\n        very large and could affect performance if called often.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param params: filter parameters\n        :returns: disk_ids -- list\n        \"\"\"\n        disk_dict_list = self.get_resource(\n            array, SYSTEM, 'disk', version=version, params=params)\n        # get_resource may return None; guard before extracting the id list.\n        return (disk_dict_list or {}).get('disk_ids', [])\n\n    def get_initiator(self, array, version, initiator_id):\n        \"\"\"Get a VMax initiator from array.\n        :param array: the array serial number\n        :param version: the unisphere version  -- int\n        :param initiator_id: the initiator id\n        :returns: initiator dict\n        :raises: StorageHostInitiatorNotFound\n        \"\"\"\n        initiator_dict = self.get_resource(\n            array, SLOPROVISIONING, 'initiator', resource_name=initiator_id,\n            version=version)\n        if not initiator_dict:\n            exception_message = (_(\"Initiator %(initiator_id)s not found.\")\n                                 % {'initiator_id': initiator_id})\n            LOG.error(exception_message)\n            raise exception.StorageHostInitiatorNotFound(initiator_id)\n        return initiator_dict\n\n    def get_initiator_list(self, array, version, params=None):\n        \"\"\"Get a filtered list of VMax initiators from array.\n
        Filter parameters are required as the unfiltered initiator list\n        could be very large and could affect performance if called often.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param params: filter parameters\n        :returns: initiatorId -- list\n        \"\"\"\n        initiator_dict_list = self.get_resource(\n            array, SLOPROVISIONING, 'initiator',\n            version=version, params=params)\n        return (initiator_dict_list or {}).get('initiatorId', [])\n\n    def get_host(self, array, version, host_id):\n        \"\"\"Get a VMax host from array.\n        :param array: the array serial number\n        :param version: the unisphere version  -- int\n        :param host_id: the host id\n        :returns: host dict\n        :raises: StorageHostNotFound\n        \"\"\"\n        host_dict = self.get_resource(\n            array, SLOPROVISIONING, 'host',\n            resource_name=host_id,\n            version=version)\n        if not host_dict:\n            exception_message = (_(\"Host %(host_id)s not found.\")\n                                 % {'host_id': host_id})\n            LOG.error(exception_message)\n            raise exception.StorageHostNotFound(host_id)\n        return host_dict\n\n    def get_host_list(self, array, version, params=None):\n        \"\"\"Get a filtered list of VMax hosts from array.\n        Filter parameters are required as the unfiltered host list\n        could be very large and could affect performance if called often.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param params: filter parameters\n        :returns: hostId -- list\n        \"\"\"\n        host_dict_list = self.get_resource(\n            array, SLOPROVISIONING, 'host',\n            version=version, params=params)\n        return (host_dict_list or {}).get('hostId', [])\n\n    def get_host_group(self, array, version, host_group_id):\n        \"\"\"Get a VMax host group from array.\n        :param array: the array serial number\n        :param version: the unisphere version  -- int\n        :param host_group_id: the host group id\n        :returns: host group dict\n        :raises: StorageHostGroupNotFound\n        \"\"\"\n        host_group_dict = self.get_resource(\n            array, SLOPROVISIONING, 'hostgroup',\n            resource_name=host_group_id,\n            version=version)\n        if not host_group_dict:\n            exception_message = (_(\"HostGroup %(host_group_id)s not found.\")\n                                 % {'host_group_id': host_group_id})\n            LOG.error(exception_message)\n            raise exception.StorageHostGroupNotFound(host_group_id)\n        return host_group_dict\n\n    def get_host_group_list(self, array, version, params=None):\n        \"\"\"Get a filtered list of VMax host groups from array.\n        Filter parameters are required as the unfiltered host group list\n        could be very large and could affect performance if called often.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param params: filter parameters\n        :returns: hostGroupId -- list\n        \"\"\"\n        host_group_dict_list = self.get_resource(\n            array, SLOPROVISIONING, 'hostgroup',\n            version=version, params=params)\n        return (host_group_dict_list or {}).get('hostGroupId', [])\n\n    def get_port_group(self, array, version, port_group_id):\n        \"\"\"Get a VMax port group from array.\n
        :param array: the array serial number\n        :param version: the unisphere version  -- int\n        :param port_group_id: the port group id\n        :returns: port group dict\n        :raises: PortGroupNotFound\n        \"\"\"\n        port_group_dict = self.get_resource(\n            array, SLOPROVISIONING, 'portgroup',\n            resource_name=port_group_id,\n            version=version)\n        if not port_group_dict:\n            exception_message = (_(\"PortGroup %(port_group_id)s not found.\")\n                                 % {'port_group_id': port_group_id})\n            LOG.error(exception_message)\n            raise exception.PortGroupNotFound(port_group_id)\n        return port_group_dict\n\n    def get_port_group_list(self, array, version, params=None):\n        \"\"\"Get a filtered list of VMax port groups from array.\n        Filter parameters are required as the unfiltered port group list\n        could be very large and could affect performance if called often.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param params: filter parameters\n        :returns: portGroupId -- list\n        \"\"\"\n        port_group_dict_list = self.get_resource(\n            array, SLOPROVISIONING, 'portgroup',\n            version=version, params=params)\n        return (port_group_dict_list or {}).get('portGroupId', [])\n\n    def get_volume_group(self, array, version, storage_group_id):\n        \"\"\"Get a VMax storage/volume group from array.\n        :param array: the array serial number\n        :param version: the unisphere version  -- int\n        :param storage_group_id: the storage group id\n        :returns: volume group dict\n        :raises: VolumeGroupNotFound\n        \"\"\"\n        storage_group_dict = self.get_resource(\n            array, SLOPROVISIONING, 'storagegroup',\n            resource_name=storage_group_id,\n            version=version)\n        if not storage_group_dict:\n            exception_message = (_(\"StorageGroup %(id)s not found.\")\n                                 % {'id': storage_group_id})\n            LOG.error(exception_message)\n            raise exception.VolumeGroupNotFound(storage_group_id)\n        return storage_group_dict\n\n    def get_volume_group_list(self, array, version, params=None):\n        \"\"\"Get a filtered list of VMax storage groups from array.\n        Filter parameters are required as the unfiltered storage group list\n        could be very large and could affect performance if called often.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param params: filter parameters\n        :returns: storageGroupId -- list\n        \"\"\"\n        storage_group_dict_list = self.get_resource(\n            array, SLOPROVISIONING, 'storagegroup',\n            version=version, params=params)\n        return (storage_group_dict_list or {}).get('storageGroupId', [])\n\n    def get_masking_view(self, array, version, masking_view_id):\n        \"\"\"Get a VMax masking view from array.\n        :param array: the array serial number\n        :param version: the unisphere version  -- int\n        :param masking_view_id: the masking view id\n        :returns: masking view dict\n        :raises: MaskingViewNotFound\n        \"\"\"\n        masking_view_dict = self.get_resource(\n            array, SLOPROVISIONING, 'maskingview',\n            resource_name=masking_view_id,\n            version=version)\n        if not masking_view_dict:\n            exception_message = (_(\"Masking View %(id)s not found.\")\n                                 % {'id': masking_view_id})\n
(_(\"Masking View %(id)s not found.\")\n                                 % {'id': masking_view_id})\n            LOG.error(exception_message)\n            raise exception.MaskingViewNotFound(masking_view_id)\n        return masking_view_dict\n\n    def get_masking_view_list(self, array, version, params=None):\n        \"\"\"Get a filtered list of VMax masking views from array.\n        Filter parameters are required as the unfiltered initiator list\n        could bevery large and could affect performance if called often.\n        :param array: the array serial number\n        :param version: the unisphere version\n        :param params: filter parameters\n        :returns: maskingViewId -- list\n        \"\"\"\n        masking_view_dict_list = self.get_resource(\n            array, SLOPROVISIONING, 'maskingview',\n            version=version, params=params)\n        return masking_view_dict_list.get('maskingViewId', [])\n\n    def post_request(self, target_uri, payload):\n        \"\"\"Generate  a POST request.\n        :param target_uri: the uri to query from unipshere REST API\n        :param payload: the payload\n        :returns: status_code -- int, message -- string, server response\n        \"\"\"\n        status_code, message = self.request(target_uri, POST,\n                                            request_object=payload)\n        resource_object = None\n        if status_code == STATUS_200:\n            resource_object = message\n            resource_object = self.list_pagination(resource_object)\n        operation = 'POST request for URL'\n        self.check_status_code_success(\n            operation, status_code, resource_object)\n\n        return status_code, resource_object\n\n    def get_array_keys(self, array):\n        target_uri = '/performance/Array/keys'\n\n        response = self.get_request(target_uri, PERFORMANCE, None)\n        if response is None:\n            err_msg = \"Failed to get Array keys from VMAX: {0}\"\\\n                .format(str(array))\n            LOG.error(err_msg)\n\n        return response\n\n    def get_resource_keys(self, array, resource, payload=None):\n        if payload is None:\n            payload = {}\n\n        payload['symmetrixId'] = str(array)\n        target_uri = '/performance/{0}/keys'.format(resource)\n        sc, response = self.post_request(target_uri, payload)\n        if response is None:\n            err_msg = \"Failed to get {0} keys from VMAX: {1} status: {2}\"\\\n                .format(resource, str(array), sc)\n            LOG.error(err_msg)\n\n        return response\n\n    def get_resource_metrics(self, array, start_time,\n                             end_time, resource, metrics,\n                             payload=None):\n        if payload is None:\n            payload = {}\n\n        payload['symmetrixId'] = str(array)\n        payload['startDate'] = start_time\n        payload['endDate'] = end_time\n        payload['metrics'] = metrics\n        payload['dataFormat'] = 'Average'\n        target_uri = '/performance/{0}/metrics'.format(resource)\n\n        status_code, response = self.post_request(target_uri, payload)\n        if status_code != STATUS_200:\n            err_msg = \"Failed to get {0} metrics from VMAX: {1}\" \\\n                .format(resource, str(array))\n            LOG.error(err_msg)\n            return None\n        return response\n\n    def get_storage_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get a array performance metrics from VMAX unipshere REST API.\n        
    def get_storage_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get array performance metrics from the Unisphere REST API.\n        :param array: the array serial number\n        :param metrics: required metrics\n        :param start_time: start time for collection\n        :param end_time: end time for collection\n        :returns: metrics_list -- list\n        \"\"\"\n        storage_metrics = []\n        for k in metrics.keys():\n            vmax_key = constants.STORAGE_METRICS.get(k)\n            if vmax_key:\n                storage_metrics.append(vmax_key)\n\n        keys = self.get_array_keys(array)\n        keys_dict = None\n        if keys:\n            keys_dict = keys.get('arrayInfo', None)\n\n        metrics_list = []\n        for key_dict in (keys_dict or []):\n            if key_dict.get('symmetrixId') == array:\n                metrics_res = self.get_resource_metrics(\n                    array, start_time, end_time, 'Array',\n                    storage_metrics, payload=None)\n                if metrics_res:\n                    label = {\n                        'resource_id': key_dict.get('symmetrixId'),\n                        'resource_name': 'VMAX' + key_dict.get('symmetrixId'),\n                        'resource_type': delfin_const.ResourceType.STORAGE,\n                        'metrics': metrics_res\n                    }\n                    metrics_list.append(label)\n\n        return metrics_list\n\n    def get_pool_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get SRP (pool) performance metrics from the Unisphere REST API.\n        :param array: the array serial number\n        :param metrics: required metrics\n        :param start_time: start time for collection\n        :param end_time: end time for collection\n        :returns: metrics_list -- list\n        \"\"\"\n        pool_metrics = []\n        for k in metrics.keys():\n            vmax_key = constants.POOL_METRICS.get(k)\n            if vmax_key:\n                pool_metrics.append(vmax_key)\n\n        keys = self.get_resource_keys(array, 'SRP')\n        keys_dict = None\n        if keys:\n            keys_dict = keys.get('srpInfo', None)\n\n        metrics_list = []\n        for key_dict in (keys_dict or []):\n            payload = {'srpId': key_dict.get('srpId')}\n            metrics_res = self.get_resource_metrics(\n                array, start_time, end_time, 'SRP',\n                pool_metrics, payload=payload)\n            if metrics_res:\n                label = {\n                    'resource_id': key_dict.get('srpId'),\n                    'resource_name': key_dict.get('srpId'),\n                    'resource_type': delfin_const.ResourceType.STORAGE_POOL,\n                    'metrics': metrics_res\n                }\n                metrics_list.append(label)\n\n        return metrics_list\n\n    def get_fedirector_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get FE director performance metrics from the Unisphere REST API.\n        :param array: the array serial number\n        :param metrics: required metrics\n        :param start_time: start time for collection\n        :param end_time: end time for collection\n        :returns: metrics_list -- list\n        \"\"\"\n        fedirector_metrics = []\n        for k in metrics.keys():\n            vmax_key = constants.FEDIRECTOR_METRICS.get(k)\n            if vmax_key:\n                fedirector_metrics.append(vmax_key)\n\n        keys = self.get_resource_keys(array, 'FEDirector')\n        keys_dict = None\n        if keys:\n            keys_dict = keys.get('feDirectorInfo', None)\n\n
        metrics_list = []\n        for key_dict in (keys_dict or []):\n            payload = {'directorId': key_dict.get('directorId')}\n            metrics_res = self.get_resource_metrics(\n                array, start_time, end_time, 'FEDirector',\n                fedirector_metrics, payload=payload)\n            if metrics_res:\n                label = {\n                    'resource_id': key_dict.get('directorId'),\n                    'resource_name': 'FEDirector_' +\n                                     key_dict.get('directorId'),\n                    'resource_type': delfin_const.ResourceType.CONTROLLER,\n                    'metrics': metrics_res\n                }\n                metrics_list.append(label)\n\n        return metrics_list\n\n    def get_bedirector_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get BE director performance metrics from the Unisphere REST API.\n        :param array: the array serial number\n        :param metrics: required metrics\n        :param start_time: start time for collection\n        :param end_time: end time for collection\n        :returns: metrics_list -- list\n        \"\"\"\n        bedirector_metrics = []\n        for k in metrics.keys():\n            vmax_key = constants.BEDIRECTOR_METRICS.get(k)\n            if vmax_key:\n                bedirector_metrics.append(vmax_key)\n\n        keys = self.get_resource_keys(array, 'BEDirector')\n        keys_dict = None\n        if keys:\n            keys_dict = keys.get('beDirectorInfo', None)\n\n        metrics_list = []\n        for key_dict in (keys_dict or []):\n            payload = {'directorId': key_dict.get('directorId')}\n            metrics_res = self.get_resource_metrics(\n                array, start_time, end_time, 'BEDirector',\n                bedirector_metrics, payload=payload)\n            if metrics_res:\n                label = {\n                    'resource_id': key_dict.get('directorId'),\n                    'resource_name': 'BEDirector_' +\n                                     key_dict.get('directorId'),\n                    'resource_type': delfin_const.ResourceType.CONTROLLER,\n                    'metrics': metrics_res\n                }\n                metrics_list.append(label)\n\n        return metrics_list\n\n    def get_rdfdirector_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get RDF director performance metrics from the Unisphere REST API.\n        :param array: the array serial number\n        :param metrics: required metrics\n        :param start_time: start time for collection\n        :param end_time: end time for collection\n        :returns: metrics_list -- list\n        \"\"\"\n        rdfdirector_metrics = []\n        for k in metrics.keys():\n            vmax_key = constants.RDFDIRECTOR_METRICS.get(k)\n            if vmax_key:\n                rdfdirector_metrics.append(vmax_key)\n\n        keys = self.get_resource_keys(array, 'RDFDirector')\n        keys_dict = None\n        if keys:\n            keys_dict = keys.get('rdfDirectorInfo', None)\n\n        metrics_list = []\n        for key_dict in (keys_dict or []):\n            payload = {'directorId': key_dict.get('directorId')}\n            metrics_res = self.get_resource_metrics(\n                array, start_time, end_time, 'RDFDirector',\n                rdfdirector_metrics, payload=payload)\n            if metrics_res:\n                label = {\n                    'resource_id': key_dict.get('directorId'),\n
                    'resource_name': 'RDFDirector_' +\n                                     key_dict.get('directorId'),\n                    'resource_type': delfin_const.ResourceType.CONTROLLER,\n                    'metrics': metrics_res\n                }\n                metrics_list.append(label)\n\n        return metrics_list\n\n    def get_controller_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get director performance metrics from the Unisphere REST API.\n        :param array: the array serial number\n        :param metrics: required metrics\n        :param start_time: start time for collection\n        :param end_time: end time for collection\n        :returns: BE, FE and RDF director metrics -- three lists\n        \"\"\"\n        be_metrics = self.get_bedirector_metrics(\n            array, metrics, start_time, end_time)\n        fe_metrics = self.get_fedirector_metrics(\n            array, metrics, start_time, end_time)\n        rdf_metrics = self.get_rdfdirector_metrics(\n            array, metrics, start_time, end_time)\n\n        return be_metrics, fe_metrics, rdf_metrics\n\n    def get_feport_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get FE port performance metrics from the Unisphere REST API.\n        :param array: the array serial number\n        :param metrics: required metrics\n        :param start_time: start time for collection\n        :param end_time: end time for collection\n        :returns: metrics_list -- list\n        \"\"\"\n        feport_metrics = []\n        for k in metrics.keys():\n            vmax_key = constants.FEPORT_METRICS.get(k)\n            if vmax_key:\n                feport_metrics.append(vmax_key)\n\n        director_keys = self.get_resource_keys(array, 'FEDirector')\n        director_keys_dict = None\n        if director_keys:\n            director_keys_dict = director_keys.get('feDirectorInfo', None)\n\n        metrics_list = []\n        for director_key_dict in (director_keys_dict or []):\n            payload = {'directorId': director_key_dict.get('directorId')}\n            keys = self.get_resource_keys(array, 'FEPort', payload=payload)\n            keys_dict = None\n            if keys:\n                keys_dict = keys.get('fePortInfo', None)\n\n            for key_dict in (keys_dict or []):\n                payload['portId'] = key_dict.get('portId')\n                metrics_res = self.get_resource_metrics(\n                    array, start_time, end_time, 'FEPort',\n                    feport_metrics, payload=payload)\n                if metrics_res:\n                    label = {\n                        'resource_id': key_dict.get('portId'),\n                        'resource_name': 'FEPort_' +\n                                         director_key_dict.get('directorId') +\n                                         '_' + key_dict.get('portId'),\n                        'resource_type': delfin_const.ResourceType.PORT,\n                        'metrics': metrics_res\n                    }\n                    metrics_list.append(label)\n\n        return metrics_list\n\n    def get_beport_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get BE port performance metrics from the Unisphere REST API.\n        :param array: the array serial number\n        :param metrics: required metrics\n        :param start_time: start time for collection\n        :param end_time: end time for collection\n        :returns: metrics_list -- list\n        \"\"\"\n
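        # Ports are enumerated per director: fetch the BE director keys\n        # first, then the port keys for each director, and collect metrics\n        # per (directorId, portId) pair.\n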
        beport_metrics = []\n        for k in metrics.keys():\n            vmax_key = constants.BEPORT_METRICS.get(k)\n            if vmax_key:\n                beport_metrics.append(vmax_key)\n\n        director_keys = self.get_resource_keys(array, 'BEDirector')\n        director_keys_dict = None\n        if director_keys:\n            director_keys_dict = director_keys.get('beDirectorInfo', None)\n\n        metrics_list = []\n        for director_key_dict in (director_keys_dict or []):\n            payload = {'directorId': director_key_dict.get('directorId')}\n            keys = self.get_resource_keys(array, 'BEPort', payload=payload)\n            keys_dict = None\n            if keys:\n                keys_dict = keys.get('bePortInfo', None)\n\n            for key_dict in (keys_dict or []):\n                payload['portId'] = key_dict.get('portId')\n                metrics_res = self.get_resource_metrics(\n                    array, start_time, end_time, 'BEPort',\n                    beport_metrics, payload=payload)\n                if metrics_res:\n                    label = {\n                        'resource_id': key_dict.get('portId'),\n                        'resource_name': 'BEPort_' +\n                                         director_key_dict.get('directorId') +\n                                         '_' + key_dict.get('portId'),\n                        'resource_type': delfin_const.ResourceType.PORT,\n                        'metrics': metrics_res\n                    }\n                    metrics_list.append(label)\n\n        return metrics_list\n\n    def get_rdfport_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get RDF port performance metrics from the Unisphere REST API.\n        :param array: the array serial number\n        :param metrics: required metrics\n        :param start_time: start time for collection\n        :param end_time: end time for collection\n        :returns: metrics_list -- list\n        \"\"\"\n        rdfport_metrics = []\n        for k in metrics.keys():\n            vmax_key = constants.RDFPORT_METRICS.get(k)\n            if vmax_key:\n                rdfport_metrics.append(vmax_key)\n\n        director_keys = self.get_resource_keys(array, 'RDFDirector')\n        director_keys_dict = None\n        if director_keys:\n            director_keys_dict = director_keys.get('rdfDirectorInfo', None)\n\n        metrics_list = []\n        for director_key_dict in (director_keys_dict or []):\n            payload = {'directorId': director_key_dict.get('directorId')}\n            keys = self.get_resource_keys(array, 'RDFPort', payload=payload)\n            keys_dict = None\n            if keys:\n                keys_dict = keys.get('rdfPortInfo', None)\n\n            for key_dict in (keys_dict or []):\n                payload['portId'] = key_dict.get('portId')\n                metrics_res = self.get_resource_metrics(\n                    array, start_time, end_time, 'RDFPort',\n                    rdfport_metrics, payload=payload)\n                if metrics_res:\n                    label = {\n                        'resource_id': key_dict.get('portId'),\n                        'resource_name': 'RDFPort_' +\n                                         director_key_dict.get('directorId') +\n                                         '_' + key_dict.get('portId'),\n                        'resource_type': delfin_const.ResourceType.PORT,\n                        'metrics': metrics_res\n                    }\n
                    metrics_list.append(label)\n\n        return metrics_list\n\n    def get_port_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get port performance metrics from the Unisphere REST API.\n        :param array: the array serial number\n        :param metrics: required metrics\n        :param start_time: start time for collection\n        :param end_time: end time for collection\n        :returns: BE, FE and RDF port metrics -- three lists\n        \"\"\"\n        be_metrics = self.get_beport_metrics(\n            array, metrics, start_time, end_time)\n        fe_metrics = self.get_feport_metrics(\n            array, metrics, start_time, end_time)\n        rdf_metrics = self.get_rdfport_metrics(\n            array, metrics, start_time, end_time)\n        return be_metrics, fe_metrics, rdf_metrics\n\n    def get_disk_metrics(self, array, metrics, start_time, end_time):\n        \"\"\"Get disk performance metrics from the Unisphere REST API.\n        :param array: the array serial number\n        :param metrics: required metrics\n        :param start_time: start time for collection\n        :param end_time: end time for collection\n        :returns: metrics_list -- list\n        \"\"\"\n        disk_metrics = []\n        for k in metrics.keys():\n            vmax_key = constants.DISK_METRICS.get(k)\n            if vmax_key:\n                disk_metrics.append(vmax_key)\n\n        keys = self.get_resource_keys(array, 'Disk')\n        keys_dict = None\n        if keys:\n            keys_dict = keys.get('diskInfo', None)\n\n        metrics_list = []\n        for key_dict in (keys_dict or []):\n            payload = {'diskId': key_dict.get('diskId')}\n            metrics_res = self.get_resource_metrics(\n                array, start_time, end_time, 'Disk',\n                disk_metrics, payload=payload)\n            if metrics_res:\n                label = {\n                    'resource_id': key_dict.get('diskId'),\n                    'resource_name': 'Disk_' + key_dict.get('diskId'),\n                    'resource_type': delfin_const.ResourceType.DISK,\n                    'metrics': metrics_res\n                }\n                metrics_list.append(label)\n\n        return metrics_list\n\n    def list_pagination(self, list_info):\n        \"\"\"Process lists under or over the maxPageSize.\n        :param list_info: the object list information\n        :returns: the result list\n        \"\"\"\n        result_list = []\n        try:\n            result_list = list_info['resultList']['result']\n            iterator_id = list_info['id']\n            list_count = list_info['count']\n            max_page_size = list_info['maxPageSize']\n            start_position = list_info['resultList']['from']\n            end_position = list_info['resultList']['to']\n        except (KeyError, TypeError):\n            return list_info\n        if list_count > max_page_size:\n            LOG.info(\"More entries exist in the result list, retrieving \"\n                     \"remainder of results from iterator.\")\n\n            start_position = end_position + 1\n            if list_count < (end_position + max_page_size):\n                end_position = list_count\n            else:\n                end_position += max_page_size\n            iterator_response = self.get_iterator_page_list(\n                iterator_id, list_count, start_position, end_position,\n                max_page_size)\n\n            result_list += iterator_response\n        return result_list\n\n
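    # Unisphere pages large result sets; entries beyond the first page are\n    # pulled from the common iterator endpoint until the full count is\n    # reached.\n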
    def get_iterator_page_list(self, iterator_id, result_count, start_position,\n                               end_position, max_page_size):\n        \"\"\"Iterate through response if more than one page available.\n        :param iterator_id: the iterator ID\n        :param result_count: the amount of results in the iterator\n        :param start_position: position to begin iterator from\n        :param end_position: position to stop iterator\n        :param max_page_size: the max page size\n        :returns: list -- merged results from multiple pages\n        \"\"\"\n        iterator_result = []\n        has_more_entries = True\n\n        while has_more_entries:\n            if start_position <= result_count <= end_position:\n                end_position = result_count\n                has_more_entries = False\n\n            params = {'to': end_position, 'from': start_position}\n            target_uri = ('/common/Iterator/%(iterator_id)s/page' % {\n                'iterator_id': iterator_id})\n            iterator_response = self.get_request(target_uri, 'iterator',\n                                                 params)\n            try:\n                iterator_result += iterator_response['result']\n                start_position += max_page_size\n                end_position += max_page_size\n            except (KeyError, TypeError):\n                pass\n\n        return iterator_result\n\n    def get_alerts(self, query_para, array, version):\n        \"\"\"Get all alerts for the given array id and version.\n        :param query_para: contains optional begin and end time\n        :param array: the array serial number\n        :param version: the unisphere version\n        :returns: alert_list -- list of alert dicts\n        \"\"\"\n        target_uri = '/%s/system/symmetrix/%s/alert?acknowledged=false' \\\n                     % (version, array)\n\n        # First get the list of all alert ids\n        alert_id_list = self.get_alert_request(target_uri)\n        if not alert_id_list:\n            # No current alert ids found\n            return []\n\n        # For each alert id, get the details of the alert.\n        # The list above is keyed by 'alertId'.\n        alert_id_list = alert_id_list['alertId']\n        alert_list = []\n        for alert_id in alert_id_list:\n            target_uri = '/%s/system/symmetrix/%s/alert/%s' \\\n                         % (version, array, alert_id)\n            alert = self.get_alert_request(target_uri)\n            if alert is not None and alert_util.is_alert_in_time_range(\n                    query_para, alert['created_date_milliseconds']):\n                alert_list.append(alert)\n\n        return alert_list\n\n    def clear_alert(self, sequence_number, array, version):\n        \"\"\"Clear the alert with the given sequence number.\n        :param sequence_number: unique id of the alert\n        :param array: the array serial number\n        :param version: the unisphere version\n        :returns: status -- the response status code on success\n        :raises: StorageClearAlertFailed\n        \"\"\"\n        target_uri = '/%s/system/symmetrix/%s/alert/%s' \\\n                     % (version, array, sequence_number)\n\n        status, message = self.request(target_uri, DELETE, params=None)\n        if status != STATUS_204:\n            raise exception.StorageClearAlertFailed(message)\n        return status\n"
  },
  {
    "path": "delfin/drivers/dell_emc/vmax/vmax.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_log import log\n\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom delfin.drivers.dell_emc.vmax import client\nfrom delfin.drivers.dell_emc.vmax import constants as consts\nfrom delfin.drivers.dell_emc.vmax.alert_handler import snmp_alerts\nfrom delfin.drivers.dell_emc.vmax.alert_handler import unisphere_alerts\n\nLOG = log.getLogger(__name__)\n\n\nclass VMAXStorageDriver(driver.StorageDriver):\n    \"\"\"VMAXStorageDriver implement the DELL EMC Storage driver,\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.client = client.VMAXClient(**kwargs)\n        self.client.init_connection(kwargs)\n        self.add_storage(kwargs)\n\n    def delete_storage(self, context):\n        self.client.array_id.pop(context.storage_id)\n\n    def add_storage(self, kwargs):\n        self.client.add_storage(kwargs)\n\n    def reset_connection(self, context, **kwargs):\n        self.client.reset_connection(**kwargs)\n\n    def get_storage(self, context):\n        storage_id = context.storage_id\n        # Get the VMAX model\n        array_details = self.client.get_array_details(storage_id)\n        model = array_details['model']\n        ucode = array_details['ucode']\n        display_name = array_details['display_name']\n\n        # Get Storage details for capacity info\n        total_capacity, used_capacity, free_capacity,\\\n            raw_capacity, subscribed_capacity = \\\n            self.client.get_storage_capacity(storage_id)\n\n        storage = {\n            # Unisphere Rest API do not provide Array name .\n            # Generate  name  by combining model and symmetrixId\n            'name': display_name,\n            'vendor': 'Dell EMC',\n            'description': '',\n            'model': model,\n            'firmware_version': ucode,\n            'status': constants.StorageStatus.NORMAL,\n            'serial_number': self.client.array_id[storage_id],\n            'location': '',\n            'total_capacity': total_capacity,\n            'used_capacity': used_capacity,\n            'free_capacity': free_capacity,\n            'raw_capacity': raw_capacity,\n            'subscribed_capacity': subscribed_capacity\n        }\n        LOG.info(\"get_storage(), successfully retrieved storage details\")\n        return storage\n\n    def list_storage_pools(self, context):\n        return self.client.list_storage_pools(context.storage_id)\n\n    def list_volumes(self, context):\n        return self.client.list_volumes(context.storage_id)\n\n    def list_controllers(self, context):\n        return self.client.list_controllers(context.storage_id)\n\n    def list_ports(self, context):\n        return self.client.list_ports(context.storage_id)\n\n    def list_disks(self, context):\n        return self.client.list_disks(context.storage_id)\n\n    def list_storage_host_initiators(self, context):\n        return 
    def list_storage_host_initiators(self, context):\n        return self.client.list_storage_host_initiators(context.storage_id)\n\n    def list_storage_hosts(self, context):\n        return self.client.list_storage_hosts(context.storage_id)\n\n    def list_storage_host_groups(self, context):\n        return self.client.list_storage_host_groups(context.storage_id)\n\n    def list_port_groups(self, context):\n        return self.client.list_port_groups(context.storage_id)\n\n    def list_volume_groups(self, context):\n        return self.client.list_volume_groups(context.storage_id)\n\n    def list_masking_views(self, context):\n        return self.client.list_masking_views(context.storage_id)\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return snmp_alerts.AlertHandler().parse_alert(context, alert)\n\n    def clear_alert(self, context, sequence_number):\n        return self.client.clear_alert(context.storage_id, sequence_number)\n\n    def list_alerts(self, context, query_para):\n        # Alert sources:\n        # 1. CM generated snmp_alerts\n        # 2. SNMP trap forwarder (specific 3rd IP)\n        alert_list = self.client.list_alerts(context.storage_id, query_para)\n        alert_model_list = unisphere_alerts.AlertHandler()\\\n            .parse_queried_alerts(alert_list)\n        return alert_model_list\n\n    def collect_perf_metrics(self, context, storage_id,\n                             resource_metrics, start_time,\n                             end_time):\n        metrics = []\n        try:\n            # storage metrics\n            if resource_metrics.get(constants.ResourceType.STORAGE):\n                storage_metrics = self.client.get_storage_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.STORAGE),\n                    start_time, end_time)\n                metrics.extend(storage_metrics)\n\n            # storage-pool metrics\n            if resource_metrics.get(constants.ResourceType.STORAGE_POOL):\n                pool_metrics = self.client.get_pool_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.STORAGE_POOL),\n                    start_time, end_time)\n                metrics.extend(pool_metrics)\n\n            # controller metrics\n            if resource_metrics.get(constants.ResourceType.CONTROLLER):\n                controller_metrics = self.client.get_controller_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.CONTROLLER),\n                    start_time, end_time)\n                metrics.extend(controller_metrics)\n\n            # port metrics\n            if resource_metrics.get(constants.ResourceType.PORT):\n                port_metrics = self.client.get_port_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.PORT),\n                    start_time, end_time)\n                metrics.extend(port_metrics)\n\n            # disk metrics\n            if resource_metrics.get(constants.ResourceType.DISK):\n                disk_metrics = self.client.get_disk_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.DISK),\n                    start_time, end_time)\n                metrics.extend(disk_metrics)\n\n        except Exception:\n            LOG.error(\"Failed to collect metrics from VMAX\")\n            raise\n\n
        return metrics\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        \"\"\"Get the capabilities of the supported driver.\"\"\"\n        return {\n            'is_historic': True,\n            'resource_metrics': {\n                constants.ResourceType.STORAGE: consts.STORAGE_CAP,\n                constants.ResourceType.STORAGE_POOL: consts.POOL_CAP,\n                constants.ResourceType.CONTROLLER: consts.CONTROLLER_CAP,\n                constants.ResourceType.PORT: consts.PORT_CAP,\n                constants.ResourceType.DISK: consts.DISK_CAP,\n            }\n        }\n"
  },
  {
    "path": "delfin/drivers/dell_emc/vnx/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/dell_emc/vnx/vnx_block/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/dell_emc/vnx/vnx_block/alert_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport hashlib\n\nfrom oslo_log import log as logging\n\nfrom delfin import exception, utils\nfrom delfin.common import constants\nfrom delfin.drivers.dell_emc.vnx.vnx_block import consts\nfrom delfin.i18n import _\n\nLOG = logging.getLogger(__name__)\n\n\nclass AlertHandler(object):\n\n    @staticmethod\n    def parse_alert(alert):\n        try:\n            alert_model = dict()\n            alert_model['alert_id'] = AlertHandler.check_event_code(\n                alert.get(consts.OID_MESSAGECODE))\n            alert_model['alert_name'] = alert.get(consts.OID_DETAILS)\n            alert_model['severity'] = consts.TRAP_LEVEL_MAP.get(\n                alert.get(consts.OID_SEVERITY),\n                constants.Severity.INFORMATIONAL)\n            alert_model['category'] = constants.Category.FAULT\n            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n            alert_model['occur_time'] = utils.utcnow_ms()\n            alert_model['description'] = alert.get(consts.OID_DETAILS)\n            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n            alert_model['match_key'] = hashlib.md5(\n                alert.get(consts.OID_DETAILS, '').encode()).hexdigest()\n            return alert_model\n        except Exception as e:\n            LOG.error(e)\n            msg = (_(\"Failed to build alert model as some attributes missing \"\n                     \"in alert message.\"))\n            raise exception.InvalidResults(msg)\n\n    @staticmethod\n    def check_event_code(event_code):\n        if '0x' not in event_code:\n            event_code = '0x%s' % event_code\n        return event_code\n"
  },
  {
    "path": "delfin/drivers/dell_emc/vnx/vnx_block/component_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport csv\nimport os\nimport re\nimport time\n\nimport six\nfrom oslo_log import log\nfrom oslo_utils import units\n\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.drivers.dell_emc.vnx.vnx_block import consts\nfrom delfin.drivers.utils.tools import Tools\n\nLOG = log.getLogger(__name__)\n\n\nclass ComponentHandler(object):\n\n    def __init__(self, navi_handler):\n        self.navi_handler = navi_handler\n\n    def get_storage(self):\n        domain = self.navi_handler.get_domain()\n        agent = self.navi_handler.get_agent()\n        status = constants.StorageStatus.NORMAL\n        raw_cap = self.handle_disk_capacity()\n        pool_capacity = self.handle_pool_capacity()\n        if domain and agent and pool_capacity:\n            result = {\n                'name': domain[0].get('node'),\n                'vendor': consts.EMCVNX_VENDOR,\n                'model': agent.get('model'),\n                'status': status,\n                'serial_number': agent.get('serial_no'),\n                'firmware_version': agent.get('revision'),\n                'total_capacity': pool_capacity.get('total_capacity'),\n                'raw_capacity': int(raw_cap),\n                'used_capacity': pool_capacity.get('used_capacity'),\n                'free_capacity': pool_capacity.get('free_capacity')\n            }\n        else:\n            err_msg = \"Get vnx storage info failed, domain: %s, agent: %s,\" \\\n                      \" pool_capacity: %s\" % (six.text_type(domain),\n                                              six.text_type(agent),\n                                              six.text_type(pool_capacity))\n            LOG.error(err_msg)\n            raise exception.StorageBackendException(err_msg)\n        return result\n\n    def list_storage_pools(self, storage_id):\n        pools = self.navi_handler.get_pools()\n        pool_list = []\n        if pools:\n            for pool in pools:\n                if pool.get('pool_name') is not None:\n                    status = consts.STATUS_MAP.get(\n                        pool.get('state'),\n                        constants.StoragePoolStatus.OFFLINE)\n                    used_cap = float(\n                        pool.get(\"consumed_capacity_gbs\")) * units.Gi\n                    free_cap = float(\n                        pool.get(\"available_capacity_gbs\")) * units.Gi\n                    total_cap = float(\n                        pool.get(\"user_capacity_gbs\")) * units.Gi\n                    subscribed_cap = float(pool.get(\n                        \"total_subscribed_capacity_gbs\")) * units.Gi\n                    p = {\n                        'name': pool.get('pool_name'),\n                        'storage_id': storage_id,\n                        'native_storage_pool_id': str(pool.get('pool_id')),\n                        'description': pool.get('description'),\n                   
     'status': status,\n                        'storage_type': constants.StorageType.BLOCK,\n                        'total_capacity': int(total_cap),\n                        'subscribed_capacity': int(subscribed_cap),\n                        'used_capacity': int(used_cap),\n                        'free_capacity': int(free_cap)\n                    }\n                    pool_list.append(p)\n        raid_groups = self.handle_raid_groups(storage_id)\n        if raid_groups:\n            pool_list.extend(raid_groups)\n        return pool_list\n\n    def handle_raid_groups(self, storage_id):\n        raid_groups = self.navi_handler.get_raid_group()\n        raid_list = []\n        if raid_groups:\n            for raid in raid_groups:\n                if raid.get('raidgroup_id') is not None:\n                    status = consts.STATUS_MAP.get(\n                        raid.get('raidgroup_state'),\n                        constants.StoragePoolStatus.OFFLINE)\n                    free_cap = float(raid.get(\n                        \"free_capacity_blocks,non-contiguous\"))\n                    total_cap = float(\n                        raid.get(\"logical_capacity_blocks\"))\n                    used_cap = total_cap - free_cap\n                    p = {\n                        'name': 'RAID Group %s' % raid.get('raidgroup_id'),\n                        'storage_id': storage_id,\n                        'native_storage_pool_id': '%s%s' % (\n                            consts.RAID_GROUP_ID_PREFIX,\n                            raid.get('raidgroup_id')),\n                        'status': status,\n                        'storage_type': constants.StorageType.BLOCK,\n                        'total_capacity': int(total_cap * (units.Ki / 2)),\n                        'used_capacity': int(used_cap * (units.Ki / 2)),\n                        'free_capacity': int(free_cap * (units.Ki / 2))\n                    }\n                    raid_list.append(p)\n        return raid_list\n\n    def handle_volume_from_pool(self, volumes, pool_ids, storage_id):\n        volume_list = []\n        if volumes:\n            for volume in volumes:\n                if volume.get('name') is not None:\n                    status = consts.STATUS_MAP.get(\n                        volume.get('current_state'),\n                        constants.StoragePoolStatus.OFFLINE)\n                    orig_pool_name = volume.get('pool_name')\n                    vol_type = consts.VOL_TYPE_MAP.get(\n                        volume.get('is_thin_lun').lower())\n                    volume_used_cap_str = volume.get('consumed_capacity_gbs')\n                    used_cap = 0\n                    if volume_used_cap_str and volume_used_cap_str != 'N/A':\n                        used_cap = float(volume_used_cap_str) * units.Gi\n                    total_cap = float(\n                        volume.get('user_capacity_gbs')) * units.Gi\n                    free_cap = total_cap - used_cap\n                    if free_cap < 0:\n                        free_cap = 0\n                    v = {\n                        'name': volume.get('name'),\n                        'storage_id': storage_id,\n                        'status': status,\n                        'native_volume_id': str(volume.get('lun_id')),\n                        'native_storage_pool_id': pool_ids.get(orig_pool_name,\n                                                               ''),\n                        'type': vol_type,\n                        'total_capacity': 
int(total_cap),\n                        'used_capacity': int(used_cap),\n                        'free_capacity': int(free_cap),\n                        'compressed': consts.VOL_COMPRESSED_MAP.get(\n                            volume.get('is_compressed').lower()),\n                        'wwn': volume.get('uid')\n                    }\n                    volume_list.append(v)\n        return volume_list\n\n    def handle_volume_from_raid_group(self, storage_id):\n        volume_list = []\n        volumes = self.navi_handler.get_all_lun()\n        if volumes:\n            for volume in volumes:\n                if volume.get('raidgroup_id') and (\n                        volume.get('raidgroup_id') != 'N/A' or volume.get(\n                        'is_meta_lun') == 'YES'):\n                    pool_id = None\n                    if volume.get('raidgroup_id') != 'N/A':\n                        pool_id = '%s%s' % (consts.RAID_GROUP_ID_PREFIX,\n                                            volume.get('raidgroup_id'))\n                    status = consts.STATUS_MAP.get(\n                        volume.get('state'),\n                        constants.StoragePoolStatus.OFFLINE)\n                    vol_type = consts.VOL_TYPE_MAP.get(\n                        volume.get('is_thin_lun').lower())\n                    total_cap = float(\n                        volume.get('lun_capacitymegabytes')) * units.Mi\n                    used_cap = total_cap\n                    free_cap = 0\n                    v = {\n                        'name': volume.get('name'),\n                        'storage_id': storage_id,\n                        'status': status,\n                        'native_volume_id': str(\n                            volume.get('logical_unit_number')),\n                        'native_storage_pool_id': pool_id,\n                        'type': vol_type,\n                        'total_capacity': int(total_cap),\n                        'used_capacity': int(used_cap),\n                        'free_capacity': int(free_cap),\n                        'wwn': volume.get('uid')\n                    }\n                    volume_list.append(v)\n        return volume_list\n\n    def list_volumes(self, storage_id):\n        volumes = self.navi_handler.get_pool_lun()\n        pools = self.navi_handler.get_pools()\n        pool_ids = {}\n        if pools:\n            for pool in pools:\n                if pool.get('pool_name') is not None:\n                    pool_ids[pool.get('pool_name')] = pool.get('pool_id')\n        volume_list = self.handle_volume_from_pool(volumes, pool_ids,\n                                                   storage_id)\n        raid_volumes = self.handle_volume_from_raid_group(storage_id)\n        if raid_volumes:\n            volume_list.extend(raid_volumes)\n        return volume_list\n\n    def handle_disk_capacity(self):\n        disks = self.navi_handler.get_disks()\n        raw_capacity = 0\n        if disks:\n            for disk in disks:\n                if disk.get('disk_id') is not None:\n                    capacity = float(disk.get(\"capacity\", 0))\n                    raw_capacity += capacity\n        return raw_capacity * units.Mi\n\n    def handle_pool_capacity(self):\n        pools = self.list_storage_pools(None)\n        total_capacity = 0\n        free_capacity = 0\n        used_capacity = 0\n        obj_model = None\n        if pools:\n            for pool in pools:\n                total_capacity += pool.get(\"total_capacity\")\n                
free_capacity += pool.get(\"free_capacity\")\n                used_capacity += pool.get(\"used_capacity\")\n            obj_model = {\n                'total_capacity': total_capacity,\n                'free_capacity': free_capacity,\n                'used_capacity': used_capacity\n            }\n        return obj_model\n\n    def list_disks(self, storage_id):\n        disks = self.navi_handler.get_disks()\n        disk_list = []\n        for disk in (disks or []):\n            if disk.get('disk_id'):\n                status = consts.DISK_STATUS_MAP.get(\n                    disk.get('state', '').upper(),\n                    constants.DiskStatus.ABNORMAL)\n                capacity = int(float(disk.get(\"capacity\", 0)) * units.Mi)\n                logical_type = constants.DiskLogicalType.UNKNOWN\n                hot_spare = disk.get('hot_spare', '')\n                if hot_spare and hot_spare != 'N/A':\n                    logical_type = constants.DiskLogicalType.HOTSPARE\n                disk_name = disk.get('disk_name')\n                disk_name = ' '.join(disk_name.strip().split())\n                disk_model = {\n                    'name': disk_name,\n                    'storage_id': storage_id,\n                    'native_disk_id': disk.get('disk_id'),\n                    'serial_number': disk.get('serial_number'),\n                    'manufacturer': disk.get('vendor_id'),\n                    'model': disk.get('product_id'),\n                    'firmware': disk.get('product_revision'),\n                    'speed': None,\n                    'capacity': capacity,\n                    'status': status,\n                    'physical_type': consts.DISK_PHYSICAL_TYPE_MAP.get(\n                        disk.get('drive_type', '').upper(),\n                        constants.DiskPhysicalType.UNKNOWN),\n                    'logical_type': logical_type,\n                    'health_score': None,\n                    'native_disk_group_id': None,\n                    'location': disk_name\n                }\n                disk_list.append(disk_model)\n        return disk_list\n\n    def analyse_speed(self, speed_value):\n        speed = 0\n        try:\n            speeds = re.findall(\"\\\\d+\", speed_value)\n            if speeds:\n                speed = int(speeds[0])\n            if 'Gbps' in speed_value:\n                speed = speed * units.G\n            elif 'Mbps' in speed_value:\n                speed = speed * units.M\n            elif 'Kbps' in speed_value:\n                speed = speed * units.k\n        except Exception as err:\n            err_msg = \"analyse speed error: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n        return speed\n\n    def list_controllers(self, storage_id):\n        controllers = self.navi_handler.get_controllers()\n        cpus = self.navi_handler.get_cpus()\n        controller_list = []\n        for controller in (controllers or []):\n            memory_size = int(controller.get('memory_size_for_the_sp',\n                                             '0')) * units.Mi\n            cpu_info = ''\n            cpu_count = None\n            if cpus:\n                cpu_info = cpus.get(\n                    controller.get('serial_number_for_the_sp', ''), '')\n            if cpu_info:\n                cpu_count = 1\n            controller_model = {\n                'name': controller.get('sp_name'),\n                'storage_id': storage_id,\n                'native_controller_id': controller.get('signature_for_the_sp'),\n        
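# status is not parsed from getsp output; the driver reports NORMAL unconditionally\n        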
        'status': constants.ControllerStatus.NORMAL,\n                'location': None,\n                'soft_version': controller.get('revision_number_for_the_sp'),\n                'cpu_info': cpu_info,\n                'cpu_count': cpu_count,\n                'memory_size': str(memory_size)\n            }\n            controller_list.append(controller_model)\n        return controller_list\n\n    def list_ports(self, storage_id):\n        port_list = []\n        io_configs = self.navi_handler.get_io_configs()\n        iscsi_port_map = self.get_iscsi_ports()\n        ports = self.get_ports(storage_id, io_configs, iscsi_port_map)\n        port_list.extend(ports)\n        bus_ports = self.get_bus_ports(storage_id, io_configs)\n        port_list.extend(bus_ports)\n        return port_list\n\n    def get_ports(self, storage_id, io_configs, iscsi_port_map):\n        ports = self.navi_handler.get_ports()\n        port_list = []\n        for port in (ports or []):\n            port_id = port.get('sp_port_id')\n            sp_name = port.get('sp_name').replace('SP ', '')\n            name = '%s-%s' % (sp_name, port_id)\n            location = 'Slot %s%s,Port %s' % (\n                sp_name, port.get('i/o_module_slot'),\n                port.get('physical_port_id'))\n            mac_address = port.get('mac_address')\n            if mac_address == 'Not Applicable':\n                mac_address = None\n            module_key = '%s_%s' % (\n                sp_name, port.get('i/o_module_slot'))\n            type = ''\n            if io_configs:\n                type = io_configs.get(module_key, '')\n\n            ipv4 = None\n            ipv4_mask = None\n            if iscsi_port_map:\n                iscsi_port = iscsi_port_map.get(name)\n                if iscsi_port:\n                    ipv4 = iscsi_port.get('ip_address')\n                    ipv4_mask = iscsi_port.get('subnet_mask')\n            port_model = {\n                'name': location,\n                'storage_id': storage_id,\n                'native_port_id': name,\n                'location': location,\n                'connection_status':\n                    consts.PORT_CONNECTION_STATUS_MAP.get(\n                        port.get('link_status', '').upper(),\n                        constants.PortConnectionStatus.UNKNOWN),\n                'health_status': consts.PORT_HEALTH_STATUS_MAP.get(\n                    port.get('port_status', '').upper(),\n                    constants.PortHealthStatus.UNKNOWN),\n                'type': consts.PORT_TYPE_MAP.get(\n                    type.upper(), constants.PortType.OTHER),\n                'logical_type': None,\n                'speed': self.analyse_speed(\n                    port.get('speed_value', '')),\n                'max_speed': self.analyse_speed(\n                    port.get('max_speed', '')),\n                'native_parent_id': None,\n                'wwn': port.get('sp_uid'),\n                'mac_address': mac_address,\n                'ipv4': ipv4,\n                'ipv4_mask': ipv4_mask,\n                'ipv6': None,\n                'ipv6_mask': None,\n            }\n            port_list.append(port_model)\n        return port_list\n\n    def get_bus_ports(self, storage_id, io_configs):\n        bus_ports = self.navi_handler.get_bus_ports()\n        port_list = []\n        if bus_ports:\n            bus_port_state_map = self.navi_handler.get_bus_port_state()\n            for bus_port in bus_ports:\n                sps = bus_port.get('sps')\n                for sp in 
(sps or []):\n                    sp_name = sp.replace('sp', '').upper()\n                    location = '%s %s,Port %s' % (\n                        bus_port.get('i/o_module_slot'), sp_name,\n                        bus_port.get('physical_port_id'))\n                    native_port_id = location.replace(' ', '')\n                    native_port_id = native_port_id.replace(',', '')\n                    module_key = '%s_%s' % (\n                        sp_name, bus_port.get('i/o_module_slot'))\n                    type = ''\n                    if io_configs:\n                        type = io_configs.get(module_key, '')\n                    state = ''\n                    if bus_port_state_map:\n                        port_state_key = '%s_%s' % (\n                            sp_name, bus_port.get('physical_port_id'))\n                        state = bus_port_state_map.get(port_state_key,\n                                                       '')\n                    port_model = {\n                        'name': location,\n                        'storage_id': storage_id,\n                        'native_port_id': native_port_id,\n                        'location': location,\n                        'connection_status':\n                            constants.PortConnectionStatus.UNKNOWN,\n                        'health_status':\n                            consts.PORT_HEALTH_STATUS_MAP.get(\n                                state.upper(),\n                                constants.PortHealthStatus.UNKNOWN),\n                        'type': consts.PORT_TYPE_MAP.get(\n                            type.upper(), constants.PortType.OTHER),\n                        'logical_type': None,\n                        'speed': self.analyse_speed(\n                            bus_port.get('current_speed', '')),\n                        'max_speed': self.analyse_speed(\n                            bus_port.get('max_speed', '')),\n                        'native_parent_id': None,\n                        'wwn': None,\n                        'mac_address': None,\n                        'ipv4': None,\n                        'ipv4_mask': None,\n                        'ipv6': None,\n                        'ipv6_mask': None,\n                    }\n                    port_list.append(port_model)\n        return port_list\n\n    def get_iscsi_ports(self):\n        iscsi_port_map = {}\n        iscsi_ports = self.navi_handler.get_iscsi_ports()\n        for iscsi_port in (iscsi_ports or []):\n            name = '%s-%s' % (iscsi_port.get('sp'), iscsi_port.get('port_id'))\n            iscsi_port_map[name] = iscsi_port\n        return iscsi_port_map\n\n    def list_masking_views(self, storage_id):\n        views = self.navi_handler.list_masking_views()\n        views_list = []\n        host_vv_set = set()\n        if views:\n            for view in views:\n                name = view.get('storage_group_name')\n                host_names = view.get('host_names')\n                lun_ids = view.get('lun_ids')\n                if name:\n                    if name == '~physical' or name == '~management':\n                        continue\n                    view_model_template = {\n                        'native_masking_view_id': view.get(\n                            'storage_group_uid'),\n                        \"name\": view.get('storage_group_name'),\n                        \"storage_id\": storage_id\n                    }\n                    if host_names and lun_ids:\n                        
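# expand the storage group into one view record per unique (host, LUN) pair\n                        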
host_names = list(set(host_names))\n                        for host_name in host_names:\n                            host_id = host_name.replace(' ', '')\n                            for lun_id in lun_ids:\n                                host_vv_key = '%s_%s' % (host_id, lun_id)\n                                if host_vv_key in host_vv_set:\n                                    continue\n                                host_vv_set.add(host_vv_key)\n                                view_model = copy.deepcopy(view_model_template)\n                                view_model[\n                                    'native_storage_host_id'] = host_id\n                                view_model['native_volume_id'] = lun_id\n                                view_model[\n                                    'native_masking_view_id'] = '%s_%s_%s' % (\n                                    view_model.get('native_masking_view_id'),\n                                    host_id, lun_id)\n                                views_list.append(view_model)\n        return views_list\n\n    def list_storage_host_initiators(self, storage_id):\n        initiators = self.navi_handler.list_hbas()\n        initiators_list = []\n        initiator_set = set()\n        port_types = {}\n        if initiators:\n            ports = self.list_ports(storage_id)\n            for port in (ports or []):\n                if port and port.get('type'):\n                    port_types[port.get('name')] = port.get('type')\n            for initiator in (initiators or []):\n                if initiator and initiator.get('hba_uid'):\n                    hba_uid = initiator.get('hba_uid')\n                    type = ''\n                    if port_types:\n                        ports = initiator.get('port_ids')\n                        if ports:\n                            port_id = list(ports)[0]\n                            type = port_types.get(port_id, '')\n                    host_id = initiator.get('server_name', '').replace(' ', '')\n                    if host_id == hba_uid:\n                        host_id = None\n                    if not host_id:\n                        continue\n                    if hba_uid in initiator_set:\n                        continue\n                    initiator_set.add(hba_uid)\n\n                    initiator_model = {\n                        \"name\": hba_uid,\n                        \"storage_id\": storage_id,\n                        \"native_storage_host_initiator_id\": hba_uid,\n                        \"wwn\": hba_uid,\n                        \"type\": consts.INITIATOR_TYPE_MAP.get(\n                            type.upper(), constants.InitiatorType.UNKNOWN),\n                        \"status\": constants.InitiatorStatus.ONLINE,\n                        \"native_storage_host_id\": host_id\n                    }\n                    initiators_list.append(initiator_model)\n        return initiators_list\n\n    def list_storage_hosts(self, storage_id):\n        hosts = self.navi_handler.list_hbas()\n        host_list = []\n        host_ids = set()\n        host_ips = {}\n        for host in (hosts or []):\n            if host and host.get('server_name'):\n                os_type = constants.HostOSTypes.UNKNOWN\n                os_name = host.get('hba_vendor_description')\n                ip_addr = host.get('server_ip_address')\n                if ip_addr == 'UNKNOWN':\n                    continue\n                if os_name and 'VMware ESXi' in os_name:\n                    os_type = 
constants.HostOSTypes.VMWARE_ESX\n                id = host.get('server_name').replace(' ', '')\n                if id in host_ids:\n                    continue\n                host_ids.add(id)\n\n                if ip_addr in host_ips.keys():\n                    first_port_ids = host_ips.get(ip_addr)\n                    cur_port_ids = host.get('port_ids')\n                    add_host = False\n                    intersections = list(\n                        set(first_port_ids).intersection(set(cur_port_ids)))\n                    if not intersections:\n                        add_host = True\n                    if not add_host:\n                        continue\n                host_ips[ip_addr] = host.get('port_ids')\n\n                host_model = {\n                    \"name\": host.get('server_name'),\n                    \"storage_id\": storage_id,\n                    \"native_storage_host_id\": id,\n                    \"os_type\": os_type,\n                    \"status\": constants.HostStatus.NORMAL,\n                    \"ip_address\": ip_addr\n                }\n                host_list.append(host_model)\n        return host_list\n\n    def collect_perf_metrics(self, storage_id, resource_metrics,\n                             start_time, end_time):\n        metrics = []\n        archive_file_list = []\n        try:\n            LOG.info(\"Start collection, storage:%s, start time:%s, end time:%s\"\n                     % (storage_id, start_time, end_time))\n            archive_file_list = self._get__archive_file(start_time, end_time)\n            LOG.info(\"Get archive files: {}\".format(archive_file_list))\n            if not archive_file_list:\n                LOG.warning(\"The required performance file was not found!\")\n                return metrics\n            resources_map, resources_type_map = self._get_resources_map(\n                resource_metrics)\n            if not resources_map or not resources_type_map:\n                LOG.warning(\"Resource object not found!\")\n                return metrics\n            performance_lines_map = self._filter_performance_data(\n                archive_file_list, resources_map, start_time, end_time)\n            if not performance_lines_map:\n                LOG.warning(\"The required performance data was not found!\")\n                return metrics\n            metrics = self.create_metrics(storage_id, resource_metrics,\n                                          resources_map, resources_type_map,\n                                          performance_lines_map)\n            LOG.info(\"Collection complete, storage:%s, start time:%s, \"\n                     \"end time:%s, length of metrics:%s \"\n                     % (storage_id, start_time, end_time, len(metrics)))\n        except exception.DelfinException as err:\n            err_msg = \"Failed to collect metrics from VnxBlockStor: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise err\n        except Exception as err:\n            err_msg = \"Failed to collect metrics from VnxBlockStor: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        finally:\n            self._remove_archive_file(archive_file_list)\n        return metrics\n\n    def create_metrics(self, storage_id, resource_metrics, resources_map,\n                       resources_type_map, performance_lines_map):\n        metrics = []\n        for 
resource_obj, resource_type in resources_type_map.items():\n            if not resources_map.get(resource_obj) \\\n                    or not resource_type:\n                continue\n            if not performance_lines_map.get(resource_obj):\n                continue\n            labels = {\n                'storage_id': storage_id,\n                'resource_type': resource_type,\n                'resource_id': resources_map.get(resource_obj),\n                'type': 'RAW',\n                'unit': ''\n            }\n            metric_model_list = self._get_metric_model(\n                resource_metrics.get(resource_type), labels,\n                performance_lines_map.get(resource_obj),\n                consts.RESOURCES_TYPE_TO_METRIC_CAP.get(resource_type),\n                resource_type)\n            if metric_model_list:\n                metrics.extend(metric_model_list)\n        return metrics\n\n    def _get__archive_file(self, start_time, end_time):\n        archive_file_list = []\n        archives = self.navi_handler.get_archives()\n        tools = Tools()\n        for archive_info in (archives or []):\n            collection_timestamp = tools.time_str_to_timestamp(\n                archive_info.get('collection_time'), consts.TIME_PATTERN)\n            if collection_timestamp > start_time:\n                archive_file_list.append(archive_info.get('archive_name'))\n            if collection_timestamp > end_time:\n                break\n        return archive_file_list\n\n    def _get_metric_model(self, metric_list, labels, metric_values, obj_cap,\n                          resources_type):\n        metric_model_list = []\n        tools = Tools()\n        for metric_name in (metric_list or []):\n            values = {}\n            obj_labels = copy.copy(labels)\n            obj_labels['unit'] = obj_cap.get(metric_name).get('unit')\n            for metric_value in metric_values:\n                metric_value_infos = metric_value\n                if not consts.METRIC_MAP.get(resources_type, {}).get(\n                        metric_name):\n                    continue\n                value = metric_value_infos[\n                    consts.METRIC_MAP.get(resources_type).get(metric_name)]\n                if not value:\n                    value = '0'\n                collection_timestamp = tools.time_str_to_timestamp(\n                    metric_value_infos[1], consts.TIME_PATTERN)\n                collection_time_str = tools.timestamp_to_time_str(\n                    collection_timestamp, consts.COLLECTION_TIME_PATTERN)\n                collection_timestamp = tools.time_str_to_timestamp(\n                    collection_time_str, consts.COLLECTION_TIME_PATTERN)\n                if \"iops\" == obj_cap.get(metric_name).get('unit').lower():\n                    value = int(float(value))\n                else:\n                    value = float('%.6f' % (float(value)))\n                values[collection_timestamp] = value\n            if values:\n                metric_model = constants.metric_struct(name=metric_name,\n                                                       labels=obj_labels,\n                                                       values=values)\n                metric_model_list.append(metric_model)\n        return metric_model_list\n\n    def _get_resources_map(self, resource_metrics):\n        resources_map = {}\n        resources_type_map = {}\n        for resource_type_key in resource_metrics.keys():\n            sub_resources_map = {}\n            
sub_resources_type_map = {}\n            if resource_type_key == constants.ResourceType.CONTROLLER:\n                sub_resources_map, sub_resources_type_map = \\\n                    self._get_controllers_map()\n            elif resource_type_key == constants.ResourceType.PORT:\n                sub_resources_map, sub_resources_type_map = \\\n                    self._get_ports_map()\n            elif resource_type_key == constants.ResourceType.DISK:\n                sub_resources_map, sub_resources_type_map = \\\n                    self._get_disks_map()\n            elif resource_type_key == constants.ResourceType.VOLUME:\n                sub_resources_map, sub_resources_type_map = \\\n                    self._get_volumes_map()\n            if sub_resources_map and sub_resources_type_map:\n                resources_map.update(sub_resources_map)\n                resources_type_map.update(sub_resources_type_map)\n        return resources_map, resources_type_map\n\n    def _get_controllers_map(self):\n        resources_map = {}\n        resources_type_map = {}\n        controllers = self.navi_handler.get_controllers()\n        for controller in (controllers or []):\n            resources_map[controller.get('sp_name')] = controller.get(\n                'signature_for_the_sp')\n            resources_type_map[controller.get('sp_name')] = \\\n                constants.ResourceType.CONTROLLER\n        return resources_map, resources_type_map\n\n    def _get_ports_map(self):\n        resources_map = {}\n        resources_type_map = {}\n        ports = self.navi_handler.get_ports()\n        for port in (ports or []):\n            port_id = port.get('sp_port_id')\n            sp_name = port.get('sp_name').replace('SP ', '')\n            name = '%s-%s' % (sp_name, port_id)\n            port_id = 'Port %s [ %s ]' % (port_id, port.get('sp_uid'))\n            resources_map[port_id] = name\n            resources_type_map[port_id] = constants.ResourceType.PORT\n        return resources_map, resources_type_map\n\n    def _get_disks_map(self):\n        resources_map = {}\n        resources_type_map = {}\n        disks = self.navi_handler.get_disks()\n        for disk in (disks or []):\n            disk_name = disk.get('disk_name')\n            disk_name = ' '.join(disk_name.strip().split())\n            resources_map[disk_name] = disk.get('disk_id')\n            resources_type_map[disk_name] = constants.ResourceType.DISK\n        return resources_map, resources_type_map\n\n    def _get_volumes_map(self):\n        resources_map = {}\n        resources_type_map = {}\n        volumes = self.navi_handler.get_all_lun()\n        for volume in (volumes or []):\n            if not volume.get('name'):\n                continue\n            volume_name = '%s [%s]' % (\n                volume.get('name'), volume.get('logical_unit_number'))\n            resources_map[volume_name] = str(volume.get('logical_unit_number'))\n            resources_type_map[volume_name] = constants.ResourceType.VOLUME\n        return resources_map, resources_type_map\n\n    def _filter_performance_data(self, archive_file_list, resources_map,\n                                 start_time, end_time):\n        performance_lines_map = {}\n        try:\n            tools = Tools()\n            for archive_file in archive_file_list:\n                self.navi_handler.download_archives(archive_file)\n                archive_name_infos = archive_file.split('.')\n                file_path = '%s%s.csv' % (\n                    
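# after download_archives, a CSV dump of the archive (see ARCHIVEDUMP_API) is read from the local path\n                    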
self.navi_handler.get_local_file_path(),\n                    archive_name_infos[0])\n                with open(file_path) as file:\n                    f_csv = csv.reader(file)\n                    next(f_csv)\n                    for row in f_csv:\n                        self._package_performance_data(row, resources_map,\n                                                       start_time, end_time,\n                                                       tools,\n                                                       performance_lines_map)\n        except Exception as err:\n            err_msg = \"Failed to filter performance data: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.StorageBackendException(err_msg)\n        return performance_lines_map\n\n    def _package_performance_data(self, row, resources_map, start_time,\n                                  end_time, tools, performance_lines_map):\n        resource_obj_name = row[0]\n        resource_obj_name = self._package_resource_obj_name(resource_obj_name)\n        if resource_obj_name in resources_map:\n            obj_collection_timestamp = tools.time_str_to_timestamp(\n                row[1], consts.TIME_PATTERN)\n            if (start_time + consts.TIME_INTERVAL_FLUCTUATION) \\\n                    <= obj_collection_timestamp \\\n                    and obj_collection_timestamp \\\n                    <= (end_time + consts.TIME_INTERVAL_FLUCTUATION):\n                performance_lines_map.setdefault(resource_obj_name, []).append(\n                    row)\n\n    def _package_resource_obj_name(self, source_name):\n        target_name = source_name\n        if 'Port ' in target_name:\n            return re.sub(r'(\\[.*;)', '[', target_name)\n        elif '; ' in target_name:\n            return re.sub(r'(; .*])', ']', target_name)\n        return target_name\n\n    def _remove_archive_file(self, archive_file_list):\n        try:\n            for archive_file in archive_file_list:\n                nar_file_path = '%s%s' % (\n                    self.navi_handler.get_local_file_path(), archive_file)\n                archive_name_infos = archive_file.split('.')\n                csv_file_path = '%s%s.csv' % (\n                    self.navi_handler.get_local_file_path(),\n                    archive_name_infos[0])\n                for file_path in [nar_file_path, csv_file_path]:\n                    LOG.info(\"Delete file :{}\".format(file_path))\n                    if os.path.exists(file_path):\n                        os.remove(file_path)\n                    else:\n                        err_msg = 'no such file:%s' % file_path\n                        LOG.error(err_msg)\n                        raise exception.StorageBackendException(err_msg)\n        except Exception as err:\n            err_msg = \"Failed to remove archive file: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.StorageBackendException(err_msg)\n\n    def get_latest_perf_timestamp(self, storage_id):\n        latest_time = 0\n        num = 0\n        tools = Tools()\n        while latest_time <= 0:\n            num += 1\n            latest_time, file_latest_time = self.check_latest_timestamp(\n                storage_id)\n            if num > consts.EXEC_MAX_NUM:\n                latest_time = file_latest_time\n                LOG.warning(\"Storage:{}, Exit after {} executions.\".format(\n                    storage_id, 
consts.EXEC_MAX_NUM))\n                break\n            if latest_time <= 0:\n                wait_time = tools.timestamp_to_time_str(\n                    time.time() * units.k,\n                    consts.ARCHIVE_FILE_NAME_TIME_PATTERN)\n                LOG.warning(\"Storage:{} No new file found, \"\n                            \"wait for next execution:{}\".format(storage_id,\n                                                                wait_time))\n                time.sleep(consts.SLEEP_TIME_SECONDS)\n        return latest_time\n\n    def get_data_latest_timestamp(self, storage_id):\n        archive_file_list = []\n        try:\n            tools = Tools()\n            archive_name = self.navi_handler.create_archives(storage_id)\n            LOG.info(\"Create archive_name: {}\".format(archive_name))\n            archive_file_list.append(archive_name)\n            archive_name_infos = archive_name.split('.')\n            file_path = '%s%s.csv' % (\n                self.navi_handler.get_local_file_path(), archive_name_infos[0])\n            resource_obj_name = ''\n            collection_time = ''\n            with open(file_path) as file:\n                f_csv = csv.reader(file)\n                next(f_csv)\n                for row in f_csv:\n                    if not resource_obj_name or resource_obj_name == row[0]:\n                        resource_obj_name = row[0]\n                        collection_time = row[1]\n                    else:\n                        break\n                latest_time = tools.time_str_to_timestamp(collection_time,\n                                                          consts.TIME_PATTERN)\n        except Exception as err:\n            err_msg = \"Failed to get latest perf timestamp \" \\\n                      \"from VnxBlockStor: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        finally:\n            self._remove_archive_file(archive_file_list)\n        return latest_time\n\n    def check_latest_timestamp(self, storage_id):\n        latest_time = 0\n        file_latest_time = self.get_data_latest_timestamp(storage_id)\n        sys_time = self.navi_handler.get_sp_time()\n        LOG.info(\"Get sys_time=={},file_latest_time=={}\".format(\n            sys_time, file_latest_time))\n        if sys_time > 0 and file_latest_time > 0:\n            LOG.info(\"(sys_time - file_latest_time)={}\".format(\n                (sys_time - file_latest_time)))\n            if (sys_time - file_latest_time) < \\\n                    consts.CREATE_FILE_TIME_INTERVAL:\n                latest_time = file_latest_time\n                time.sleep(consts.CHECK_WAITE_TIME_SECONDS)\n        return latest_time, file_latest_time\n"
  },
  {
    "path": "delfin/drivers/dell_emc/vnx/vnx_block/consts.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.common.constants import ControllerMetric, DiskMetric, PortMetric, \\\n    VolumeMetric\n\nSOCKET_TIMEOUT = 30\nLOGIN_SOCKET_TIMEOUT = 10\nCER_ERR = 'Unable to validate the identity of the server'\nCALLER_ERR = 'Caller not privileged'\nSECURITY_ERR = 'Security file not found'\nTRYING_CONNECT_ERR = 'error occurred while trying to connect'\nCONNECTION_ERR = 'connection refused'\nINVALID_ERR = 'invalid username, password and/or scope'\nNOT_SUPPORTED_ERR = 'CLI commands are not supported by the target storage' \\\n                    ' system'\nEXCEPTION_MAP = {CER_ERR: exception.SSLCertificateFailed,\n                 CALLER_ERR: exception.InvalidUsernameOrPassword,\n                 SECURITY_ERR: exception.InvalidUsernameOrPassword,\n                 TRYING_CONNECT_ERR: exception.InvalidIpOrPort,\n                 CONNECTION_ERR: exception.InvalidIpOrPort,\n                 INVALID_ERR: exception.InvalidUsernameOrPassword,\n                 NOT_SUPPORTED_ERR: exception.StorageBackendException}\nCER_STORE = '2'\nCER_REJECT = '3'\nDISK_ID_KEY = 'Bus 0 Enclosure 0  Disk'\nLUN_ID_KEY = 'LOGICAL UNIT NUMBER'\nLUN_NAME_KEY = 'Name                        '\nCER_SEPARATE_KEY = '-----------------------------'\nTIME_PATTERN = '%m/%d/%Y %H:%M:%S'\nDATE_PATTERN = '%m/%d/%Y'\nONE_DAY_SCE = 24 * 60 * 60\nLOG_FILTER_PATTERN = '\\\\(7[0-7]([a-f]|[0-9]){2}\\\\)'\nNAVISECCLI_API = 'naviseccli -User %(username)s -password %(password)s' \\\n                 ' -scope 0 -t %(timeout)d -h %(host)s'\nCER_ADD_API = 'naviseccli security -certificate -add -file'\nCER_LIST_API = 'naviseccli security -certificate -list'\nCER_REMOVE_API = 'naviseccli security -certificate -remove'\nGET_AGENT_API = 'getagent'\nGET_DOMAIN_API = 'domain -list'\nGET_STORAGEPOOL_API = 'storagepool -list'\nGET_RAIDGROUP_API = 'getrg'\nGET_DISK_API = 'getdisk'\nGET_LUN_API = 'lun -list'\nGET_GETALLLUN_API = 'getall -lun'\nGET_SP_API = 'getsp'\nGET_PORT_API = 'port -list -sp -all'\nGET_BUS_PORT_API = 'backendbus -get -all'\nGET_BUS_PORT_STATE_API = 'ioportconfig -list -iomodule basemodule' \\\n                         ' -portstate -pportid'\nGET_ISCSI_PORT_API = 'connection -getport'\nGET_IO_PORT_CONFIG_API = 'ioportconfig -list -all'\nGET_RESUME_API = 'getresume -all'\nGET_LOG_API = 'getlog -date %(begin_time)s %(end_time)s'\nEMCVNX_VENDOR = 'DELL EMC'\nRAID_GROUP_ID_PREFIX = 'raid_group_'\nGET_SG_LIST_HOST_API = 'storagegroup -messner -list -host'\nGET_PORT_LIST_HBA_API = 'port -list -hba'\nSTATUS_MAP = {\n    'Ready': constants.StoragePoolStatus.NORMAL,\n    'Offline': constants.StoragePoolStatus.OFFLINE,\n    'Valid_luns': constants.StoragePoolStatus.NORMAL,\n    'Busy': constants.StoragePoolStatus.ABNORMAL,\n    'Halted': constants.StoragePoolStatus.ABNORMAL,\n    'Defragmenting': constants.StoragePoolStatus.NORMAL,\n    'Expanding': 
constants.StoragePoolStatus.NORMAL,\n    'Explicit Remove': constants.StoragePoolStatus.OFFLINE,\n    'Invalid': constants.StoragePoolStatus.OFFLINE,\n    'Bound': constants.StoragePoolStatus.NORMAL\n}\nVOL_TYPE_MAP = {'no': constants.VolumeType.THICK,\n                'yes': constants.VolumeType.THIN}\nVOL_COMPRESSED_MAP = {'no': False,\n                      'yes': True}\nDEFAULT_QUERY_LOG_DAYS = 9\nSECS_OF_TEN_DAYS = DEFAULT_QUERY_LOG_DAYS * ONE_DAY_SCE\nOID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'\nOID_MESSAGECODE = '1.3.6.1.4.1.1981.1.4.5'\nOID_DETAILS = '1.3.6.1.4.1.1981.1.4.6'\nSEVERITY_MAP = {\"76\": constants.Severity.CRITICAL,\n                \"75\": constants.Severity.MAJOR,\n                \"74\": constants.Severity.MINOR,\n                \"73\": constants.Severity.WARNING,\n                \"72\": constants.Severity.WARNING,\n                \"77\": constants.Severity.FATAL,\n                \"71\": constants.Severity.INFORMATIONAL,\n                \"70\": constants.Severity.INFORMATIONAL}\nTRAP_LEVEL_MAP = {'1.3.6.1.4.1.1981.0.6': constants.Severity.CRITICAL,\n                  '1.3.6.1.4.1.1981.0.5': constants.Severity.MINOR,\n                  '1.3.6.1.4.1.1981.0.4': constants.Severity.WARNING,\n                  '1.3.6.1.4.1.1981.0.3': constants.Severity.INFORMATIONAL,\n                  '1.3.6.1.4.1.1981.0.2': constants.Severity.INFORMATIONAL\n                  }\nDISK_STATUS_MAP = {\n    'BINDING': constants.DiskStatus.ABNORMAL,\n    'ENABLED': constants.DiskStatus.NORMAL,\n    'EMPTY': constants.DiskStatus.ABNORMAL,\n    'EXPANDING': constants.DiskStatus.ABNORMAL,\n    'FORMATTING': constants.DiskStatus.ABNORMAL,\n    'OFF': constants.DiskStatus.ABNORMAL,\n    'POWERING UP': constants.DiskStatus.ABNORMAL,\n    'REBUILDING': constants.DiskStatus.ABNORMAL,\n    'REMOVED': constants.DiskStatus.ABNORMAL,\n    'UNASSIGNED': constants.DiskStatus.ABNORMAL,\n    'UNBOUND': constants.DiskStatus.NORMAL,\n    'UNFORMATTED': constants.DiskStatus.ABNORMAL,\n    'UNSUPPORTED': constants.DiskStatus.ABNORMAL\n}\nDISK_PHYSICAL_TYPE_MAP = {\n    'SATA': constants.DiskPhysicalType.SATA,\n    'SAS': constants.DiskPhysicalType.SAS,\n    'SSD': constants.DiskPhysicalType.SSD,\n    'NL-SAS': constants.DiskPhysicalType.NL_SAS,\n    'NL-SSD': constants.DiskPhysicalType.NL_SSD,\n    'FLASH': constants.DiskPhysicalType.FLASH,\n    'SAS FLASH VP': constants.DiskPhysicalType.SAS_FLASH_VP,\n    'FIBRE CHANNEL': constants.DiskPhysicalType.FC,\n    'ATA': constants.DiskPhysicalType.ATA\n}\nSPPORT_KEY = \"Information about each SPPORT:\"\nPORT_CONNECTION_STATUS_MAP = {\n    'UP': constants.PortConnectionStatus.CONNECTED,\n    'DOWN': constants.PortConnectionStatus.DISCONNECTED\n}\nPORT_HEALTH_STATUS_MAP = {\n    'ONLINE': constants.PortHealthStatus.NORMAL,\n    'DISABLED': constants.PortHealthStatus.ABNORMAL,\n    'ENABLED': constants.PortHealthStatus.NORMAL,\n    'MISSING': constants.PortHealthStatus.ABNORMAL\n}\nPORT_TYPE_MAP = {\n    'FIBRE': constants.PortType.FC,\n    'FCOE': constants.PortType.FCOE,\n    'ISCSI': constants.PortType.ISCSI,\n    'SAS': constants.PortType.SAS,\n    'UNKNOWN': constants.PortType.OTHER\n}\nINITIATOR_TYPE_MAP = {\n    'FC': constants.InitiatorType.FC,\n    'FCOE': constants.InitiatorType.FC,\n    'ISCSI': constants.InitiatorType.ISCSI,\n    'SAS': constants.InitiatorType.SAS,\n    'UNKNOWN': constants.InitiatorType.UNKNOWN\n}\nALU_PAIRS_PATTERN = '^[0-9]+\\\\s+[0-9]+$'\nHBA_UID_PATTERN = \"^\\\\s*HBA UID\\\\s+SP Name\\\\s+SPPort\"\n\nCONTROLLER_CAP = {\n    
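# the *_CAP maps pair each metric this driver can report with its unit and description\n    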
ControllerMetric.IOPS.name: {\n        \"unit\": ControllerMetric.IOPS.unit,\n        \"description\": ControllerMetric.IOPS.description\n    },\n    ControllerMetric.READ_IOPS.name: {\n        \"unit\": ControllerMetric.READ_IOPS.unit,\n        \"description\": ControllerMetric.READ_IOPS.description\n    },\n    ControllerMetric.WRITE_IOPS.name: {\n        \"unit\": ControllerMetric.WRITE_IOPS.unit,\n        \"description\": ControllerMetric.WRITE_IOPS.description\n    },\n    ControllerMetric.THROUGHPUT.name: {\n        \"unit\": ControllerMetric.THROUGHPUT.unit,\n        \"description\": ControllerMetric.THROUGHPUT.description\n    },\n    ControllerMetric.READ_THROUGHPUT.name: {\n        \"unit\": ControllerMetric.READ_THROUGHPUT.unit,\n        \"description\": ControllerMetric.READ_THROUGHPUT.description\n    },\n    ControllerMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": ControllerMetric.WRITE_THROUGHPUT.unit,\n        \"description\": ControllerMetric.WRITE_THROUGHPUT.description\n    },\n    ControllerMetric.RESPONSE_TIME.name: {\n        \"unit\": ControllerMetric.RESPONSE_TIME.unit,\n        \"description\": ControllerMetric.RESPONSE_TIME.description\n    }\n}\nVOLUME_CAP = {\n    VolumeMetric.IOPS.name: {\n        \"unit\": VolumeMetric.IOPS.unit,\n        \"description\": VolumeMetric.IOPS.description\n    },\n    VolumeMetric.READ_IOPS.name: {\n        \"unit\": VolumeMetric.READ_IOPS.unit,\n        \"description\": VolumeMetric.READ_IOPS.description\n    },\n    VolumeMetric.WRITE_IOPS.name: {\n        \"unit\": VolumeMetric.WRITE_IOPS.unit,\n        \"description\": VolumeMetric.WRITE_IOPS.description\n    },\n    VolumeMetric.THROUGHPUT.name: {\n        \"unit\": VolumeMetric.THROUGHPUT.unit,\n        \"description\": VolumeMetric.THROUGHPUT.description\n    },\n    VolumeMetric.READ_THROUGHPUT.name: {\n        \"unit\": VolumeMetric.READ_THROUGHPUT.unit,\n        \"description\": VolumeMetric.READ_THROUGHPUT.description\n    },\n    VolumeMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": VolumeMetric.WRITE_THROUGHPUT.unit,\n        \"description\": VolumeMetric.WRITE_THROUGHPUT.description\n    },\n    VolumeMetric.RESPONSE_TIME.name: {\n        \"unit\": VolumeMetric.RESPONSE_TIME.unit,\n        \"description\": VolumeMetric.RESPONSE_TIME.description\n    },\n    VolumeMetric.READ_CACHE_HIT_RATIO.name: {\n        \"unit\": VolumeMetric.READ_CACHE_HIT_RATIO.unit,\n        \"description\": VolumeMetric.READ_CACHE_HIT_RATIO.description\n    },\n    VolumeMetric.WRITE_CACHE_HIT_RATIO.name: {\n        \"unit\": VolumeMetric.WRITE_CACHE_HIT_RATIO.unit,\n        \"description\": VolumeMetric.WRITE_CACHE_HIT_RATIO.description\n    },\n    VolumeMetric.READ_IO_SIZE.name: {\n        \"unit\": VolumeMetric.READ_IO_SIZE.unit,\n        \"description\": VolumeMetric.READ_IO_SIZE.description\n    },\n    VolumeMetric.WRITE_IO_SIZE.name: {\n        \"unit\": VolumeMetric.WRITE_IO_SIZE.unit,\n        \"description\": VolumeMetric.WRITE_IO_SIZE.description\n    }\n}\nPORT_CAP = {\n    PortMetric.IOPS.name: {\n        \"unit\": PortMetric.IOPS.unit,\n        \"description\": PortMetric.IOPS.description\n    },\n    PortMetric.READ_IOPS.name: {\n        \"unit\": PortMetric.READ_IOPS.unit,\n        \"description\": PortMetric.READ_IOPS.description\n    },\n    PortMetric.WRITE_IOPS.name: {\n        \"unit\": PortMetric.WRITE_IOPS.unit,\n        \"description\": PortMetric.WRITE_IOPS.description\n    },\n    PortMetric.THROUGHPUT.name: {\n        \"unit\": PortMetric.THROUGHPUT.unit,\n  
      \"description\": PortMetric.THROUGHPUT.description\n    },\n    PortMetric.READ_THROUGHPUT.name: {\n        \"unit\": PortMetric.READ_THROUGHPUT.unit,\n        \"description\": PortMetric.READ_THROUGHPUT.description\n    },\n    PortMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": PortMetric.WRITE_THROUGHPUT.unit,\n        \"description\": PortMetric.WRITE_THROUGHPUT.description\n    }\n}\nDISK_CAP = {\n    DiskMetric.IOPS.name: {\n        \"unit\": DiskMetric.IOPS.unit,\n        \"description\": DiskMetric.IOPS.description\n    },\n    DiskMetric.READ_IOPS.name: {\n        \"unit\": DiskMetric.READ_IOPS.unit,\n        \"description\": DiskMetric.READ_IOPS.description\n    },\n    DiskMetric.WRITE_IOPS.name: {\n        \"unit\": DiskMetric.WRITE_IOPS.unit,\n        \"description\": DiskMetric.WRITE_IOPS.description\n    },\n    DiskMetric.THROUGHPUT.name: {\n        \"unit\": DiskMetric.THROUGHPUT.unit,\n        \"description\": DiskMetric.THROUGHPUT.description\n    },\n    DiskMetric.READ_THROUGHPUT.name: {\n        \"unit\": DiskMetric.READ_THROUGHPUT.unit,\n        \"description\": DiskMetric.READ_THROUGHPUT.description\n    },\n    DiskMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": DiskMetric.WRITE_THROUGHPUT.unit,\n        \"description\": DiskMetric.WRITE_THROUGHPUT.description\n    },\n    DiskMetric.RESPONSE_TIME.name: {\n        \"unit\": DiskMetric.RESPONSE_TIME.unit,\n        \"description\": DiskMetric.RESPONSE_TIME.description\n    }\n}\nRESOURCES_TYPE_TO_METRIC_CAP = {\n    constants.ResourceType.CONTROLLER: CONTROLLER_CAP,\n    constants.ResourceType.PORT: PORT_CAP,\n    constants.ResourceType.DISK: DISK_CAP,\n    constants.ResourceType.VOLUME: VOLUME_CAP,\n}\nMETRIC_MAP = {\n    constants.ResourceType.CONTROLLER: {\n        ControllerMetric.IOPS.name: 16,\n        ControllerMetric.READ_IOPS.name: 25,\n        ControllerMetric.WRITE_IOPS.name: 34,\n        ControllerMetric.THROUGHPUT.name: 13,\n        ControllerMetric.READ_THROUGHPUT.name: 19,\n        ControllerMetric.WRITE_THROUGHPUT.name: 28,\n        ControllerMetric.RESPONSE_TIME.name: 10\n    },\n    constants.ResourceType.PORT: {\n        PortMetric.IOPS.name: 16,\n        PortMetric.READ_IOPS.name: 25,\n        PortMetric.WRITE_IOPS.name: 34,\n        PortMetric.THROUGHPUT.name: 13,\n        PortMetric.READ_THROUGHPUT.name: 19,\n        PortMetric.WRITE_THROUGHPUT.name: 28\n    },\n    constants.ResourceType.DISK: {\n        DiskMetric.IOPS.name: 16,\n        DiskMetric.READ_IOPS.name: 25,\n        DiskMetric.WRITE_IOPS.name: 34,\n        DiskMetric.THROUGHPUT.name: 13,\n        DiskMetric.READ_THROUGHPUT.name: 19,\n        DiskMetric.WRITE_THROUGHPUT.name: 28,\n        DiskMetric.RESPONSE_TIME.name: 10\n    },\n    constants.ResourceType.VOLUME: {\n        VolumeMetric.IOPS.name: 16,\n        VolumeMetric.READ_IOPS.name: 25,\n        VolumeMetric.WRITE_IOPS.name: 34,\n        VolumeMetric.THROUGHPUT.name: 13,\n        VolumeMetric.READ_THROUGHPUT.name: 19,\n        VolumeMetric.WRITE_THROUGHPUT.name: 28,\n        VolumeMetric.RESPONSE_TIME.name: 10,\n        VolumeMetric.READ_CACHE_HIT_RATIO.name: 42,\n        VolumeMetric.WRITE_CACHE_HIT_RATIO.name: 45,\n        VolumeMetric.READ_IO_SIZE.name: 22,\n        VolumeMetric.WRITE_IO_SIZE.name: 31\n    }\n}\n\nARCHIVE_FILE_NAME = '%s_SPA_%s.nar'\nGET_SP_TIME = 'getsptime'\nGET_NAR_INTERVAL_API = 'analyzer -get -narinterval'\nGET_ARCHIVE_API = 'analyzer -archive -list'\nCREATE_ARCHIVE_API = 'analyzer -archiveretrieve -file %s -location %s ' \\\n              
       '-overwrite y -retry 3'\nDOWNLOAD_ARCHIVE_API = 'analyzer -archive -file %s -path %s -o'\nARCHIVEDUMP_API = 'analyzer -archivedump -data %s%s -out %s%s.csv'\nARCHIVE_FILE_DIR = \"/delfin/drivers/utils/performance_file/vnx_block/\"\nGET_SP_TIME_PATTERN = '%m/%d/%y %H:%M:%S'\nARCHIVE_FILE_NAME_TIME_PATTERN = '%Y_%m_%d_%H_%M_%S'\n# Unit: s\nSLEEP_TIME_SECONDS = 60\n# Unit: ms\nCREATE_FILE_TIME_INTERVAL = 150000\n# Unit: ms\nEXEC_TIME_INTERVAL = 240000\nEXEC_MAX_NUM = 50\n# Unit: ms\nTIME_INTERVAL_FLUCTUATION = 3000\nREPLACE_PATH = \"/delfin/drivers/dell_emc/vnx/vnx_block\"\n# Unit: s\nCHECK_WAITE_TIME_SECONDS = 15\nCOLLECTION_TIME_PATTERN = '%m/%d/%Y %H:%M:00'\n"
  },
  {
    "path": "delfin/drivers/dell_emc/vnx/vnx_block/navi_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport re\nimport threading\nimport time\n\nimport six\nfrom oslo_log import log as logging\n\nfrom delfin import cryptor\nfrom oslo_utils import units\n\nfrom delfin import exception\nfrom delfin.drivers.dell_emc.vnx.vnx_block import consts\nfrom delfin.drivers.dell_emc.vnx.vnx_block.navicli_client import NaviClient\nfrom delfin.drivers.utils.tools import Tools\n\nLOG = logging.getLogger(__name__)\n\n\nclass NaviHandler(object):\n    session_lock = None\n\n    def __init__(self, **kwargs):\n        cli_access = kwargs.get('cli')\n        if cli_access is None:\n            raise exception.InvalidInput('Input navicli_access is missing')\n        self.navi_host = cli_access.get('host')\n        self.navi_port = cli_access.get('port')\n        self.navi_username = cli_access.get('username')\n        self.navi_password = cli_access.get('password')\n        self.navi_timeout = cli_access.get('conn_timeout',\n                                           consts.SOCKET_TIMEOUT)\n        self.verify = kwargs.get('verify', False)\n        self.session_lock = threading.Lock()\n\n    def get_cli_command_str(self, host_ip=None, sub_command=None,\n                            timeout=None):\n        if host_ip is None:\n            host_ip = self.navi_host\n        if timeout is None:\n            timeout = self.navi_timeout\n        command_str = consts.NAVISECCLI_API % {\n            'username': self.navi_username,\n            'password': cryptor.decode(self.navi_password),\n            'host': host_ip, 'timeout': timeout}\n        if self.navi_port:\n            command_str = '%s -port %d' % (command_str, self.navi_port)\n        command_str = '%s %s' % (command_str, sub_command)\n        return command_str\n\n    def login(self, host_ip=None):\n        \"\"\"Successful login returns the version number\n           Failure to log in will throw an exception\n        \"\"\"\n        version = ''\n        if host_ip is None:\n            host_ip = self.navi_host\n        accept_cer = consts.CER_STORE\n        if self.verify:\n            accept_cer = consts.CER_REJECT\n            self.remove_cer(host_ip=host_ip)\n            cer_add_command = '%s %s' % (consts.CER_ADD_API, self.verify)\n            NaviClient.exec(cer_add_command.split())\n        command_str = \\\n            self.get_cli_command_str(host_ip=host_ip,\n                                     sub_command=consts.GET_AGENT_API,\n                                     timeout=consts.LOGIN_SOCKET_TIMEOUT)\n        result = NaviClient.exec(command_str.split(), stdin_value=accept_cer)\n        if result:\n            agent_model = self.cli_res_to_dict(result)\n            if agent_model:\n                version = agent_model.get(\"revision\")\n        return version\n\n    def remove_cer(self, host_ip=None):\n        if host_ip is None:\n            host_ip = self.navi_host\n        cer_list_str = 
NaviClient.exec(consts.CER_LIST_API.split())\n        cer_map = self.analyse_cer(cer_list_str, host_ip)\n        if cer_map.get(host_ip):\n            cer_remove_command = '%s -issuer %s -serialNumber %s' % (\n                consts.CER_REMOVE_API,\n                cer_map.get(host_ip).get('issuer'),\n                cer_map.get(host_ip).get('serial#'))\n            NaviClient.exec(cer_remove_command.split())\n\n    def get_agent(self):\n        return self.get_resources_info(consts.GET_AGENT_API,\n                                       self.cli_res_to_dict)\n\n    def get_domain(self):\n        return self.get_resources_info(consts.GET_DOMAIN_API,\n                                       self.cli_domain_to_dict)\n\n    def get_pools(self):\n        return self.get_resources_info(consts.GET_STORAGEPOOL_API,\n                                       self.cli_res_to_list)\n\n    def get_disks(self):\n        return self.get_resources_info(consts.GET_DISK_API,\n                                       self.cli_disk_to_list)\n\n    def get_raid_group(self):\n        return self.get_resources_info(consts.GET_RAIDGROUP_API,\n                                       self.cli_raid_to_list)\n\n    def get_pool_lun(self):\n        return self.get_resources_info(consts.GET_LUN_API,\n                                       self.cli_res_to_list)\n\n    def get_all_lun(self):\n        return self.get_resources_info(consts.GET_GETALLLUN_API,\n                                       self.cli_lun_to_list)\n\n    def get_controllers(self):\n        return self.get_resources_info(consts.GET_SP_API,\n                                       self.cli_sp_to_list)\n\n    def get_cpus(self):\n        return self.get_resources_info(consts.GET_RESUME_API,\n                                       self.cli_cpu_to_dict)\n\n    def get_ports(self):\n        return self.get_resources_info(consts.GET_PORT_API,\n                                       self.cli_port_to_list)\n\n    def get_bus_ports(self):\n        return self.get_resources_info(consts.GET_BUS_PORT_API,\n                                       self.cli_bus_port_to_list)\n\n    def get_bus_port_state(self):\n        return self.get_resources_info(consts.GET_BUS_PORT_STATE_API,\n                                       self.cli_bus_port_state_to_dict)\n\n    def get_iscsi_ports(self):\n        return self.get_resources_info(consts.GET_ISCSI_PORT_API,\n                                       self.cli_iscsi_port_to_list)\n\n    def get_io_configs(self):\n        return self.get_resources_info(consts.GET_IO_PORT_CONFIG_API,\n                                       self.cli_io_config_to_dict)\n\n    def get_resources_info(self, sub_command, analyse_type):\n        # Execute commands to query data and analyze\n        try:\n            command_str = self.get_cli_command_str(sub_command=sub_command)\n            resource_info = self.navi_exe(command_str.split())\n            return_value = None\n            if resource_info:\n                return_value = analyse_type(resource_info)\n        except Exception as e:\n            err_msg = \"Failed to get resources info from %s: %s\" \\\n                      % (sub_command, six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        return return_value\n\n    def cli_res_to_dict(self, resource_info):\n        obj_model = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n           
         if ':' in str_line:\n                        str_info = self.split_str_by_colon(str_line)\n                        obj_model = self.str_info_to_model(str_info, obj_model)\n        except Exception as e:\n            err_msg = \"arrange resource info error: %s\", six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_model\n\n    def cli_res_to_list(self, resource_info):\n        obj_list = []\n        obj_model = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if consts.DISK_ID_KEY in str_line:\n                        str_line = str_line.replace(consts.DISK_ID_KEY,\n                                                    \"disk id:\")\n                    if consts.LUN_ID_KEY in str_line:\n                        str_line = str_line.replace(consts.LUN_ID_KEY,\n                                                    \"lun id:\")\n                    if ':' not in str_line:\n                        continue\n                    str_info = self.split_str_by_colon(str_line)\n                    obj_model = self.str_info_to_model(str_info, obj_model)\n                else:\n                    obj_list = self.add_model_to_list(obj_model, obj_list)\n                    obj_model = {}\n            # If the last object is not added to the LIST,\n            # perform the join operation\n            obj_list = self.add_model_to_list(obj_model, obj_list)\n        except Exception as e:\n            err_msg = \"cli resource to list error: %s\", six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def cli_raid_to_list(self, resource_info):\n        obj_list = []\n        obj_model = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                # Use 'RaidGroup ID' to determine whether it is\n                # a new object\n                if str_line and str_line.startswith('RaidGroup ID:'):\n                    obj_list = self.add_model_to_list(obj_model, obj_list)\n                    obj_model = {}\n                if str_line:\n                    if ':' not in str_line:\n                        continue\n                    str_info = self.split_str_by_colon(str_line)\n                    obj_model = self.str_info_to_model(str_info, obj_model)\n            # If the last object is not added to the LIST,\n            # perform the join operation\n            obj_list = self.add_model_to_list(obj_model, obj_list)\n        except Exception as e:\n            err_msg = \"arrange raid info error: %s\", six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def cli_sp_to_list(self, resource_info):\n        obj_list = []\n        obj_model = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if ':' not in str_line:\n                        obj_model['sp_name'] = str_line\n                    else:\n                        str_info = self.split_str_by_colon(str_line)\n                        obj_model = self.str_info_to_model(str_info, obj_model)\n\n                    
    if str_line and str_line.startswith(\n                                'SP SCSI ID if Available:'):\n                            obj_list = self.add_model_to_list(obj_model,\n                                                              obj_list)\n                            obj_model = {}\n        except Exception as e:\n            err_msg = \"arrange sp info error: %s\", six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def cli_port_to_list(self, resource_info):\n        obj_list = []\n        obj_model = {}\n        max_speed_str = ''\n        previous_line = ''\n        try:\n            spport_infos = resource_info.split(consts.SPPORT_KEY)[1]\n            obj_infos = spport_infos.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if ':' in str_line:\n                        str_info = self.split_str_by_colon(str_line)\n                        obj_model = self.str_info_to_model(str_info, obj_model)\n                        previous_line = str_line\n                    else:\n                        if 'Available Speeds:' in previous_line:\n                            if 'Auto' not in str_line \\\n                                    and str_line > max_speed_str:\n                                max_speed_str = str_line\n                else:\n                    if max_speed_str:\n                        obj_model['max_speed'] = max_speed_str\n                    obj_list = self.add_model_to_list(obj_model, obj_list)\n                    obj_model = {}\n                    max_speed_str = ''\n                    previous_line = ''\n            if obj_model:\n                if max_speed_str:\n                    obj_model['max_speed'] = max_speed_str\n                obj_list = self.add_model_to_list(obj_model, obj_list)\n        except Exception as e:\n            err_msg = \"arrange port info error: %s\", six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def cli_bus_port_to_list(self, resource_info):\n        obj_list = []\n        obj_model = {}\n        sp_list = []\n        max_speed_str = ''\n        previous_line = ''\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if 'Bus ' in str_line and ':' not in str_line:\n                        if max_speed_str:\n                            obj_model['max_speed'] = max_speed_str\n                        obj_list = self.add_model_to_list(obj_model, obj_list)\n                        obj_model = {}\n                        sp_list = []\n                        max_speed_str = ''\n                        previous_line = ''\n                        obj_model['bus_name'] = str_line\n                    elif ':' in str_line:\n                        previous_line = str_line\n                        str_info = self.split_str_by_colon(str_line)\n                        obj_model = self.str_info_to_model(str_info, obj_model)\n                        if ' Connector State' in str_line:\n                            sp_list.append(\n                                str_info[0].replace('_connector_state', ''))\n                            obj_model['sps'] = sp_list\n                    else:\n                        if 'Available 
Speeds:' in previous_line:\n                            if 'Auto' not in str_line \\\n                                    and str_line > max_speed_str:\n                                max_speed_str = str_line\n            if max_speed_str:\n                obj_model['max_speed'] = max_speed_str\n            obj_list = self.add_model_to_list(obj_model, obj_list)\n        except Exception as e:\n            err_msg = \"arrange bus port info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def cli_bus_port_state_to_dict(self, resource_info):\n        obj_model = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            sp = ''\n            port_id = ''\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if 'SP ID:' in str_line:\n                        str_info = self.split_str_by_colon(str_line)\n                        sp = str_info[1]\n                    if 'Physical Port ID:' in str_line:\n                        str_info = self.split_str_by_colon(str_line)\n                        port_id = str_info[1]\n                    if 'Port State:' in str_line:\n                        str_info = self.split_str_by_colon(str_line)\n                        obj_model[sp + '_' + port_id] = str_info[1]\n        except Exception as e:\n            err_msg = \"arrange bus port state info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_model\n\n    def cli_iscsi_port_to_list(self, resource_info):\n        obj_list = []\n        obj_model = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if ':' in str_line:\n                        str_info = self.split_str_by_colon(str_line)\n                        obj_model = self.str_info_to_model(str_info, obj_model)\n                else:\n                    obj_list = self.add_model_to_list(obj_model, obj_list)\n                    obj_model = {}\n        except Exception as e:\n            err_msg = \"arrange iscsi port info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def cli_io_config_to_dict(self, resource_info):\n        obj_model = {}\n        try:\n            obj_list = []\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if ':' in str_line:\n                        str_info = self.split_str_by_colon(str_line)\n                        obj_model = self.str_info_to_model(str_info, obj_model)\n                else:\n                    obj_list = self.add_model_to_list(obj_model, obj_list)\n                    obj_model = {}\n            for config in obj_list:\n                if config.get('i/o_module_slot'):\n                    key = '%s_%s' % (\n                        config.get('sp_id'), config.get('i/o_module_slot'))\n                    obj_model[key] = config.get('i/o_module_type').replace(\n                        ' Channel', '')\n        except Exception as e:\n            err_msg = \"arrange io port config info error: %s\" % six.text_type(e)\n            
LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_model\n\n    def cli_cpu_to_dict(self, resource_info):\n        obj_model = {}\n        try:\n            obj_list = []\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if 'CPU Module' in str_line:\n                        str_line = '%s:True' % str_line\n                    str_info = self.split_str_by_colon(str_line)\n                    obj_model = self.str_info_to_model(str_info, obj_model)\n                else:\n                    obj_list = self.add_model_to_list(obj_model, obj_list)\n                    obj_model = {}\n            for cpu_module in obj_list:\n                if cpu_module.get('cpu_module'):\n                    obj_model[\n                        cpu_module.get('emc_serial_number')] = cpu_module.get(\n                        'assembly_name')\n        except Exception as e:\n            err_msg = \"arrange cpu info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_model\n\n    def cli_disk_to_list(self, resource_info):\n        obj_list = []\n        obj_model = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if str_line.startswith('Bus '):\n                        disk_name = 'disk_name:%s' % str_line\n                        str_info = self.split_str_by_colon(disk_name)\n                        obj_model = self.str_info_to_model(str_info, obj_model)\n                        str_line = \"disk id:%s\" % (str_line.replace(' ', ''))\n                    if ':' not in str_line:\n                        continue\n                    str_info = self.split_str_by_colon(str_line)\n                    obj_model = self.str_info_to_model(str_info, obj_model)\n                else:\n                    obj_list = self.add_model_to_list(obj_model, obj_list)\n                    obj_model = {}\n            # If the last object has not been added to the list,\n            # append it now\n            obj_list = self.add_model_to_list(obj_model, obj_list)\n        except Exception as e:\n            err_msg = \"arrange disk info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def cli_domain_to_dict(self, resource_info):\n        obj_list = []\n        obj_model = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            node_value = ''\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                # Use \"IP Address\" to determine whether it is a new object\n                if str_line and str_line.startswith('IP Address:'):\n                    obj_list = self.add_model_to_list(obj_model, obj_list)\n                    obj_model = {}\n                if str_line:\n                    if 'Master' in str_line:\n                        obj_model['master'] = 'True'\n                        str_line = str_line.replace('(Master)', '')\n                    str_info = self.split_str_by_colon(str_line)\n                    if str_line and str_line.startswith('Node:'):\n                        node_value = str_info[1]\n                        continue\n    
                if str_line and str_line.startswith('IP Address:'):\n                        obj_model['node'] = node_value\n                    obj_model = self.str_info_to_model(str_info, obj_model)\n            # If the last object has not been added to the list,\n            # append it now\n            obj_list = self.add_model_to_list(obj_model, obj_list)\n        except Exception as e:\n            err_msg = \"arrange domain info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def cli_lun_to_list(self, resource_info):\n        obj_list = []\n        obj_model = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line and str_line.startswith(consts.LUN_ID_KEY):\n                    obj_list = self.add_model_to_list(obj_model, obj_list)\n                    obj_model = {}\n                if str_line:\n                    if str_line.startswith(consts.LUN_ID_KEY):\n                        str_line = str_line.replace(consts.LUN_ID_KEY,\n                                                    'LOGICAL UNIT NUMBER:')\n                    if str_line.startswith(consts.LUN_NAME_KEY):\n                        str_line = str_line.replace(consts.LUN_NAME_KEY,\n                                                    'Name:')\n                    if ':' not in str_line:\n                        continue\n                    str_info = self.split_str_by_colon(str_line)\n                    obj_model = self.str_info_to_model(str_info, obj_model)\n            obj_list = self.add_model_to_list(obj_model, obj_list)\n        except Exception as e:\n            err_msg = \"arrange lun info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def analyse_cer(self, resource_info, host_ip=None):\n        cer_map = {}\n        obj_model = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line and consts.CER_SEPARATE_KEY not in str_line:\n                    str_info = self.split_str_by_colon(str_line)\n                    if str_info[0] == 'issuer' and host_ip not in str_info[1]:\n                        continue\n                    obj_model[str_info[0]] = str_info[1]\n                else:\n                    if obj_model and obj_model.get('issuer'):\n                        cer_map[host_ip] = obj_model\n                        break\n        except Exception as e:\n            err_msg = \"arrange cer info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return cer_map\n\n    def split_str_by_colon(self, str_line):\n        str_info = []\n        if str_line:\n            # str_info[0] is the parsed attribute name, there are some special\n            # characters such as spaces, brackets, etc.,\n            # str_info[1] is the value\n            str_info = str_line.split(':', 1)\n            str_info[0] = str_info[0].strip()\n            str_info[0] = str_info[0].replace(\" \", \"_\") \\\n                .replace(\"(\", \"\").replace(\")\", \"\").lower()\n            if len(str_info) > 1:\n                str_info[1] = str_info[1].strip()\n        return str_info\n\n    def 
str_info_to_model(self, str_info, obj_model):\n        # Some lines are 'attribute: value' pairs; some attributes\n        # have no value, for example:\n        # Pool ID:  1\n        # Description:\n        # State:  Offline\n        if str_info:\n            key = None\n            value = None\n            if len(str_info) > 1:\n                key = str_info[0]\n                value = str_info[1]\n            elif len(str_info) == 1:\n                key = str_info[0]\n            obj_model[key] = value\n        return obj_model\n\n    def add_model_to_list(self, obj_model, obj_list):\n        if len(obj_model) > 0:\n            obj_list.append(obj_model)\n        return obj_list\n\n    def navi_exe(self, command_str, host_ip=None):\n        self.session_lock.acquire()\n        try:\n            if command_str:\n                accept_cer = consts.CER_STORE\n                if self.verify:\n                    accept_cer = consts.CER_REJECT\n                result = NaviClient.exec(command_str, stdin_value=accept_cer)\n                return result\n        except exception.SSLCertificateFailed as e:\n            LOG.error(\"ssl error: %s\", six.text_type(e))\n            self.login(host_ip)\n            result = NaviClient.exec(command_str)\n            return result\n        except exception.InvalidUsernameOrPassword as e:\n            LOG.error(\"auth error: %s\", six.text_type(e))\n            self.login(host_ip)\n            result = NaviClient.exec(command_str)\n            return result\n        except Exception as e:\n            err_msg = \"naviseccli exec error: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        finally:\n            self.session_lock.release()\n\n    def list_masking_views(self):\n        return self.get_resources_info(consts.GET_SG_LIST_HOST_API,\n                                       self.cli_sg_to_list)\n\n    def cli_sg_to_list(self, resource_info):\n        obj_list = []\n        obj_model = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            pattern = re.compile(consts.ALU_PAIRS_PATTERN)\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if ':' not in str_line:\n                        search_obj = pattern.search(str_line)\n                        if search_obj:\n                            str_info = str_line.split()\n                            lun_ids = obj_model.get('lun_ids')\n                            if lun_ids:\n                                lun_ids.add(str_info[1])\n                            else:\n                                lun_ids = set()\n                                lun_ids.add(str_info[1])\n                                obj_model['lun_ids'] = lun_ids\n                    else:\n                        str_info = self.split_str_by_colon(str_line)\n                        if 'Host name:' in str_line:\n                            host_names = obj_model.get('host_names')\n                            if host_names:\n                                host_names.add(str_info[1])\n                            else:\n                                host_names = set()\n                                host_names.add(str_info[1])\n                                obj_model['host_names'] = host_names\n                            continue\n\n                        obj_model = self.str_info_to_model(str_info, obj_model)\n\n                        if str_line.startswith('Shareable:'):\n            
                obj_list = self.add_model_to_list(obj_model,\n                                                              obj_list)\n                            obj_model = {}\n        except Exception as e:\n            err_msg = \"arrange sg info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def list_hbas(self):\n        return self.get_resources_info(consts.GET_PORT_LIST_HBA_API,\n                                       self.cli_hba_to_list)\n\n    def cli_hba_to_list(self, resource_info):\n        obj_list = []\n        obj_model = {}\n        sp_name = ''\n        port_ids = set()\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if 'Information about each HBA:' in obj_info:\n                        if obj_model:\n                            obj_model['port_ids'] = port_ids\n                        obj_list = self.add_model_to_list(obj_model,\n                                                          obj_list)\n                        obj_model = {}\n                        port_ids = set()\n                        sp_name = ''\n                    if ':' in obj_info:\n                        str_info = self.split_str_by_colon(str_line)\n                        obj_model = self.str_info_to_model(str_info, obj_model)\n                        if 'SP Name:' in obj_info:\n                            sp_name = obj_info.replace('SP Name:', '').replace(\n                                'SP', '').replace('\\r', '').replace(' ', '')\n                        if 'SP Port ID:' in obj_info:\n                            port_id = obj_info.replace('SP Port ID:',\n                                                       '').replace('\\r',\n                                                                   '').replace(\n                                ' ', '')\n                            port_id = '%s-%s' % (sp_name, port_id)\n                            port_ids.add(port_id)\n\n            if obj_model:\n                obj_model['port_ids'] = port_ids\n                obj_list.append(obj_model)\n        except Exception as e:\n            err_msg = \"arrange host info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def get_archives(self):\n        return self.get_resources_info(consts.GET_ARCHIVE_API,\n                                       self.cli_archives_to_list)\n\n    def cli_archives_to_list(self, resource_info):\n        obj_list = []\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    archive_infos = str_line.split()\n                    if archive_infos and len(archive_infos) == 5:\n                        obj_model = {}\n                        obj_model['collection_time'] = \\\n                            \"%s %s\" % (archive_infos[2], archive_infos[3])\n                        obj_model['archive_name'] = archive_infos[4]\n                        obj_list.append(obj_model)\n        except Exception as e:\n            err_msg = \"arrange archives info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    
def download_archives(self, archive_name):\n        download_archive_api = consts.DOWNLOAD_ARCHIVE_API % (\n            archive_name, self.get_local_file_path())\n        self.get_resources_info(download_archive_api, self.cli_res_to_list)\n        archive_name_infos = archive_name.split('.')\n        archivedump_api = consts.ARCHIVEDUMP_API % (\n            self.get_local_file_path(), archive_name,\n            self.get_local_file_path(), archive_name_infos[0])\n        self.get_resources_info(archivedump_api, self.cli_res_to_list)\n\n    def get_local_file_path(self):\n        driver_path = os.path.abspath(os.path.join(os.getcwd()))\n        driver_path = driver_path.replace(\"\\\\\", \"/\")\n        driver_path = driver_path.replace(consts.REPLACE_PATH, \"\")\n        local_path = '%s%s' % (driver_path, consts.ARCHIVE_FILE_DIR)\n        return local_path\n\n    def get_sp_time(self):\n        return self.get_resources_info(consts.GET_SP_TIME,\n                                       self.analysis_sp_time)\n\n    def analysis_sp_time(self, resource_info):\n        system_time = 0\n        try:\n            tools = Tools()\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if \"Time on SP A:\" in str_line:\n                    time_str = str_line.replace(\"Time on SP A:\", \"\").strip()\n                    system_time = tools.time_str_to_timestamp(\n                        time_str, consts.GET_SP_TIME_PATTERN)\n        except Exception as e:\n            err_msg = \"analysis sp time error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return system_time\n\n    def get_nar_interval(self):\n        return self.get_resources_info(consts.GET_NAR_INTERVAL_API,\n                                       self.analysis_nar_interval)\n\n    def analysis_nar_interval(self, resource_info):\n        nar_interval = 60\n        try:\n            if resource_info and \":\" in resource_info:\n                nar_interval_str = resource_info.split(\":\")[1].strip()\n                nar_interval = int(nar_interval_str)\n        except Exception as e:\n            err_msg = \"analysis nar interval error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return nar_interval\n\n    def get_archive_file_name(self, storage_id):\n        tools = Tools()\n        create_time = tools.timestamp_to_time_str(\n            time.time() * units.k, consts.ARCHIVE_FILE_NAME_TIME_PATTERN)\n        archive_file_name = consts.ARCHIVE_FILE_NAME % (storage_id,\n                                                        create_time)\n        return archive_file_name\n\n    def create_archives(self, storage_id):\n        archive_name = self.get_archive_file_name(storage_id)\n        create_archive_api = consts.CREATE_ARCHIVE_API % (\n            archive_name, self.get_local_file_path())\n        self.get_resources_info(create_archive_api, self.cli_res_to_list)\n\n        archive_name_infos = archive_name.split('.')\n        archivedump_api = consts.ARCHIVEDUMP_API % (\n            self.get_local_file_path(), archive_name,\n            self.get_local_file_path(), archive_name_infos[0])\n        self.get_resources_info(archivedump_api, self.cli_res_to_list)\n        return archive_name\n"
  },
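A minimal standalone sketch of the parsing shape shared by the `cli_*_to_list` helpers above: blank-line-separated blocks of `attribute: value` lines become a list of dicts. The sample text here is invented, not real naviseccli output.

```python
def split_str_by_colon(line):
    # Normalize "Attribute Name (unit): value" into ('attribute_name_unit', 'value')
    parts = line.split(':', 1)
    key = parts[0].strip().replace(' ', '_') \
        .replace('(', '').replace(')', '').lower()
    value = parts[1].strip() if len(parts) > 1 else None
    return key, value


def cli_res_to_list(resource_info):
    obj_list, obj_model = [], {}
    for raw in resource_info.split('\n'):
        line = raw.strip()
        if line and ':' in line:
            key, value = split_str_by_colon(line)
            obj_model[key] = value
        elif not line and obj_model:
            obj_list.append(obj_model)   # a blank line closes an object
            obj_model = {}
    if obj_model:
        obj_list.append(obj_model)       # flush the trailing object
    return obj_list


sample = "Pool ID:  1\nState:  Offline\n\nPool ID:  2\nState:  Ready\n"
print(cli_res_to_list(sample))
# [{'pool_id': '1', 'state': 'Offline'}, {'pool_id': '2', 'state': 'Ready'}]
```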
  {
    "path": "delfin/drivers/dell_emc/vnx/vnx_block/navicli_client.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom subprocess import Popen, PIPE\n\nimport six\nfrom oslo_log import log as logging\n\nfrom delfin import exception\nfrom delfin.drivers.dell_emc.vnx.vnx_block import consts\n\nLOG = logging.getLogger(__name__)\n\n\nclass NaviClient(object):\n\n    @staticmethod\n    def exec(command_str, stdin_value=None):\n        \"\"\"execute command_str using Popen\n        :param command_str: should be list type\n        :param stdin_value: same as stdin of Popen\n        :return: output of Popen.communicate\n        \"\"\"\n        try:\n            p = Popen(command_str, stdin=PIPE, stdout=PIPE, stderr=PIPE,\n                      shell=False)\n        except FileNotFoundError as e:\n            err_msg = \"naviseccli tool not found: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.ComponentNotFound('naviseccli')\n        except Exception as e:\n            err_msg = \"naviseccli exec error: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n        if stdin_value:\n            out, err = p.communicate(\n                input=bytes(stdin_value, encoding='utf-8'))\n        else:\n            out = p.stdout.read()\n        if isinstance(out, bytes):\n            out = out.decode(\"utf-8\")\n        result = out.strip()\n        if result:\n            # Determine whether an exception occurs according\n            # to the returned information\n            for exception_key in consts.EXCEPTION_MAP.keys():\n                if stdin_value is None or stdin_value == consts.CER_STORE:\n                    if exception_key == consts.CER_ERR:\n                        continue\n                if exception_key in result:\n                    LOG.error('VNX Block exec failed: %s' % result)\n                    raise consts.EXCEPTION_MAP.get(exception_key)(result)\n\n        return result\n"
  },
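A hedged call sketch for NaviClient.exec: the naviseccli command list and address below are illustrative placeholders (real commands are built by NaviHandler from access_info), while consts.CER_STORE is the stdin value the driver itself uses to accept the array's certificate.

```python
from delfin.drivers.dell_emc.vnx.vnx_block import consts
from delfin.drivers.dell_emc.vnx.vnx_block.navicli_client import NaviClient

# Placeholder command; requires the naviseccli binary on PATH.
cmd = ['naviseccli', '-h', '192.0.2.10', 'getagent']
# CER_STORE on stdin answers the certificate prompt; the decoded, stripped
# CLI output is returned, and known error strings raise via EXCEPTION_MAP.
output = NaviClient.exec(cmd, stdin_value=consts.CER_STORE)
print(output)
```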
  {
    "path": "delfin/drivers/dell_emc/vnx/vnx_block/vnx_block.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom oslo_log import log\n\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom delfin.drivers.dell_emc.vnx.vnx_block import consts\nfrom delfin.drivers.dell_emc.vnx.vnx_block.alert_handler import AlertHandler\nfrom delfin.drivers.dell_emc.vnx.vnx_block.component_handler import \\\n    ComponentHandler\nfrom delfin.drivers.dell_emc.vnx.vnx_block.navi_handler import NaviHandler\n\nLOG = log.getLogger(__name__)\n\n\nclass VnxBlockStorDriver(driver.StorageDriver):\n    \"\"\"VnxBlockStorDriver implement EMC VNX Stor driver\"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.navi_handler = NaviHandler(**kwargs)\n        self.version = self.navi_handler.login()\n        self.com_handler = ComponentHandler(self.navi_handler)\n\n    def reset_connection(self, context, **kwargs):\n        self.navi_handler.remove_cer()\n        self.navi_handler.verify = kwargs.get('verify', False)\n        self.navi_handler.login()\n\n    def close_connection(self):\n        pass\n\n    def get_storage(self, context):\n        return self.com_handler.get_storage()\n\n    def list_storage_pools(self, context):\n        return self.com_handler.list_storage_pools(self.storage_id)\n\n    def list_volumes(self, context):\n        return self.com_handler.list_volumes(self.storage_id)\n\n    def list_alerts(self, context, query_para=None):\n        raise NotImplementedError(\n            \"Driver API list_alerts() is not Implemented\")\n\n    def list_controllers(self, context):\n        return self.com_handler.list_controllers(self.storage_id)\n\n    def list_ports(self, context):\n        return self.com_handler.list_ports(self.storage_id)\n\n    def list_disks(self, context):\n        return self.com_handler.list_disks(self.storage_id)\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return AlertHandler.parse_alert(alert)\n\n    def clear_alert(self, context, sequence_number):\n        pass\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}'\n\n    def list_storage_host_initiators(self, context):\n        return self.com_handler.list_storage_host_initiators(self.storage_id)\n\n    def list_storage_hosts(self, context):\n        return self.com_handler.list_storage_hosts(self.storage_id)\n\n    def list_masking_views(self, context):\n        return self.com_handler.list_masking_views(self.storage_id)\n\n    def collect_perf_metrics(self, context, storage_id, resource_metrics,\n                             start_time, end_time):\n        return self.com_handler.collect_perf_metrics(storage_id,\n                                                     resource_metrics,\n                                                     start_time, end_time)\n\n    @staticmethod\n    def 
get_capabilities(context, filters=None):\n        \"\"\"Get capability of supported driver\"\"\"\n        return {\n            'is_historic': True,\n            'resource_metrics': {\n                constants.ResourceType.CONTROLLER: consts.CONTROLLER_CAP,\n                constants.ResourceType.VOLUME: consts.VOLUME_CAP,\n                constants.ResourceType.PORT: consts.PORT_CAP,\n                constants.ResourceType.DISK: consts.DISK_CAP\n            }\n        }\n\n    def get_latest_perf_timestamp(self, context):\n        return self.com_handler.get_latest_perf_timestamp(self.storage_id)\n"
  },
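Because get_capabilities is a staticmethod, the driver's metric matrix can be inspected without connecting to an array; a small sketch:

```python
from delfin.drivers.dell_emc.vnx.vnx_block.vnx_block import VnxBlockStorDriver

caps = VnxBlockStorDriver.get_capabilities(context=None)
print(caps['is_historic'])               # True
print(sorted(caps['resource_metrics']))  # ResourceType keys: controller, disk, port, volume
```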
  {
    "path": "delfin/drivers/dell_emc/vplex/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/dell_emc/vplex/alert_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport hashlib\n\nfrom oslo_log import log\n\nfrom delfin import exception, utils\nfrom delfin.common import constants\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\nclass AlertHandler(object):\n    OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'\n    OID_COMPONENT = '1.3.6.1.4.1.1139.21.1.3.0'\n    OID_SYMPTOMTEXT = '1.3.6.1.4.1.1139.21.1.5.0'\n\n    TRAP_LEVEL_MAP = {'1.3.6.1.4.1.1139.21.0.1': constants.Severity.CRITICAL,\n                      '1.3.6.1.4.1.1139.21.0.2': constants.Severity.MAJOR,\n                      '1.3.6.1.4.1.1139.21.0.3': constants.Severity.WARNING,\n                      '1.3.6.1.4.1.1139.21.0.4':\n                          constants.Severity.INFORMATIONAL\n                      }\n\n    SECONDS_TO_MS = 1000\n\n    @staticmethod\n    def parse_alert(context, alert):\n        try:\n            description = alert.get(AlertHandler.OID_SYMPTOMTEXT)\n            alert_model = dict()\n            alert_model['alert_id'] = alert.get(AlertHandler.OID_COMPONENT)\n            alert_model['alert_name'] = description\n            alert_model['severity'] = AlertHandler.TRAP_LEVEL_MAP.get(\n                alert.get(AlertHandler.OID_SEVERITY),\n                constants.Severity.INFORMATIONAL)\n            alert_model['category'] = constants.Category.FAULT\n            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n            alert_model['occur_time'] = utils.utcnow_ms()\n            alert_model['description'] = description\n            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n            alert_model['location'] = ''\n            alert_model['match_key'] = hashlib.md5(description.encode()). \\\n                hexdigest()\n\n            return alert_model\n        except Exception as e:\n            LOG.error(e)\n            msg = (_(\"Failed to build alert model as some attributes missing \"\n                     \"in alert message.\"))\n            raise exception.InvalidResults(msg)\n"
  },
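A sketch of the SNMP trap payload shape that AlertHandler.parse_alert consumes; the varbind values below are invented placeholders.

```python
from delfin.drivers.dell_emc.vplex.alert_handler import AlertHandler

trap = {
    AlertHandler.OID_SEVERITY: '1.3.6.1.4.1.1139.21.0.2',   # maps to MAJOR
    AlertHandler.OID_COMPONENT: 'director-1-1-A',           # placeholder
    AlertHandler.OID_SYMPTOMTEXT: 'Front-end port down',    # placeholder
}
alert_model = AlertHandler.parse_alert(context=None, alert=trap)
print(alert_model['severity'], alert_model['alert_id'])
```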
  {
    "path": "delfin/drivers/dell_emc/vplex/consts.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom delfin.common import constants\n\nSOCKET_TIMEOUT = 10\nBASE_CONTEXT = '/vplex'\nREST_AUTH_URL = '/vplex/clusters'\n\nPORT_TYPE_MAP = {\n    'fc': constants.PortType.FC,\n    'iscsi': constants.PortType.ISCSI,\n    'ficon': constants.PortType.FICON,\n    'fcoe': constants.PortType.FCOE,\n    'eth': constants.PortType.ETH,\n    'sas': constants.PortType.SAS,\n    'ib': constants.PortType.IB,\n    'other': constants.PortType.OTHER,\n}\n\nINITIATOR_DESCRIPTION = {\n    'iscsi': constants.InitiatorType.ISCSI,\n    'fc': constants.InitiatorType.FC,\n}\n\nPORT_LOGICAL_TYPE_MAP = {\n    'front-end': constants.PortLogicalType.FRONTEND,\n    'back-end': constants.PortLogicalType.BACKEND,\n    'service': constants.PortLogicalType.SERVICE,\n    'management': constants.PortLogicalType.MANAGEMENT,\n    'internal': constants.PortLogicalType.INTERNAL,\n    'maintenance': constants.PortLogicalType.MAINTENANCE,\n    'inter-director-communication': constants.PortLogicalType.INTERCONNECT,\n    'other': constants.PortLogicalType.OTHER,\n    'local-com': constants.PortLogicalType.INTERCLUSTER,\n    'wan-com': constants.PortLogicalType.CLUSTER_MGMT\n}\nPORT_CONNECT_STATUS_MAP = {\n    'up': constants.PortConnectionStatus.CONNECTED,\n    'down': constants.PortConnectionStatus.DISCONNECTED,\n    'no-link': constants.PortConnectionStatus.UNKNOWN,\n    'ok': constants.PortConnectionStatus.CONNECTED,\n    'pending': constants.PortConnectionStatus.CONNECTED,\n    'suspended': constants.PortConnectionStatus.DISCONNECTED,\n    'hardware error': constants.PortConnectionStatus.UNKNOWN\n}\nPORT_HEALTH_STATUS_MAP = {\n    'ok': constants.PortHealthStatus.NORMAL,\n    'error': constants.PortHealthStatus.ABNORMAL,\n    'stopped': constants.PortHealthStatus.UNKNOWN\n}\nCONTROLLER_STATUS_MAP = {\n    \"ok\": constants.ControllerStatus.NORMAL,\n    \"busy\": constants.ControllerStatus.NORMAL,\n    \"no contact\": constants.ControllerStatus.OFFLINE,\n    \"lost communication\": constants.ControllerStatus.OFFLINE,\n    \"unknown\": constants.ControllerStatus.UNKNOWN\n}\nHOST_TYPE_MAP = {\n    \"hpux\": constants.HostOSTypes.HP_UX,\n    \"aix\": constants.HostOSTypes.AIX,\n    \"unknown\": constants.HostOSTypes.UNKNOWN\n}\n"
  },
  {
    "path": "delfin/drivers/dell_emc/vplex/rest_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport six\nfrom oslo_log import log as logging\n\nfrom delfin import cryptor\nfrom delfin import exception\nfrom delfin.drivers.dell_emc.vplex import consts\nfrom delfin.drivers.utils.rest_client import RestClient\n\nLOG = logging.getLogger(__name__)\n\n\nclass RestHandler(RestClient):\n\n    def __init__(self, **kwargs):\n        super(RestHandler, self).__init__(**kwargs)\n\n    def login(self):\n        try:\n            data = {}\n            self.init_http_head()\n            self.session.headers.update({\n                \"username\": self.rest_username,\n                \"password\": cryptor.decode(self.rest_password)})\n            res = self.do_call(consts.REST_AUTH_URL, data, 'GET')\n            if res.status_code != 200:\n                LOG.error(\"Login error. URL: %(url)s\\n\"\n                          \"Reason: %(reason)s.\",\n                          {\"url\": consts.REST_AUTH_URL, \"reason\": res.text})\n                if 'User authentication failed' in res.text:\n                    raise exception.InvalidUsernameOrPassword()\n                else:\n                    raise exception.StorageBackendException(\n                        six.text_type(res.text))\n        except Exception as e:\n            LOG.error(\"Login error: %s\", six.text_type(e))\n            raise e\n\n    def get_rest_info(self, url, data=None, method='GET'):\n        \"\"\"Return dict result of the url response.\"\"\"\n        result_json = None\n        res = self.do_call(url, data, method)\n        if res.status_code == 200:\n            result_json = res.json().get('response')\n        return result_json\n\n    def get_virtual_volume_by_name_resp(self, cluster_name,\n                                        virtual_volume_name):\n        url = '%s/clusters/%s/virtual-volumes/%s' % \\\n              (consts.BASE_CONTEXT, cluster_name, virtual_volume_name)\n        response = self.get_rest_info(url)\n        return response\n\n    def get_virtual_volume_resp(self, cluster_name):\n        url = '%s/clusters/%s/virtual-volumes' % (\n            consts.BASE_CONTEXT, cluster_name)\n        response = self.get_rest_info(url)\n        return response\n\n    def get_cluster_resp(self):\n        uri = '%s/clusters' % consts.BASE_CONTEXT\n        response = self.get_rest_info(uri)\n        return response\n\n    def get_devcie_resp(self, cluster_name):\n        url = '%s/clusters/%s/devices' % (consts.BASE_CONTEXT, cluster_name)\n        response = self.get_rest_info(url)\n        return response\n\n    def get_device_by_name_resp(self, cluster_name, device_name):\n        url = '%s/clusters/%s/devices/%s' % (\n            consts.BASE_CONTEXT, cluster_name, device_name)\n        response = self.get_rest_info(url)\n        return response\n\n    def get_health_check_resp(self):\n        url = '%s/health-check' % consts.BASE_CONTEXT\n        data = {\"args\": \"-l\"}\n        response = 
self.get_rest_info(url, data, method='POST')\n        return response\n\n    def get_cluster_by_name_resp(self, cluster_name):\n        url = '%s/clusters/%s' % (consts.BASE_CONTEXT, cluster_name)\n        response = self.get_rest_info(url)\n        return response\n\n    def get_storage_volume_summary_resp(self, cluster_name):\n        url = '%s/storage-volume+summary' % consts.BASE_CONTEXT\n        args = '--clusters %s' % cluster_name\n        data = {\"args\": args}\n        response = self.get_rest_info(url, data, method='POST')\n        return response\n\n    def get_device_summary_resp(self, cluster_name):\n        url = '%s/local-device+summary' % consts.BASE_CONTEXT\n        args = '--clusters %s' % cluster_name\n        data = {\"args\": args}\n        response = self.get_rest_info(url, data, method='POST')\n        return response\n\n    def get_virtual_volume_summary_resp(self, cluster_name):\n        url = '%s/virtual-volume+summary' % consts.BASE_CONTEXT\n        args = '--clusters %s' % cluster_name\n        data = {\"args\": args}\n        response = self.get_rest_info(url, data, method='POST')\n        return response\n\n    def logout(self):\n        try:\n            if self.session:\n                self.session.close()\n        except Exception as e:\n            err_msg = \"Logout error: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n    def get_engine_director_resp(self):\n        url = '%s/engines/*/directors/*' % consts.BASE_CONTEXT\n        response = self.get_rest_info(url)\n        return response\n\n    def get_version_verbose(self):\n        url = '%s/version' % consts.BASE_CONTEXT\n        args = '-a --verbose'\n        data = {\"args\": args}\n        response = self.get_rest_info(url, data, method='POST')\n        return response\n\n    def get_cluster_export_port_resp(self):\n        url = '%s/clusters/*/exports/ports/*' % consts.BASE_CONTEXT\n        response = self.get_rest_info(url)\n        return response\n\n    def get_engine_director_hardware_port_resp(self):\n        url = '%s/engines/*/directors/*/hardware/ports/*' % consts.BASE_CONTEXT\n        response = self.get_rest_info(url)\n        return response\n\n    def get_initiators_resp(self):\n        url = '%s/clusters/*/exports/initiator-ports/*' % consts.BASE_CONTEXT\n        response = self.get_rest_info(url)\n        return response\n\n    def get_storage_views(self):\n        url = '%s/clusters/*/exports/storage-views/*' % consts.BASE_CONTEXT\n        response = self.get_rest_info(url)\n        return response\n"
  },
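A sketch of the two URL styles RestHandler builds on BASE_CONTEXT: plain GET resource paths, and "+summary" endpoints that POST a CLI-style argument string. 'cluster-1' is a placeholder cluster name.

```python
from delfin.drivers.dell_emc.vplex import consts

cluster_name = 'cluster-1'
# GET-style resource path, as used by get_virtual_volume_resp()
print('%s/clusters/%s/virtual-volumes' % (consts.BASE_CONTEXT, cluster_name))
# -> /vplex/clusters/cluster-1/virtual-volumes

# POST-style summary call, as used by get_device_summary_resp()
data = {"args": "--clusters %s" % cluster_name}
print('%s/local-device+summary' % consts.BASE_CONTEXT, data)
```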
  {
    "path": "delfin/drivers/dell_emc/vplex/vplex_stor.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport re\n\nimport six\nfrom delfin import exception\nfrom oslo_log import log\nfrom oslo_utils import units\n\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom delfin.drivers.dell_emc.vplex import alert_handler\nfrom delfin.drivers.dell_emc.vplex import rest_handler\nfrom delfin.drivers.dell_emc.vplex import consts\n\nLOG = log.getLogger(__name__)\n\n\nclass VplexStorageDriver(driver.StorageDriver):\n    \"\"\"DELL EMC VPLEX storage driver implement the DELL EMC Storage driver\"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.rest_handler = rest_handler.RestHandler(**kwargs)\n        self.rest_handler.login()\n\n    def reset_connection(self, context, **kwargs):\n        self.rest_handler.logout()\n        self.rest_handler.verify = kwargs.get('verify', False)\n        self.rest_handler.login()\n\n    def get_storage(self, context):\n        health_check = self.rest_handler.get_health_check_resp()\n        all_cluster = self.rest_handler.get_cluster_resp()\n        cluster_name_list = VplexStorageDriver.get_resource_names(all_cluster)\n        if cluster_name_list:\n            health_map = {}\n            custom_data = health_check.get(\"custom-data\")\n            VplexStorageDriver.handle_detail(custom_data,\n                                             health_map, split=':')\n            for cluster_name in cluster_name_list:\n                response = self.rest_handler.get_cluster_by_name_resp(\n                    cluster_name)\n                attr_map = VplexStorageDriver.get_attribute_map(response)\n                operate_status = attr_map.get('operational-status')\n                health_status = attr_map.get('health-state')\n                status = VplexStorageDriver.analyse_storage_status(\n                    operate_status, health_status)\n                try:\n                    raw_capacity = self.get_cluster_raw_capacity(cluster_name)\n                    total_capacity = self.get_cluster_total_capacity(\n                        cluster_name)\n                    used_capacity = self.get_cluster_used_capacity(\n                        cluster_name)\n                except Exception:\n                    error_msg = \"Failed to get capacity from VPLEX!\"\n                    raise exception.StorageBackendException(error_msg)\n                free_capacity = total_capacity - used_capacity\n                if free_capacity < 0:\n                    free_capacity = 0\n                cluster = {\n                    'name': cluster_name,\n                    'vendor': 'DELL EMC',\n                    'description': 'EMC VPlex Storage',\n                    'status': status,\n                    'serial_number': attr_map.get('top-level-assembly'),\n                    'firmware_version': health_map.get(\"Product Version\"),\n                    'model': 'EMC VPLEX ' + health_map.get(\"Product Type\"),\n 
                   'location': '',\n                    'raw_capacity': int(raw_capacity),\n                    'total_capacity': int(total_capacity),\n                    'used_capacity': int(used_capacity),\n                    'free_capacity': int(free_capacity)\n                }\n                break\n        return cluster\n\n    def list_storage_pools(self, context):\n        device_list = []\n        all_cluster = self.rest_handler.get_cluster_resp()\n        cluster_name_list = VplexStorageDriver.get_resource_names(all_cluster)\n        for cluster_name in cluster_name_list:\n            response_device = self.rest_handler.get_devcie_resp(cluster_name)\n            map_device_childer = VplexStorageDriver.get_children_map(\n                response_device)\n            for name, resource_type in map_device_childer.items():\n                response_dn = self.rest_handler.get_device_by_name_resp(\n                    cluster_name, name)\n                map_dn_attribute = VplexStorageDriver.get_attribute_map(\n                    response_dn)\n                virtual_volume = map_dn_attribute.get(\"virtual-volume\")\n                total_capacity_str = map_dn_attribute.get(\"capacity\")\n                total_capacity = VplexStorageDriver.analyse_capacity(\n                    total_capacity_str)\n                operate_status = map_dn_attribute.get('operational-status')\n                health_status = map_dn_attribute.get('health-state')\n                used_capacity = 0\n                free_capacity = 0\n                if virtual_volume:\n                    used_capacity = total_capacity\n                else:\n                    free_capacity = total_capacity\n\n                device = {\n                    'name': name,\n                    'storage_id': self.storage_id,\n                    'native_storage_pool_id': map_dn_attribute.get(\n                        \"system-id\"),\n                    'description': 'EMC VPlex Pool',\n                    'status': self.analyse_status(operate_status,\n                                                  health_status),\n                    'storage_type': constants.StorageType.BLOCK,\n                    'total_capacity': int(total_capacity),\n                    'used_capacity': int(used_capacity),\n                    'free_capacity': int(free_capacity)\n                }\n                device_list.append(device)\n        return device_list\n\n    def list_volumes(self, context):\n        vv_list = []\n        all_cluster = self.rest_handler.get_cluster_resp()\n        cluster_name_list = VplexStorageDriver.get_resource_names(all_cluster)\n        for cluster_name in cluster_name_list:\n            resposne_vv = self.rest_handler.get_virtual_volume_resp(\n                cluster_name)\n            map_vv_children = VplexStorageDriver.get_children_map(resposne_vv)\n            for name, resource_type in map_vv_children.items():\n                response_vvn = self.rest_handler. 
\\\n                    get_virtual_volume_by_name_resp(cluster_name, name)\n                map_vvn_attribute = VplexStorageDriver.get_attribute_map(\n                    response_vvn)\n                thin_enabled = map_vvn_attribute.get(\"thin-enabled\")\n                operate_status = map_vvn_attribute.get('operational-status')\n                health_status = map_vvn_attribute.get('health-state')\n                vv_type = self.analyse_vv_type(thin_enabled)\n                total_capacity = VplexStorageDriver.analyse_capacity(\n                    map_vvn_attribute.get(\"capacity\"))\n                vpd_id = map_vvn_attribute.get(\"vpd-id\")\n                cells = vpd_id.split(\":\")\n                wwn = ''\n                if len(cells) > 1:\n                    wwn = cells[1]\n                used_capacity = 0\n                if vv_type == constants.VolumeType.THICK:\n                    used_capacity = total_capacity\n                vv = {\n                    'name': name,\n                    'storage_id': self.storage_id,\n                    'description': 'EMC VPlex volume',\n                    'status': self.analyse_status(operate_status,\n                                                  health_status),\n                    'native_volume_id': vpd_id,\n                    'native_storage_pool_id': map_vvn_attribute.get(\n                        'supporting-device'),\n                    'type': vv_type,\n                    'total_capacity': int(total_capacity),\n                    'used_capacity': int(used_capacity),\n                    'free_capacity': 0,\n                    'wwn': wwn\n                }\n                vv_list.append(vv)\n        return vv_list\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return alert_handler.AlertHandler().parse_alert(context, alert)\n\n    def list_alerts(self, context, query_para=None):\n        info_msg = \"list_alerts is not supported in model VPLEX\"\n        LOG.info(info_msg)\n        raise NotImplementedError(info_msg)\n\n    def clear_alert(self, context, alert):\n        pass\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}'\n\n    @staticmethod\n    def get_attribute_map(response):\n        attr_map = {}\n        if response:\n            contexts = response.get(\"context\")\n            for context in contexts:\n                attributes = context.get(\"attributes\")\n                for attribute in attributes:\n                    key = attribute.get(\"name\")\n                    value = attribute.get(\"value\")\n                    attr_map[key] = value\n        return attr_map\n\n    @staticmethod\n    def analyse_capacity(capacity_str):\n        capacity = 0\n        if capacity_str.strip():\n            capacity = re.findall(\"\\\\d+\", capacity_str)[0]\n        return capacity\n\n    @staticmethod\n    def analyse_status(operational_status, health_status):\n        status = constants.StorageStatus.ABNORMAL\n        status_normal = [\"ok\"]\n        status_offline = [\"unknown\", \"isolated\", \"not-running\",\n                          \"non-recoverable-error\"]\n        if operational_status and health_status in status_normal:\n            status = constants.StorageStatus.NORMAL\n        elif operational_status and health_status in status_offline:\n            status = constants.StorageStatus.OFFLINE\n        
return status\n\n    @staticmethod\n    def analyse_storage_status(operational_status, health_status):\n        status = constants.StorageStatus.ABNORMAL\n        status_normal = [\"ok\"]\n        status_offline = [\"unknown\", \"isolated\", \"not-running\",\n                          \"non-recoverable-error\"]\n        if operational_status == constants.StorageStatus.DEGRADED:\n            status = constants.StorageStatus.DEGRADED\n        elif operational_status and health_status in status_normal:\n            status = constants.StorageStatus.NORMAL\n        elif operational_status and health_status in status_offline:\n            status = constants.StorageStatus.OFFLINE\n        return status\n\n    @staticmethod\n    def analyse_vv_type(thin_enabled):\n        rs_type = constants.VolumeType.THICK\n        if thin_enabled == \"enabled\":\n            rs_type = constants.VolumeType.THIN\n        return rs_type\n\n    @staticmethod\n    def get_children_map(response):\n        child_map = {}\n        if response:\n            contexts = response.get(\"context\")\n            for context in contexts:\n                childrens = context.get(\"children\")\n                for children in childrens:\n                    name = children.get(\"name\")\n                    type = children.get(\"type\")\n                    child_map[name] = type\n        return child_map\n\n    @staticmethod\n    def get_resource_names(response):\n        resource_name_list = []\n        if response:\n            contexts = response.get('context')\n            for context in contexts:\n                childer_clusters = context.get(\"children\")\n                for childer_cluster in childer_clusters:\n                    cluster_name = childer_cluster.get(\"name\")\n                    resource_name_list.append(cluster_name)\n        return resource_name_list\n\n    @staticmethod\n    def handle_detail(detail_info, detail_map, split):\n        detail_arr = detail_info.split('\\n')\n        for detail in detail_arr:\n            if detail is not None and detail != '':\n                strinfo = detail.split(split, 1)\n                key = strinfo[0]\n                value = ''\n                if len(strinfo) > 1:\n                    value = strinfo[1]\n                detail_map[key] = value\n\n    def get_cluster_raw_capacity(self, cluster_name):\n        resposne_summary = self.rest_handler. 
\\\n            get_storage_volume_summary_resp(cluster_name)\n        try:\n            custom_data = resposne_summary.get(\"custom-data\")\n            find_capacity = re.findall(\n                r\"Capacity\\s+total\\s+(([0-9]*(\\.[0-9]{1,3}))|([0-9]+))\",\n                custom_data)\n            find_capacity_str = find_capacity[-1][0]\n            find_capacity_float = float(find_capacity_str)\n            capacity = int(find_capacity_float * units.Ti)\n        except Exception as e:\n            LOG.error(\"Storage raw capacity, cluster %s analyse error %s\" %\n                      (cluster_name, six.text_type(e)))\n            raise e\n        return capacity\n\n    def get_cluster_total_capacity(self, cluster_name):\n        resposne_summary = self.rest_handler.get_device_summary_resp(\n            cluster_name)\n        try:\n            custom_data = resposne_summary.get(\"custom-data\")\n            find_capacity = re.findall(\n                r'total.*?(([0-9]*(\\.[0-9]{1,3}))|([0-9]+))',\n                custom_data)\n            find_capacity_str = find_capacity[-1][0]\n            find_capacity_float = float(find_capacity_str)\n            capacity = int(find_capacity_float * units.Ti)\n        except Exception as e:\n            LOG.error(\"Storage total capacity, cluster %s analyse error %s\" %\n                      (cluster_name, six.text_type(e)))\n            raise e\n        return capacity\n\n    def get_cluster_used_capacity(self, cluster_name):\n        resposne_summary = self.rest_handler. \\\n            get_virtual_volume_summary_resp(cluster_name)\n        try:\n            custom_data = resposne_summary.get(\"custom-data\")\n            find_capacity = re.findall(\n                r\"capacity\\s+is\\s+(([0-9]*(\\.[0-9]{1,3}))|([0-9]+))\",\n                custom_data)\n            find_capacity_str = find_capacity[-1][0]\n            find_capacity_float = float(find_capacity_str)\n            capacity = int(find_capacity_float * units.Ti)\n        except Exception as e:\n            LOG.error(\"Storage used capacity, cluster %s analyse error %s\" %\n                      (cluster_name, six.text_type(e)))\n            raise e\n        return capacity\n\n    def list_controllers(self, context):\n        \"\"\"List all storage controllers from storage system.\"\"\"\n        ct_list = []\n        director_version_map = {}\n        version_resp = self.rest_handler.get_version_verbose()\n        all_director = self.rest_handler.get_engine_director_resp()\n        ct_context_list = VplexStorageDriver.get_context_list(all_director)\n        VplexStorageDriver.analyse_director_version(version_resp,\n                                                    director_version_map)\n        for ct_context in ct_context_list:\n            ct_attr_map = ct_context.get(\"attributes\")\n            communication_status = ct_attr_map.get('communication-status')\n            name = ct_attr_map.get('name')\n            ct = {\n                'native_controller_id': ct_attr_map.get('director-id'),\n                'name': name,\n                'status': VplexStorageDriver.analyse_director_status(\n                    communication_status),\n                'location': '',\n                'storage_id': self.storage_id,\n                'soft_version': self.get_value_from_nest_map(\n                    director_version_map, name, \"Director Software\"),\n                'cpu_info': '',\n                'memory_size': ''\n            }\n            ct_list.append(ct)\n        return 
ct_list\n\n    def list_ports(self, context):\n        \"\"\"List all ports from storage system.\"\"\"\n        port_list = []\n        hardware_port_map = {}\n        hardware_port_resp = self.rest_handler. \\\n            get_engine_director_hardware_port_resp()\n        export_port_resp = self.rest_handler.get_cluster_export_port_resp()\n        VplexStorageDriver.analyse_hardware_port(hardware_port_resp,\n                                                 hardware_port_map)\n        port_context_list = VplexStorageDriver. \\\n            get_context_list(export_port_resp)\n        for port_context in port_context_list:\n            port_attr = port_context.get('attributes')\n            port_name = port_attr.get('name')\n            export_status = port_attr.get('export-status')\n            speed, max_speed, protocols, role, port_status, \\\n                operational_status = self.get_hardware_port_info(\n                    hardware_port_map, port_name, 'attributes')\n            connection_status = VplexStorageDriver.analyse_port_connect_status(\n                export_status)\n            port = {\n                'native_port_id': port_attr.get('name'),\n                'name': port_attr.get('name'),\n                'type': VplexStorageDriver.analyse_port_type(protocols),\n                'logical_type': VplexStorageDriver.analyse_port_logical_type(\n                    role),\n                'connection_status': connection_status,\n                'health_status': VplexStorageDriver.analyse_port_health_status(\n                    operational_status),\n                'location': '',\n                'storage_id': self.storage_id,\n                'native_parent_id': port_attr.get('director-id'),\n                'speed': VplexStorageDriver.analyse_speed(speed),\n                'max_speed': VplexStorageDriver.analyse_speed(max_speed),\n                'wwn': port_attr.get('port-wwn'),\n                'mac_address': '',\n                'ipv4': '',\n                'ipv4_mask': '',\n                'ipv6': '',\n                'ipv6_mask': ''\n            }\n            port_list.append(port)\n        return port_list\n\n    @staticmethod\n    def get_context_list(response):\n        context_list = []\n        if response:\n            contexts = response.get(\"context\")\n            for context in contexts:\n                ct_type = context.get(\"type\")\n                parent = context.get(\"parent\")\n                attributes = context.get(\"attributes\")\n                context_map = {}\n                attr_map = {}\n                for attribute in attributes:\n                    key = attribute.get(\"name\")\n                    value = attribute.get(\"value\")\n                    attr_map[key] = value\n                context_map[\"type\"] = ct_type\n                context_map[\"parent\"] = parent\n                context_map[\"attributes\"] = attr_map\n                context_list.append(context_map)\n        return context_list\n\n    @staticmethod\n    def analyse_director_version(version_resp, director_version_map):\n        custom_data = version_resp.get('custom-data')\n        detail_arr = custom_data.split('\\n')\n        director_name = ''\n        version_name = ''\n        for detail in detail_arr:\n            if detail is not None and detail != '':\n                if \"For director\" in detail:\n                    match_obj = re.search(\n                        r'For director.+?directors/(.*?):', detail)\n                    if match_obj:\n       
                 director_name = match_obj.group(1)\n                    continue\n                if director_name:\n                    if \"What:\" in detail:\n                        match_obj = re.search(r'What:\\s+(.+?)$', detail)\n                        if match_obj:\n                            version_name = match_obj.group(1)\n                        continue\n                    if version_name:\n                        match_obj = re.search(r'Version:\\s+(.+?)$', detail)\n                        if match_obj:\n                            version_value = match_obj.group(1)\n                            if director_version_map.get(director_name):\n                                director_version_map.get(director_name)[\n                                    version_name] = version_value\n                            else:\n                                version_map = {}\n                                version_map[version_name] = version_value\n                                director_version_map[\n                                    director_name] = version_map\n\n    @staticmethod\n    def analyse_director_status(status):\n        return consts.CONTROLLER_STATUS_MAP. \\\n            get(status, constants.ControllerStatus.UNKNOWN)\n\n    def get_director_specified_version(self, version_map, director_name,\n                                       specified_name):\n        version_value = ''\n        if version_map:\n            director_map = version_map.get(director_name)\n            if director_map:\n                version_value = director_map.get(specified_name)\n        return version_value\n\n    def get_value_from_nest_map(self, nest_map, first_key, second_key):\n        final_value = ''\n        if nest_map:\n            second_map = nest_map.get(first_key)\n            if second_map:\n                final_value = second_map.get(second_key)\n        return final_value\n\n    def get_hardware_port_info(self, nest_map, first_key, second_key):\n        speed = ''\n        max_speed = ''\n        protocols = []\n        role = ''\n        port_status = ''\n        operational_status = ''\n        if nest_map:\n            second_map = nest_map.get(first_key)\n            if second_map:\n                third_map = second_map.get(second_key)\n                if third_map:\n                    speed = third_map.get('current-speed')\n                    max_speed = third_map.get('max-speed')\n                    protocols = third_map.get('protocols')\n                    role = third_map.get('role')\n                    port_status = third_map.get('port-status')\n                    operational_status = third_map.get('operational-status')\n        return (speed, max_speed, protocols, role, port_status,\n                operational_status)\n\n    @staticmethod\n    def analyse_hardware_port(resp, hardware_port_map):\n        port_list = VplexStorageDriver.get_context_list(resp)\n        if port_list:\n            for port in port_list:\n                port_attr = port.get(\"attributes\")\n                if port_attr:\n                    port_name = port_attr.get(\"target-port\")\n                    hardware_port_map[port_name] = port\n\n    @staticmethod\n    def analyse_port_type(protocols):\n        port_type = constants.PortType.OTHER\n        if protocols:\n            for protocol in protocols:\n                port_type_value = consts.PORT_TYPE_MAP.get(protocol)\n                if port_type_value:\n                    port_type = port_type_value\n                    
break\n        return port_type\n\n    @staticmethod\n    def analyse_port_logical_type(role):\n        return consts.PORT_LOGICAL_TYPE_MAP. \\\n            get(role, constants.PortLogicalType.OTHER)\n\n    @staticmethod\n    def analyse_port_connect_status(status):\n        return consts.PORT_CONNECT_STATUS_MAP. \\\n            get(status, constants.PortConnectionStatus.UNKNOWN)\n\n    @staticmethod\n    def analyse_port_health_status(status):\n        return consts.PORT_HEALTH_STATUS_MAP. \\\n            get(status, constants.PortHealthStatus.UNKNOWN)\n\n    @staticmethod\n    def analyse_speed(speed_value):\n        speed = None\n        if speed_value:\n            match_obj = re.search(r'([1-9]\\d*\\.?\\d*)|(0\\.\\d*[1-9])',\n                                  speed_value)\n            if match_obj:\n                # The regex may match a fractional value such as '2.5',\n                # so parse as float first to avoid a ValueError from int()\n                speed = int(float(match_obj.group(0)))\n                if 'Gbit' in speed_value:\n                    speed = speed * units.G\n                elif 'Mbit' in speed_value:\n                    speed = speed * units.M\n                elif 'Kbit' in speed_value:\n                    speed = speed * units.k\n        return speed\n\n    def list_masking_views(self, context):\n        try:\n            view_list = []\n            view_response = self.rest_handler.get_storage_views()\n            storage_view_list = self.get_attributes_from_response(\n                view_response)\n            if storage_view_list:\n                host_list = self.list_storage_hosts(context)\n                host_map = {}\n                for host_value in host_list:\n                    host_map[host_value.get('name')] = \\\n                        host_value.get('native_storage_host_id')\n                for storage_view in storage_view_list:\n                    virtual_volumes = storage_view.get('virtual-volumes')\n                    initiators_list = storage_view.get('initiators')\n                    view_name = storage_view.get('name')\n                    if initiators_list:\n                        for initiator_info in initiators_list:\n                            native_masking_view_id = initiator_info\n                            native_storage_host_id = host_map.get(\n                                initiator_info)\n                            if virtual_volumes:\n                                for virtual_volume in virtual_volumes:\n                                    volume_value = virtual_volume.split(\n                                        ',')\n                                    native_volume_id = volume_value[2]\n                                    volume_id = native_volume_id.replace(\n                                        ':', '')\n                                    view_map = {\n                                        \"name\": view_name,\n                                        \"description\": view_name,\n                                        \"storage_id\": self.storage_id,\n                                        \"native_masking_view_id\":\n                                            native_masking_view_id + volume_id,\n                                        \"native_port_group_id\":\n                                            \"port_group_\" + initiator_info,\n                                        \"native_volume_id\":\n                                            native_volume_id,\n                                        \"native_storage_host_id\":\n                                            native_storage_host_id\n                                    }\n                                    view_list.append(view_map)\n            return view_list\n        except Exception:\n            LOG.error(\"Failed to get masking views from vplex\")\n            raise\n\n    def list_storage_host_initiators(self, context):\n        try:\n            initiators_list = []\n            initiators_response = self.rest_handler.get_initiators_resp()\n            initiators_info_list = self.get_attributes_from_response(\n                initiators_response)\n            for initiators_map in initiators_info_list:\n                initiators_type = initiators_map.get('port_type')\n                initiators_type_arr = initiators_type.split('-')\n                initiators_type_index = initiators_type_arr[0]\n                description = consts.INITIATOR_DESCRIPTION.get(\n                    initiators_type_index,\n                    constants.InitiatorType.UNKNOWN)\n                initiator_item = {\n                    \"name\": initiators_map.get('name'),\n                    \"type\": description,\n                    \"storage_id\": self.storage_id,\n                    \"native_storage_host_initiator_id\":\n                        initiators_map.get('port-wwn'),\n                    \"wwn\": initiators_map.get('port-wwn'),\n                    \"alias\": initiators_map.get('port-wwn'),\n                    \"status\": constants.InitiatorStatus.ONLINE,\n                    \"native_storage_host_id\": initiators_map.get('port-wwn')\n                }\n                initiators_list.append(initiator_item)\n            return initiators_list\n        except Exception:\n            LOG.error(\"Failed to get host_initiators from vplex\")\n            raise\n\n    def list_storage_hosts(self, context):\n        try:\n            hosts_list = []\n            host_response = self.rest_handler.get_initiators_resp()\n            hosts_info_list = self.get_attributes_from_response(host_response)\n            for host_info in hosts_info_list:\n                os_type = host_info.get('type')\n                host_dict = {\n                    \"name\": host_info.get('name'),\n                    \"storage_id\": self.storage_id,\n                    \"os_type\": consts.HOST_TYPE_MAP.get(\n                        os_type, constants.HostOSTypes.UNKNOWN),\n                    \"native_storage_host_id\": host_info.get('port-wwn'),\n                    \"status\": constants.HostStatus.NORMAL\n                }\n                hosts_list.append(host_dict)\n            return hosts_list\n        except Exception:\n            LOG.error(\"Failed to get storage_host from vplex\")\n            raise\n\n    def list_port_groups(self, context):\n        try:\n            port_groups_list = []\n            port_group_relation_list = []\n            port_group_response = self.rest_handler.get_storage_views()\n            storage_view_list = self.get_attributes_from_response(\n                port_group_response)\n            for storage_view in storage_view_list:\n                ports = storage_view.get('ports')\n                initiators_info_list = storage_view.get('initiators')\n                if initiators_info_list:\n                    for initiator_info in initiators_info_list:\n                        port_group_map = {\n                            \"name\": \"port_group_\" + initiator_info,\n                            \"description\": \"port_group_\" + initiator_info,\n                            \"storage_id\": self.storage_id,\n                            
\"native_port_group_id\": \"port_group_\"\n                                                    + initiator_info,\n                            \"ports\": ports\n                        }\n                        if ports:\n                            for port in ports:\n                                port_group_relation = {\n                                    'storage_id': self.storage_id,\n                                    'native_port_group_id': \"port_group_\"\n                                                            + initiator_info,\n                                    'native_port_id': port\n                                }\n                                port_group_relation_list.append(\n                                    port_group_relation)\n                        port_groups_list.append(port_group_map)\n            port_groups_result = {\n                'port_groups': port_groups_list,\n                'port_grp_port_rels': port_group_relation_list\n            }\n            return port_groups_result\n        except Exception:\n            LOG.error(\"Failed to get port_groups from vplex\")\n            raise\n\n    @staticmethod\n    def get_attributes_from_response(response):\n        attributes_list = []\n        if response:\n            contexts = response.get(\"context\")\n            for context in contexts:\n                child_map = {}\n                attributes = context.get(\"attributes\")\n                context_type = context.get(\"type\")\n                child_map['port_type'] = context_type\n                for children in attributes:\n                    name = children.get(\"name\")\n                    value = children.get(\"value\")\n                    child_map[name] = value\n                attributes_list.append(child_map)\n        return attributes_list\n\n\n@staticmethod\ndef handle_detail_list(detail_info, detail_map, split):\n    detail_arr = detail_info.split('\\n')\n    for detail in detail_arr:\n        if detail is not None and detail != '':\n            strinfo = detail.split(split, 1)\n            key = strinfo[0]\n            value = ''\n            if len(strinfo) > 1:\n                value = strinfo[1]\n            detail_map[key] = value\n"
  },
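The VPLEX driver above repeatedly flattens the REST API's `context` payload, in which each context reports its attributes as a list of `{"name": ..., "value": ...}` pairs, into plain dicts (`get_context_list`, `analyse_hardware_port`, `get_attributes_from_response`). Below is a minimal, self-contained sketch of that transform; the sample payload is invented for illustration and is not a captured VPLEX response.

```python
# Standalone sketch of the flattening done by
# VplexStorageDriver.get_context_list(); the sample payload is invented.
sample_resp = {
    "context": [{
        "type": "hardware-port",
        "parent": "/engines/engine-1-1/directors/DirA/hardware/ports",
        "attributes": [
            {"name": "name", "value": "A0-FC00"},
            {"name": "current-speed", "value": "8Gbits/s"},
            {"name": "role", "value": "front-end"},
        ],
    }]
}


def get_context_list(response):
    """Turn each context's attribute list into a single dict."""
    context_list = []
    for context in (response or {}).get("context") or []:
        attr_map = {attr.get("name"): attr.get("value")
                    for attr in context.get("attributes") or []}
        context_list.append({"type": context.get("type"),
                             "parent": context.get("parent"),
                             "attributes": attr_map})
    return context_list


print(get_context_list(sample_resp))
# [{'type': 'hardware-port',
#   'parent': '/engines/engine-1-1/directors/DirA/hardware/ports',
#   'attributes': {'name': 'A0-FC00', 'current-speed': '8Gbits/s',
#                  'role': 'front-end'}}]
```

Once flattened this way, helpers such as analyse_speed() only need to look up plain keys like 'current-speed' instead of walking the raw attribute list.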
  {
    "path": "delfin/drivers/driver.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport six\nimport abc\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass StorageDriver(object):\n\n    def __init__(self, **kwargs):\n        \"\"\"\n        :param kwargs:  A dictionary, include access information. Pay\n            attention that it's not safe to save username and password\n            in memory, so suggest each driver use them to get session\n            instead of save them in memory directly.\n        \"\"\"\n        self.storage_id = kwargs.get('storage_id', None)\n\n    def delete_storage(self, context):\n        \"\"\"Cleanup storage device information from driver\"\"\"\n        pass\n\n    def add_storage(self, kwargs):\n        \"\"\"Add storage device information to driver\"\"\"\n        pass\n\n    @abc.abstractmethod\n    def reset_connection(self, context, **kwargs):\n        \"\"\" Reset connection with backend with new args \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def get_storage(self, context):\n        \"\"\"Get storage device information from storage system\"\"\"\n        pass\n\n    @abc.abstractmethod\n    def list_storage_pools(self, context):\n        \"\"\"List all storage pools from storage system.\"\"\"\n        pass\n\n    @abc.abstractmethod\n    def list_volumes(self, context):\n        \"\"\"List all storage volumes from storage system.\"\"\"\n        pass\n\n    def list_controllers(self, context):\n        \"\"\"List all storage controllers from storage system.\"\"\"\n        raise NotImplementedError(\n            \"Driver API list_controllers() is not Implemented\")\n\n    def list_ports(self, context):\n        \"\"\"List all ports from storage system.\"\"\"\n        raise NotImplementedError(\n            \"Driver API list_ports() is not Implemented\")\n\n    def list_disks(self, context):\n        \"\"\"List all disks from storage system.\"\"\"\n        raise NotImplementedError(\n            \"Driver API list_disks() is not Implemented\")\n\n    @abc.abstractmethod\n    def add_trap_config(self, context, trap_config):\n        \"\"\"Config the trap receiver in storage system.\"\"\"\n        pass\n\n    @abc.abstractmethod\n    def remove_trap_config(self, context, trap_config):\n        \"\"\"Remove trap receiver configuration from storage system.\"\"\"\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        \"\"\"Parse alert data got from snmp trap server.\"\"\"\n\n        \"\"\"\n        Alert Model\tDescription\n        *****Filled from driver side ***********************\n        alert_id\tUnique identification for a given alert type\n        alert_name\tUnique name for a given alert type\n        severity\tSeverity of the alert\n        category\tCategory of alert generated\n        type\tType of the alert generated\n        sequence_number\tSequence number for the alert, uniquely identifies a\n                                  given alert instance used for\n                                  
clearing the alert\n        occur_time\tTime at which alert is generated from device in epoch\n                    format\n        description\tPossible cause description or other details about\n                                the alert\n        recovery_advice\tSome suggestion for handling the given alert\n        resource_type\tResource type of device/source generating alert\n        location\tDetailed info for tracing the alerting device such as\n                    slot, rack, component, parts etc\n        *****************************************************\n        \"\"\"\n\n        pass\n\n    @abc.abstractmethod\n    def list_alerts(self, context, query_para=None):\n        \"\"\"List all current alerts from storage system.\"\"\"\n        \"\"\"\n        query_para is an optional parameter which contains 'begin_time' and\n        'end_time' (in milliseconds) which is to be used to filter\n        alerts at the driver\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def clear_alert(self, context, sequence_number):\n        \"\"\"Clear alert from storage system.\"\"\"\n        pass\n\n    def collect_perf_metrics(self, context, storage_id,\n                             resource_metrics, start_time, end_time):\n        \"\"\"Collect performance metrics from storage system.\"\"\"\n\n        \"\"\"\n        Input:\n        context: context information\n        storage_id: storage identifier\n        resource_metrics: dictionary representing the collection configuration\n        Example:\n        resource_metrics =\n              {'storagePool':\n                        ['readThroughput', 'writeThroughput', 'responseTime'],\n                'volume':\n                        ['readThroughput', 'writeThroughput']}\n        start_time\tTime from which the performance metrics are to be collected\n                    It is in epoch format in milliseconds\n        end_time\tTime until which the performance metrics are to be collected\n                    It is in epoch format in milliseconds\n\n        Response: List of metrics with details\n                Format : [Metric(name=metric_1,\n                             labels={'key_1': value_1,\n                                     'key_2': value_2,},\n                             values={timestamp_0: value_0,\n                                     timestamp_n: value_n,})]\n        Example:\n        [Metric(name='responseTime',\n                     labels={'storage_id': '1f8d6982-2ac2-4fa9-95ef-78f359de',\n                             'resource_type': 'storagePool'},\n                     values={1616560337249: 96.12081735538251}),\n          Metric(name='throughput',\n                     labels={'storage_id': '1f8d6982-2ac2-4fa9-95ef-78f359de',\n                             'resource_type': 'storagePool'},\n                     values={1616560337249: 90.08194398331271})]\n        \"\"\"\n        pass\n\n    def list_quotas(self, context):\n        \"\"\"List all quotas from storage system.\"\"\"\n        raise NotImplementedError(\n            \"Driver API list_quotas() is not Implemented\")\n\n    def list_filesystems(self, context):\n        \"\"\"List all filesystems from storage system.\"\"\"\n        raise NotImplementedError(\n            \"Driver API list_filesystems() is not Implemented\")\n\n    def list_qtrees(self, context):\n        \"\"\"List all qtrees from storage system.\"\"\"\n        raise NotImplementedError(\n            \"Driver API list_qtrees() is not Implemented\")\n\n    def list_shares(self, context):\n        
\"\"\"List all shares from storage system.\"\"\"\n        raise NotImplementedError(\n            \"Driver API list_shares() is not Implemented\")\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        \"\"\"Get capability of driver:\n        is_historic (bool): required\n        performance_metric_retention_window (int): optional, default is None\n        collect_interval (int): optional, default is\n            TelemetryCollection.DEF_PERFORMANCE_COLLECTION_INTERVAL\n            in common/constants.py\n        failed_job_collect_interval (int): optional, default is\n            TelemetryCollection.FAILED_JOB_SCHEDULE_INTERVAL\n            in common/constants.py\n        resource_metrics (dict): required, please refer to\n            STORAGE_CAPABILITIES_SCHEMA\n            in api/schemas/storage_capabilities_schema.py.\n\n        For example:\n        {\n            'is_historic': True,\n            'performance_metric_retention_window': 4500,\n            'collect_interval': 900\n            'failed_job_collect_interval': 900,\n            'resource_metrics': {\n                'storage': {\n                    'iops': {\n                        'unit': 'IOPS',\n                        'description': 'Read/write operations per second'\n                    },\n                    ...\n                },\n                ...\n            }\n        }\n        \"\"\"\n        pass\n\n    def list_storage_host_initiators(self, context):\n        \"\"\"List all storage initiators from storage system.\"\"\"\n        \"\"\"\n        *********Model description**********\n        native_storage_host_initiator_id: Native id at backend side(mandatory)\n        native_storage_host_id: Native id of host at backend side if associated\n        name: Name of the initiator\n        description: Description of the initiator\n        alias: Alias of the initiator\n        type: initiator type (fc, iscsi, nvme_over_roce)\n        status: Health status(normal, offline, abnormal, unknown)\n        wwn: Worldwide name\n        storage_id: Storage id at delfin side\n        \"\"\"\n        raise NotImplementedError(\n            \"Driver API list_storage_host_initiators() is not Implemented\")\n\n    def list_storage_hosts(self, context):\n        \"\"\"List all storage hosts from storage system.\"\"\"\n        \"\"\"\n        *********Model description**********\n        native_storage_host_id: Native id of host at backend side(mandatory)\n        name: Name of the host\n        description: Description of the host\n        os_type: operating system type\n        status: Health status(normal, offline, abnormal, unknown)\n        ip_address: Ip address of the host\n        storage_id: Storage id at delfin side\n        \"\"\"\n        raise NotImplementedError(\n            \"Driver API list_storage_hosts() is not Implemented\")\n\n    def list_storage_host_groups(self, context):\n        \"\"\"\n        Returns a dict with following\n        'storage_host_groups': <List storage host groups from storage system>,\n        'storage_host_grp_host_rels': <List host groups to host relation>,\n        \"\"\"\n        \"\"\"\n        ********* storage_host_groups Model description**********\n        native_storage_host_group_id: Native id of host grp at backend side\n                                      (mandatory)\n        name: Name of the host grp\n        description: Description of the host grp\n        storage_hosts: List of associated hosts if any(, separated list)\n        
storage_id: Storage id at delfin side\n        \"\"\"\n        raise NotImplementedError(\n            \"Driver API list_storage_host_groups() is not Implemented\")\n\n    def list_port_groups(self, context):\n        \"\"\"\n        Returns a dict with following\n        'port_groups': <List port groups from storage system>,\n        'port_grp_port_rels': <List port groups to port relation>,\n        \"\"\"\n        \"\"\"\n        ********* port_groups Model description**********\n        native_port_group_id: Native id of port grp at backend side (mandatory)\n        name: Name of the port grp\n        description: Description of the port grp\n        ports: List of associated ports if any (comma separated list)\n        storage_id: Storage id at delfin side\n        \"\"\"\n        raise NotImplementedError(\n            \"Driver API list_port_groups() is not Implemented\")\n\n    def list_volume_groups(self, context):\n        \"\"\"\n        Returns a dict with following\n        'volume_groups': <List volume groups from storage system>,\n        'vol_grp_vol_rels': <List volume groups to volume relation>,\n        \"\"\"\n        \"\"\"\n        ********* volume_groups Model description**********\n        native_volume_group_id: Native id of volume grp at backend side\n                                (mandatory)\n        name: Name of the volume grp\n        description: Description of the volume grp\n        volumes: List of associated volumes if any (comma separated list)\n        storage_id: Storage id at delfin side\n        \"\"\"\n        raise NotImplementedError(\n            \"Driver API list_volume_groups() is not Implemented\")\n\n    def list_masking_views(self, context):\n        \"\"\"List all masking views from storage system.\"\"\"\n        \"\"\"\n        *********Model description**********\n        native_masking_view_id: Native id of masking view at backend side\n                                (mandatory)\n        name: Name of the masking view\n        description: Description of the masking view\n        native_storage_host_group_id: Native id of host grp at backend side\n        native_port_group_id: Native id of port grp at backend side\n        native_volume_group_id: Native id of volume grp at backend side\n        native_storage_host_id: Native id of host at backend side\n        native_volume_id: Native id of volume at backend side\n        storage_id: Storage id at delfin side\n\n        Masking view filling guidelines:\n        Driver can have different backend scenarios such as\n         - Direct host -> direct volume mapping\n         - Direct host -> direct volume -> direct port mapping\n         - Direct host -> volume group mapping\n         - Host grp -> volume group mapping\n         - Host grp -> direct volume(s) mapping\n         So the driver needs to fill in group or item ids based on\n         availability as given below\n        From host side: Mandatorily one of the (native_storage_host_group_id\n                        | native_storage_host_id)\n        From volume side: Mandatorily one of the (native_volume_group_id\n                                                 | native_volume_id)\n        From port side: Optionally (native_port_group_id)\n        \"\"\"\n        raise NotImplementedError(\n            \"Driver API list_masking_views() is not Implemented\")\n\n    def get_alert_sources(self, context):\n        return []\n\n    def get_latest_perf_timestamp(self, context):\n        \"\"\"Get the timestamp of the latest performance data of the 
device\"\"\"\n        pass\n"
  },
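StorageDriver is an abstract base class: a concrete driver can only be instantiated once every @abc.abstractmethod member is implemented, while the remaining methods default to no-ops or raise NotImplementedError. A minimal sketch of the required surface follows; the DemoDriver name and all returned values are hypothetical, shown only to illustrate the interface.

```python
# Minimal sketch of a concrete driver; DemoDriver and its returned
# values are hypothetical illustrations of the interface above.
from delfin.drivers import driver


class DemoDriver(driver.StorageDriver):
    def reset_connection(self, context, **kwargs):
        pass  # re-create the backend session with the new kwargs

    def get_storage(self, context):
        # One storage record in the model used by the fake driver.
        return {'name': 'demo', 'vendor': 'demo_vendor',
                'model': 'demo_model', 'status': 'normal',
                'serial_number': 'demo-sn-001',
                'firmware_version': '1.0.0',
                'total_capacity': 1000, 'used_capacity': 500,
                'free_capacity': 500, 'raw_capacity': 1200}

    def list_storage_pools(self, context):
        return []

    def list_volumes(self, context):
        return []

    def add_trap_config(self, context, trap_config):
        pass

    def remove_trap_config(self, context, trap_config):
        pass

    def list_alerts(self, context, query_para=None):
        return []

    def clear_alert(self, context, sequence_number):
        pass


d = DemoDriver(storage_id='storage-1')  # instantiable: all abstracts defined
```

Optional collection APIs such as list_controllers() or list_port_groups() can then be overridden selectively, as the VPLEX driver above does.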
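The collect_perf_metrics() docstring describes the response as a list of Metric entries; the fake driver further on builds them with constants.metric_struct(name=..., labels=..., values=...). A short sketch of assembling one entry in that format (the ids and values are illustrative):

```python
# Sketch of one collect_perf_metrics() response entry, built with
# constants.metric_struct as the fake driver does; values are made up.
from delfin.common import constants

labels = {'storage_id': '1f8d6982-2ac2-4fa9-95ef-78f359de',
          'resource_type': 'storagePool',
          'resource_id': 'storagePool_0',
          'type': 'RAW',
          'unit': 'ms'}
# values maps epoch-millisecond timestamps to samples.
metric = constants.metric_struct(name='responseTime',
                                 labels=labels,
                                 values={1616560337249: 96.12})
print(metric)
```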
  {
    "path": "delfin/drivers/fake_storage/__init__.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport random\nimport decorator\n\nimport math\nimport time\n\nimport six\nfrom eventlet import greenthread\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_utils import uuidutils\n\nfrom delfin import exception, db\nfrom delfin.common import constants\nfrom delfin.common.constants import ResourceType, StorageMetric, \\\n    StoragePoolMetric, VolumeMetric, ControllerMetric, PortMetric, \\\n    DiskMetric, FileSystemMetric\nfrom delfin.drivers import driver\n\nCONF = cfg.CONF\n\nfake_opts = [\n    cfg.StrOpt('fake_pool_range',\n               default='1-100',\n               help='The range of pool number for one device.'),\n    cfg.StrOpt('fake_volume_range',\n               default='1-2000',\n               help='The range of volume number for one device.'),\n    cfg.StrOpt('fake_api_time_range',\n               default='0.1-0.5',\n               help='The range of time cost for each API.'),\n    cfg.StrOpt('fake_page_query_limit',\n               default='500',\n               help='The limitation of volumes for each query.'),\n]\n\nCONF.register_opts(fake_opts, \"fake_driver\")\n\nLOG = log.getLogger(__name__)\n\nMIN_WAIT, MAX_WAIT = 0.1, 0.5\nMIN_POOL, MAX_POOL = 1, 100\nMIN_PORTS, MAX_PORTS = 1, 10\nMIN_DISK, MAX_DISK = 1, 100\nMIN_VOLUME, MAX_VOLUME = 1, 2000\nMIN_CONTROLLERS, MAX_CONTROLLERS = 1, 5\nPAGE_LIMIT = 500\nMIN_STORAGE, MAX_STORAGE = 1, 10\nMIN_QUOTA, MAX_QUOTA = 1, 100\nMIN_FS, MAX_FS = 1, 10\nMIN_QTREE, MAX_QTREE = 1, 100\nMIN_SHARE, MAX_SHARE = 1, 100\n# Minimum sampling interval\nMINIMUM_SAMPLE_DURATION_IN_MS = 60 * 1000\n# count of instances for each resource type\nRESOURCE_COUNT_DICT = {\n    \"storage\": 1,\n    \"storagePool\": MAX_POOL,\n    \"volume\": MAX_VOLUME,\n    \"port\": MAX_PORTS,\n    \"controller\": MAX_CONTROLLERS,\n    \"disk\": MAX_DISK,\n    \"filesystem\": MAX_FS,\n}\n\n# Min and max are currently set to 1 to make sure at least one relation can be\n# built in fake driver for host mapping elements\nMIN_STORAGE_HOST_INITIATORS, MAX_STORAGE_HOST_INITIATORS = 1, 3\nMIN_STORAGE_HOSTS, MAX_STORAGE_HOSTS = 1, 5\nMIN_STORAGE_HOST_GROUPS, MAX_STORAGE_HOST_GROUPS = 1, 5\nMIN_VOLUME_GROUPS, MAX_VOLUME_GROUPS = 1, 5\nMIN_PORT_GROUPS, MAX_PORT_GROUPS = 1, 5\nMAX_GROUP_RESOURCES_SIZE = 5\nMIN_MASKING_VIEWS, MAX_MASKING_VIEWS = 1, 5\nNON_GROUP_BASED_MASKING, GROUP_BASED_MASKING = 0, 1\n\n\ndef get_range_val(range_str, t):\n    try:\n        rng = range_str.split('-')\n        if len(rng) != 2:\n            raise exception.InvalidInput\n        min_val = t(rng[0])\n        max_val = t(rng[1])\n        return min_val, max_val\n    except Exception:\n        LOG.error(\"Invalid range: {0}\".format(range_str))\n        raise exception.InvalidInput\n\n\ndef wait_random(low, high):\n    @decorator.decorator\n    def _wait(f, *a, **k):\n        rd = random.randint(0, 100)\n        secs = low + (high - low) * rd / 100\n        
greenthread.sleep(secs)\n        return f(*a, **k)\n\n    return _wait\n\n\nclass FakeStorageDriver(driver.StorageDriver):\n    \"\"\"FakeStorageDriver shows how to implement the StorageDriver;\n    it also acts as a faker, generating fake data for client testing.\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        global MIN_WAIT, MAX_WAIT, MIN_POOL, MAX_POOL, MIN_VOLUME, MAX_VOLUME\n        global PAGE_LIMIT\n        MIN_WAIT, MAX_WAIT = get_range_val(\n            CONF.fake_driver.fake_api_time_range, float)\n        MIN_POOL, MAX_POOL = get_range_val(\n            CONF.fake_driver.fake_pool_range, int)\n        MIN_VOLUME, MAX_VOLUME = get_range_val(\n            CONF.fake_driver.fake_volume_range, int)\n        PAGE_LIMIT = int(CONF.fake_driver.fake_page_query_limit)\n        self.rd_volumes_count = random.randint(MIN_VOLUME, MAX_VOLUME)\n        self.rd_ports_count = random.randint(MIN_PORTS, MAX_PORTS)\n        self.rd_storage_hosts_count = random.randint(MIN_STORAGE_HOSTS,\n                                                     MAX_STORAGE_HOSTS)\n\n    def _get_random_capacity(self):\n        total = random.randint(1000, 2000)\n        used = int(random.randint(0, 100) * total / 100)\n        free = total - used\n        return total, used, free\n\n    def reset_connection(self, context, **kwargs):\n        pass\n\n    @wait_random(MIN_WAIT, MAX_WAIT)\n    def get_storage(self, context):\n        # Do something here\n\n        sn = six.text_type(uuidutils.generate_uuid())\n        try:\n            # use existing sn if already registered storage\n            storage = db.storage_get(context, self.storage_id)\n            if storage:\n                sn = storage['serial_number']\n        except exception.StorageNotFound:\n            LOG.debug('Registering new storage')\n        except Exception:\n            LOG.info('Error while retrieving storage from DB')\n        total, used, free = self._get_random_capacity()\n        raw = random.randint(2000, 3000)\n        subscribed = random.randint(3000, 4000)\n        return {\n            'name': 'fake_driver',\n            'description': 'fake driver.',\n            'vendor': 'fake_vendor',\n            'model': 'fake_model',\n            'status': 'normal',\n            'serial_number': sn,\n            'firmware_version': '1.0.0',\n            'location': 'HK',\n            'total_capacity': total,\n            'used_capacity': used,\n            'free_capacity': free,\n            'raw_capacity': raw,\n            'subscribed_capacity': subscribed\n        }\n\n    @wait_random(MIN_WAIT, MAX_WAIT)\n    def list_storage_pools(self, ctx):\n        rd_pools_count = random.randint(MIN_POOL, MAX_POOL)\n        LOG.info(\"###########fake_pools number for %s: %d\" % (self.storage_id,\n                                                              rd_pools_count))\n        pool_list = []\n        for idx in range(rd_pools_count):\n            total, used, free = self._get_random_capacity()\n            p = {\n                \"name\": \"storagePool_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_storage_pool_id\": \"storagePool_\" + str(idx),\n                \"description\": \"Fake Pool\",\n                \"status\": \"normal\",\n                \"total_capacity\": total,\n                \"used_capacity\": used,\n                \"free_capacity\": free,\n            }\n            pool_list.append(p)\n        return pool_list\n\n    def 
list_volumes(self, ctx):\n        # Get a random number as the volume count.\n        rd_volumes_count = self.rd_volumes_count\n        LOG.info(\"###########fake_volumes number for %s: %d\" % (\n            self.storage_id, rd_volumes_count))\n        loops = math.ceil(rd_volumes_count / PAGE_LIMIT)\n        volume_list = []\n        for idx in range(loops):\n            start = idx * PAGE_LIMIT\n            end = (idx + 1) * PAGE_LIMIT\n            if idx == (loops - 1):\n                end = rd_volumes_count\n            vs = self._get_volume_range(start, end)\n            volume_list = volume_list + vs\n        return volume_list\n\n    def list_controllers(self, ctx):\n        rd_controllers_count = random.randint(MIN_CONTROLLERS, MAX_CONTROLLERS)\n        LOG.info(\"###########fake_controllers for %s: %d\" %\n                 (self.storage_id, rd_controllers_count))\n        ctrl_list = []\n        for idx in range(rd_controllers_count):\n            total, used, free = self._get_random_capacity()\n            cpu = [\"Intel Xeon\", \"Intel Core ix\", \"ARM\"]\n            sts = list(constants.ControllerStatus.ALL)\n            sts_len = len(constants.ControllerStatus.ALL) - 1\n            c = {\n                \"name\": \"controller_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_controller_id\": \"controller_\" + str(idx),\n                \"location\": \"loc_\" + str(random.randint(0, 99)),\n                \"status\": sts[random.randint(0, sts_len)],\n                \"memory_size\": total,\n                \"cpu_info\": cpu[random.randint(0, 2)],\n                \"soft_version\": \"ver_\" + str(random.randint(0, 999)),\n            }\n            ctrl_list.append(c)\n        return ctrl_list\n\n    def list_ports(self, ctx):\n        rd_ports_count = self.rd_ports_count\n        LOG.info(\"###########fake_ports for %s: %d\" % (self.storage_id,\n                                                       rd_ports_count))\n        port_list = []\n        for idx in range(rd_ports_count):\n            max_s, normal, remain = self._get_random_capacity()\n            conn_sts = list(constants.PortConnectionStatus.ALL)\n            conn_sts_len = len(constants.PortConnectionStatus.ALL) - 1\n            health_sts = list(constants.PortHealthStatus.ALL)\n            health_sts_len = len(constants.PortHealthStatus.ALL) - 1\n            port_type = list(constants.PortType.ALL)\n            port_type_len = len(constants.PortType.ALL) - 1\n            logic_type = list(constants.PortLogicalType.ALL)\n            logic_type_len = len(constants.PortLogicalType.ALL) - 1\n            c = {\n                \"name\": \"port_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_port_id\": \"port_\" + str(idx),\n                \"location\": \"location_\" + str(random.randint(0, 99)),\n                \"connection_status\": conn_sts[\n                    random.randint(0, conn_sts_len)],\n                \"health_status\": health_sts[\n                    random.randint(0, health_sts_len)],\n                \"type\": port_type[\n                    random.randint(0, port_type_len)],\n                \"logical_type\": logic_type[\n                    random.randint(0, logic_type_len)],\n                \"speed\": normal,\n                \"max_speed\": max_s,\n                \"native_parent_id\": \"parent_id_\" + str(random.randint(0, 99)),\n                \"wwn\": \"wwn_\" + str(random.randint(0, 9999)),\n             
   \"mac_address\": \"mac_\" + str(random.randint(0, 9999)),\n                \"ipv4\": \"0.0.0.0\",\n                \"ipv4_mask\": \"255.255.255.0\",\n                \"ipv6\": \"0\",\n                \"ipv6_mask\": \"::\",\n            }\n            port_list.append(c)\n        return port_list\n\n    def list_disks(self, ctx):\n        rd_disks_count = random.randint(MIN_DISK, MAX_DISK)\n        LOG.info(\"###########fake_disks for %s: %d\" % (self.storage_id,\n                                                       rd_disks_count))\n        disk_list = []\n        for idx in range(rd_disks_count):\n            max_s, normal, remain = self._get_random_capacity()\n            manufacturer = [\"Intel\", \"Seagate\", \"WD\", \"Crucial\", \"HP\"]\n            sts = list(constants.DiskStatus.ALL)\n            sts_len = len(constants.DiskStatus.ALL) - 1\n            physical_type = list(constants.DiskPhysicalType.ALL)\n            physical_type_len = len(constants.DiskPhysicalType.ALL) - 1\n            logic_type = list(constants.DiskLogicalType.ALL)\n            logic_type_len = len(constants.DiskLogicalType.ALL) - 1\n            c = {\n                \"name\": \"disk_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_disk_id\": \"disk_\" + str(idx),\n                \"serial_number\": \"serial_\" + str(random.randint(0, 9999)),\n                \"manufacturer\": manufacturer[random.randint(0, 4)],\n                \"model\": \"model_\" + str(random.randint(0, 9999)),\n                \"firmware\": \"firmware_\" + str(random.randint(0, 9999)),\n                \"speed\": normal,\n                \"capacity\": max_s,\n                \"status\": sts[random.randint(0, sts_len)],\n                \"physical_type\": physical_type[\n                    random.randint(0, physical_type_len)],\n                \"logical_type\": logic_type[random.randint(0, logic_type_len)],\n                \"health_score\": random.randint(0, 100),\n                \"native_diskgroup_id\": \"dg_id_\" + str(random.randint(0, 99)),\n                \"location\": \"location_\" + str(random.randint(0, 99)),\n            }\n            disk_list.append(c)\n        return disk_list\n\n    def list_quotas(self, ctx):\n        rd_quotas_count = random.randint(MIN_QUOTA, MAX_QUOTA)\n        LOG.info(\"###########fake_quotas for %s: %d\"\n                 % (self.storage_id, rd_quotas_count))\n        quota_list = []\n        for idx in range(rd_quotas_count):\n            qtype = list(constants.QuotaType.ALL)\n            qtype_len = len(constants.QuotaType.ALL) - 1\n            max_cap = random.randint(1111, 9999)\n            fslimit = random.randint(max_cap * 7, max_cap * 8)\n            fhlimit = random.randint(max_cap * 8, max_cap * 9)\n            slimit = random.randint(max_cap * 7000, max_cap * 8000)\n            hlimit = random.randint(max_cap * 8000, max_cap * 9000)\n            user_group = ['usr_', 'grp_']\n            q = {\n                \"native_quota_id\": \"quota_\" + str(idx),\n                \"type\": qtype[random.randint(0, qtype_len)],\n                \"storage_id\": self.storage_id,\n                \"native_filesystem_id\": \"quota_\"\n                                        + str(random.randint(0, 99)),\n                \"native_qtree_id\": \"qtree_\"\n                                   + str(random.randint(0, 99)),\n                \"capacity_hard_limit\": hlimit,\n                \"capacity_soft_limit\": slimit,\n                \"file_hard_limit\": 
fhlimit,\n                \"file_soft_limit\": fslimit,\n                \"file_count\": random.randint(0, max_cap * 10),\n                \"used_capacity\": random.randint(0, max_cap * 10000),\n                \"user_group_name\": user_group[random.randint(0, 1)]\n                                   + str(random.randint(0, 99)),\n            }\n            quota_list.append(q)\n        return quota_list\n\n    def list_filesystems(self, ctx):\n        rd_filesystems_count = random.randint(MIN_FS, MAX_FS)\n        LOG.info(\"###########fake_filesystems for %s: %d\"\n                 % (self.storage_id, rd_filesystems_count))\n        filesystem_list = []\n        for idx in range(rd_filesystems_count):\n            total, used, free = self._get_random_capacity()\n            boolean = [True, False]\n            sts = list(constants.FilesystemStatus.ALL)\n            sts_len = len(constants.FilesystemStatus.ALL) - 1\n            worm = list(constants.WORMType.ALL)\n            worm_len = len(constants.WORMType.ALL) - 1\n            alloc_type = list(constants.VolumeType.ALL)\n            alloc_type_len = len(constants.VolumeType.ALL) - 1\n            security = list(constants.NASSecurityMode.ALL)\n            security_len = len(constants.NASSecurityMode.ALL) - 1\n            f = {\n                \"name\": \"filesystem_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_filesystem_id\": \"filesystem_\" + str(idx),\n                \"native_pool_id\": \"storagePool_\" + str(idx),\n                \"status\": sts[random.randint(0, sts_len)],\n                \"type\": alloc_type[random.randint(0, alloc_type_len)],\n                \"security_mode\": security[random.randint(0, security_len)],\n                \"total_capacity\": total,\n                \"used_capacity\": used,\n                \"free_capacity\": free,\n                \"worm\": worm[random.randint(0, worm_len)],\n                \"deduplicated\": boolean[random.randint(0, 1)],\n                \"compressed\": boolean[random.randint(0, 1)],\n            }\n            filesystem_list.append(f)\n        return filesystem_list\n\n    def list_qtrees(self, ctx):\n        rd_qtrees_count = random.randint(MIN_QTREE, MAX_QTREE)\n        LOG.info(\"###########fake_qtrees for %s: %d\"\n                 % (self.storage_id, rd_qtrees_count))\n        qtree_list = []\n        for idx in range(rd_qtrees_count):\n            security = list(constants.NASSecurityMode.ALL)\n            security_len = len(constants.NASSecurityMode.ALL) - 1\n\n            t = {\n                \"name\": \"qtree_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_qtree_id\": \"qtree_\" + str(idx),\n                \"native_filesystem_id\": \"filesystem_\"\n                                        + str(random.randint(0, 99)),\n                \"security_mode\": security[random.randint(0, security_len)],\n                \"path\": \"/path/qtree_\" + str(random.randint(0, 99)),\n            }\n            qtree_list.append(t)\n\n        return qtree_list\n\n    def list_shares(self, ctx):\n        rd_shares_count = random.randint(MIN_SHARE, MAX_SHARE)\n        LOG.info(\"###########fake_shares for %s: %d\"\n                 % (self.storage_id, rd_shares_count))\n        share_list = []\n        for idx in range(rd_shares_count):\n            pro = list(constants.ShareProtocol.ALL)\n            pro_len = len(constants.ShareProtocol.ALL) - 1\n            c = {\n                \"name\": 
\"share_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_share_id\": \"share_\" + str(idx),\n                \"native_filesystem_id\": \"filesystem_\"\n                                        + str(random.randint(0, 99)),\n                \"native_qtree_id\": \"qtree_\"\n                                   + str(random.randint(0, 99)),\n                \"protocol\": pro[random.randint(0, pro_len)],\n                \"path\": \"/path/share_\" + str(random.randint(0, 99)),\n            }\n            share_list.append(c)\n        return share_list\n\n    def add_trap_config(self, context, trap_config):\n        pass  # Fakedriver do not require to add trap config\n\n    def remove_trap_config(self, context, trap_config):\n        pass  # Fakedriver do not require to remove trap config\n\n    @staticmethod\n    def parse_alert(context, alert):\n        pass  # Fakedriver do not require to parse alert\n\n    def clear_alert(self, context, alert):\n        pass  # Fakedriver do not require to clear alert\n\n    def list_alerts(self, context, query_para=None):\n        alert_list = [{\n            \"storage_id\": self.storage_id,\n            'alert_id': str(random.randint(1111111, 9999999)),\n            'sequence_number': 100,\n            'alert_name': 'SNMP connect failed',\n            'category': 'Fault',\n            'severity': 'Major',\n            'type': 'OperationalViolation',\n            'location': 'NetworkEntity=entity1',\n            'description': \"SNMP connection to the storage failed.\",\n            'recovery_advice': \"Check snmp configurations.\",\n            'occur_time': int(time.time())\n        }, {\n            \"storage_id\": self.storage_id,\n            'alert_id': str(random.randint(1111111, 9999999)),\n            'sequence_number': 101,\n            'alert_name': 'Link state down',\n            'category': 'Fault',\n            'severity': 'Critical',\n            'type': 'CommunicationsAlarm',\n            'location': 'NetworkEntity=entity2',\n            'description': \"Backend link has gone down\",\n            'recovery_advice': \"Recheck the network configuration setting.\",\n            'occur_time': int(time.time())\n        }, {\n            \"storage_id\": self.storage_id,\n            'alert_id': str(random.randint(1111111, 9999999)),\n            'sequence_number': 102,\n            'alert_name': 'Power failure',\n            'category': 'Fault',\n            'severity': 'Fatal',\n            'type': 'OperationalViolation',\n            'location': 'NetworkEntity=entity3',\n            'description': \"Power failure occurred. 
\",\n            'recovery_advice': \"Investigate power connection.\",\n            'occur_time': int(time.time())\n        }, {\n            \"storage_id\": self.storage_id,\n            'alert_id': str(random.randint(1111111, 9999999)),\n            'sequence_number': 103,\n            'alert_name': 'Communication failure',\n            'category': 'Fault',\n            'severity': 'Critical',\n            'type': 'CommunicationsAlarm',\n            'location': 'NetworkEntity=network1',\n            'description': \"Communication link gone down\",\n            'recovery_advice': \"Consult network administrator\",\n            'occur_time': int(time.time())\n        }]\n        return alert_list\n\n    @wait_random(MIN_WAIT, MAX_WAIT)\n    def _get_volume_range(self, start, end):\n        volume_list = []\n\n        for i in range(start, end):\n            total, used, free = self._get_random_capacity()\n            v = {\n                \"name\": \"volume_\" + str(i),\n                \"storage_id\": self.storage_id,\n                \"description\": \"Fake Volume\",\n                \"status\": \"normal\",\n                \"native_volume_id\": \"volume_\" + str(i),\n                \"wwn\": \"fake_wwn_\" + str(i),\n                \"total_capacity\": total,\n                \"used_capacity\": used,\n                \"free_capacity\": free,\n            }\n            volume_list.append(v)\n        return volume_list\n\n    def _get_random_performance(self, metric_list, start_time, end_time):\n        def get_random_timestamp_value():\n            rtv = {}\n            timestamp = start_time\n            while timestamp < end_time:\n                rtv[timestamp] = random.uniform(1, 100)\n                timestamp += MINIMUM_SAMPLE_DURATION_IN_MS\n\n            return rtv\n\n        # The sample performance_params after filling looks like,\n        # performance_params = {timestamp1: value1, timestamp2: value2}\n        performance_params = {}\n        for key in metric_list.keys():\n            performance_params[key] = get_random_timestamp_value()\n        return performance_params\n\n    @wait_random(MIN_WAIT, MAX_WAIT)\n    def get_resource_perf_metrics(self, storage_id, start_time, end_time,\n                                  resource_type, metric_list):\n        LOG.info(\"###########collecting metrics for resource %s: from\"\n                 \" storage  %s\" % (resource_type, self.storage_id))\n        resource_metrics = []\n        resource_count = RESOURCE_COUNT_DICT[resource_type]\n\n        for i in range(resource_count):\n            labels = {'storage_id': storage_id,\n                      'resource_type': resource_type,\n                      'resource_id': resource_type + '_' + str(i),\n                      'type': 'RAW'}\n            fake_metrics = self._get_random_performance(metric_list,\n                                                        start_time, end_time)\n            for key in metric_list.keys():\n                labels['unit'] = metric_list[key]['unit']\n                m = constants.metric_struct(name=key, labels=labels,\n                                            values=fake_metrics[key])\n                resource_metrics.append(copy.deepcopy(m))\n        return resource_metrics\n\n    @wait_random(MIN_WAIT, MAX_WAIT)\n    def collect_perf_metrics(self, context, storage_id,\n                             resource_metrics, start_time,\n                             end_time):\n        \"\"\"Collects performance metric for the given interval\"\"\"\n     
   merged_metrics = []\n        for key in resource_metrics.keys():\n            m = self.get_resource_perf_metrics(storage_id,\n                                               start_time,\n                                               end_time, key,\n                                               resource_metrics[key])\n            merged_metrics += m\n        return merged_metrics\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        \"\"\"Get capability of supported driver\"\"\"\n        return {\n            'is_historic': False,\n            'performance_metric_retention_window': 4500,\n            'resource_metrics': {\n                ResourceType.STORAGE: {\n                    StorageMetric.THROUGHPUT.name: {\n                        \"unit\": StorageMetric.THROUGHPUT.unit,\n                        \"description\": StorageMetric.THROUGHPUT.description\n                    },\n                    StorageMetric.RESPONSE_TIME.name: {\n                        \"unit\": StorageMetric.RESPONSE_TIME.unit,\n                        \"description\": StorageMetric.RESPONSE_TIME.description\n                    },\n                    StorageMetric.READ_RESPONSE_TIME.name: {\n                        \"unit\": StorageMetric.READ_RESPONSE_TIME.unit,\n                        \"description\":\n                            StorageMetric.READ_RESPONSE_TIME.description\n                    },\n                    StorageMetric.WRITE_RESPONSE_TIME.name: {\n                        \"unit\": StorageMetric.WRITE_RESPONSE_TIME.unit,\n                        \"description\":\n                            StorageMetric.WRITE_RESPONSE_TIME.description\n                    },\n                    StorageMetric.IOPS.name: {\n                        \"unit\": StorageMetric.IOPS.unit,\n                        \"description\": StorageMetric.IOPS.description\n                    },\n                    StorageMetric.READ_THROUGHPUT.name: {\n                        \"unit\": StorageMetric.READ_THROUGHPUT.unit,\n                        \"description\":\n                            StorageMetric.READ_THROUGHPUT.description\n                    },\n                    StorageMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": StorageMetric.WRITE_THROUGHPUT.unit,\n                        \"description\":\n                            StorageMetric.WRITE_THROUGHPUT.description\n                    },\n                    StorageMetric.READ_IOPS.name: {\n                        \"unit\": StorageMetric.READ_IOPS.unit,\n                        \"description\": StorageMetric.READ_IOPS.description\n                    },\n                    StorageMetric.WRITE_IOPS.name: {\n                        \"unit\": StorageMetric.WRITE_IOPS.unit,\n                        \"description\": StorageMetric.WRITE_IOPS.description\n                    },\n                },\n                ResourceType.STORAGE_POOL: {\n                    StoragePoolMetric.THROUGHPUT.name: {\n                        \"unit\": StoragePoolMetric.THROUGHPUT.unit,\n                        \"description\": StoragePoolMetric.THROUGHPUT.description\n                    },\n                    StoragePoolMetric.RESPONSE_TIME.name: {\n                        \"unit\": StoragePoolMetric.RESPONSE_TIME.unit,\n                        \"description\":\n                            StoragePoolMetric.RESPONSE_TIME.description\n                    },\n                    StoragePoolMetric.IOPS.name: {\n                        
\"unit\": StoragePoolMetric.IOPS.unit,\n                        \"description\": StoragePoolMetric.IOPS.description\n                    },\n                    StoragePoolMetric.READ_THROUGHPUT.name: {\n                        \"unit\": StoragePoolMetric.READ_THROUGHPUT.unit,\n                        \"description\":\n                            StoragePoolMetric.READ_THROUGHPUT.description\n                    },\n                    StoragePoolMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": StoragePoolMetric.WRITE_THROUGHPUT.unit,\n                        \"description\":\n                            StoragePoolMetric.WRITE_THROUGHPUT.description\n                    },\n                    StoragePoolMetric.READ_IOPS.name: {\n                        \"unit\": StoragePoolMetric.READ_IOPS.unit,\n                        \"description\": StoragePoolMetric.READ_IOPS.description\n                    },\n                    StoragePoolMetric.WRITE_IOPS.name: {\n                        \"unit\": StoragePoolMetric.WRITE_IOPS.unit,\n                        \"description\": StoragePoolMetric.WRITE_IOPS.description\n                    },\n\n                },\n                ResourceType.VOLUME: {\n                    VolumeMetric.THROUGHPUT.name: {\n                        \"unit\": VolumeMetric.THROUGHPUT.unit,\n                        \"description\": VolumeMetric.THROUGHPUT.description\n                    },\n                    VolumeMetric.RESPONSE_TIME.name: {\n                        \"unit\": VolumeMetric.RESPONSE_TIME.unit,\n                        \"description\": VolumeMetric.RESPONSE_TIME.description\n                    },\n                    VolumeMetric.READ_RESPONSE_TIME.name: {\n                        \"unit\": VolumeMetric.READ_RESPONSE_TIME.unit,\n                        \"description\":\n                            VolumeMetric.READ_RESPONSE_TIME.description\n                    },\n                    VolumeMetric.WRITE_RESPONSE_TIME.name: {\n                        \"unit\": VolumeMetric.WRITE_RESPONSE_TIME.unit,\n                        \"description\":\n                            VolumeMetric.WRITE_RESPONSE_TIME.description\n                    },\n                    VolumeMetric.IOPS.name: {\n                        \"unit\": VolumeMetric.IOPS.unit,\n                        \"description\": VolumeMetric.IOPS.description\n                    },\n                    VolumeMetric.READ_THROUGHPUT.name: {\n                        \"unit\": VolumeMetric.READ_THROUGHPUT.unit,\n                        \"description\": VolumeMetric.READ_THROUGHPUT.description\n                    },\n                    VolumeMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": VolumeMetric.WRITE_THROUGHPUT.unit,\n                        \"description\":\n                            VolumeMetric.WRITE_THROUGHPUT.description\n                    },\n                    VolumeMetric.READ_IOPS.name: {\n                        \"unit\": VolumeMetric.READ_IOPS.unit,\n                        \"description\": VolumeMetric.READ_IOPS.description\n                    },\n                    VolumeMetric.WRITE_IOPS.name: {\n                        \"unit\": VolumeMetric.WRITE_IOPS.unit,\n                        \"description\": VolumeMetric.WRITE_IOPS.description\n                    },\n                    VolumeMetric.CACHE_HIT_RATIO.name: {\n                        \"unit\": VolumeMetric.CACHE_HIT_RATIO.unit,\n                        \"description\": 
VolumeMetric.CACHE_HIT_RATIO.description\n                    },\n                    VolumeMetric.READ_CACHE_HIT_RATIO.name: {\n                        \"unit\": VolumeMetric.READ_CACHE_HIT_RATIO.unit,\n                        \"description\":\n                            VolumeMetric.READ_CACHE_HIT_RATIO.description\n                    },\n                    VolumeMetric.WRITE_CACHE_HIT_RATIO.name: {\n                        \"unit\": VolumeMetric.WRITE_CACHE_HIT_RATIO.unit,\n                        \"description\":\n                            VolumeMetric.WRITE_CACHE_HIT_RATIO.description\n                    },\n                    VolumeMetric.IO_SIZE.name: {\n                        \"unit\": VolumeMetric.IO_SIZE.unit,\n                        \"description\": VolumeMetric.IO_SIZE.description\n                    },\n                    VolumeMetric.READ_IO_SIZE.name: {\n                        \"unit\": VolumeMetric.READ_IO_SIZE.unit,\n                        \"description\": VolumeMetric.READ_IO_SIZE.description\n                    },\n                    VolumeMetric.WRITE_IO_SIZE.name: {\n                        \"unit\": VolumeMetric.WRITE_IO_SIZE.unit,\n                        \"description\": VolumeMetric.WRITE_IO_SIZE.description\n                    },\n                },\n                ResourceType.CONTROLLER: {\n                    ControllerMetric.THROUGHPUT.name: {\n                        \"unit\": ControllerMetric.THROUGHPUT.unit,\n                        \"description\": ControllerMetric.THROUGHPUT.description\n                    },\n                    ControllerMetric.RESPONSE_TIME.name: {\n                        \"unit\": ControllerMetric.RESPONSE_TIME.unit,\n                        \"description\":\n                            ControllerMetric.RESPONSE_TIME.description\n                    },\n                    ControllerMetric.IOPS.name: {\n                        \"unit\": ControllerMetric.IOPS.unit,\n                        \"description\": ControllerMetric.IOPS.description\n                    },\n                    ControllerMetric.READ_THROUGHPUT.name: {\n                        \"unit\": ControllerMetric.READ_THROUGHPUT.unit,\n                        \"description\":\n                            ControllerMetric.READ_THROUGHPUT.description\n                    },\n                    ControllerMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": ControllerMetric.WRITE_THROUGHPUT.unit,\n                        \"description\":\n                            ControllerMetric.WRITE_THROUGHPUT.description\n                    },\n                    ControllerMetric.READ_IOPS.name: {\n                        \"unit\": ControllerMetric.READ_IOPS.unit,\n                        \"description\": ControllerMetric.READ_IOPS.description\n                    },\n                    ControllerMetric.WRITE_IOPS.name: {\n                        \"unit\": ControllerMetric.WRITE_IOPS.unit,\n                        \"description\": ControllerMetric.WRITE_IOPS.description\n                    },\n                    ControllerMetric.CPU_USAGE.name: {\n                        \"unit\": ControllerMetric.CPU_USAGE.unit,\n                        \"description\": ControllerMetric.CPU_USAGE.description\n                    }\n                },\n                ResourceType.PORT: {\n                    PortMetric.THROUGHPUT.name: {\n                        \"unit\": PortMetric.THROUGHPUT.unit,\n                        \"description\": 
PortMetric.THROUGHPUT.description\n                    },\n                    PortMetric.RESPONSE_TIME.name: {\n                        \"unit\": PortMetric.RESPONSE_TIME.unit,\n                        \"description\": PortMetric.RESPONSE_TIME.description\n                    },\n                    PortMetric.IOPS.name: {\n                        \"unit\": PortMetric.IOPS.unit,\n                        \"description\": PortMetric.IOPS.description\n                    },\n                    PortMetric.READ_THROUGHPUT.name: {\n                        \"unit\": PortMetric.READ_THROUGHPUT.unit,\n                        \"description\": PortMetric.READ_THROUGHPUT.description\n                    },\n                    PortMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": PortMetric.WRITE_THROUGHPUT.unit,\n                        \"description\": PortMetric.WRITE_THROUGHPUT.description\n                    },\n                    PortMetric.READ_IOPS.name: {\n                        \"unit\": PortMetric.READ_IOPS.unit,\n                        \"description\": PortMetric.READ_IOPS.description\n                    },\n                    PortMetric.WRITE_IOPS.name: {\n                        \"unit\": PortMetric.WRITE_IOPS.unit,\n                        \"description\": PortMetric.WRITE_IOPS.description\n                    },\n\n                },\n                ResourceType.DISK: {\n                    DiskMetric.THROUGHPUT.name: {\n                        \"unit\": DiskMetric.THROUGHPUT.unit,\n                        \"description\": DiskMetric.THROUGHPUT.description\n                    },\n                    DiskMetric.RESPONSE_TIME.name: {\n                        \"unit\": DiskMetric.RESPONSE_TIME.unit,\n                        \"description\": DiskMetric.RESPONSE_TIME.description\n                    },\n                    DiskMetric.IOPS.name: {\n                        \"unit\": DiskMetric.IOPS.unit,\n                        \"description\": DiskMetric.IOPS.description\n                    },\n                    DiskMetric.READ_THROUGHPUT.name: {\n                        \"unit\": DiskMetric.READ_THROUGHPUT.unit,\n                        \"description\": DiskMetric.READ_THROUGHPUT.description\n                    },\n                    DiskMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": DiskMetric.WRITE_THROUGHPUT.unit,\n                        \"description\": DiskMetric.WRITE_THROUGHPUT.description\n                    },\n                    DiskMetric.READ_IOPS.name: {\n                        \"unit\": DiskMetric.READ_IOPS.unit,\n                        \"description\": DiskMetric.READ_IOPS.description\n                    },\n                    DiskMetric.WRITE_IOPS.name: {\n                        \"unit\": DiskMetric.WRITE_IOPS.unit,\n                        \"description\": DiskMetric.WRITE_IOPS.description\n                    },\n\n                },\n                ResourceType.FILESYSTEM: {\n                    FileSystemMetric.THROUGHPUT.name: {\n                        \"unit\": FileSystemMetric.THROUGHPUT.unit,\n                        \"description\": FileSystemMetric.THROUGHPUT.description\n                    },\n                    FileSystemMetric.READ_RESPONSE_TIME.name: {\n                        \"unit\": FileSystemMetric.READ_RESPONSE_TIME.unit,\n                        \"description\":\n                            FileSystemMetric.READ_RESPONSE_TIME.description\n                    },\n                    
FileSystemMetric.WRITE_RESPONSE_TIME.name: {\n                        \"unit\": FileSystemMetric.WRITE_RESPONSE_TIME.unit,\n                        \"description\":\n                            FileSystemMetric.WRITE_RESPONSE_TIME.description\n                    },\n                    FileSystemMetric.IOPS.name: {\n                        \"unit\": FileSystemMetric.IOPS.unit,\n                        \"description\": FileSystemMetric.IOPS.description\n                    },\n                    FileSystemMetric.READ_THROUGHPUT.name: {\n                        \"unit\": FileSystemMetric.READ_THROUGHPUT.unit,\n                        \"description\":\n                            FileSystemMetric.READ_THROUGHPUT.description\n                    },\n                    FileSystemMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": FileSystemMetric.WRITE_THROUGHPUT.unit,\n                        \"description\":\n                            FileSystemMetric.WRITE_THROUGHPUT.description\n                    },\n                    FileSystemMetric.READ_IOPS.name: {\n                        \"unit\": FileSystemMetric.READ_IOPS.unit,\n                        \"description\": FileSystemMetric.READ_IOPS.description\n                    },\n                    FileSystemMetric.WRITE_IOPS.name: {\n                        \"unit\": FileSystemMetric.WRITE_IOPS.unit,\n                        \"description\": FileSystemMetric.WRITE_IOPS.description\n                    },\n                    FileSystemMetric.IO_SIZE.name: {\n                        \"unit\": FileSystemMetric.IO_SIZE.unit,\n                        \"description\": FileSystemMetric.IO_SIZE.description\n                    },\n                    FileSystemMetric.READ_IO_SIZE.name: {\n                        \"unit\": FileSystemMetric.READ_IO_SIZE.unit,\n                        \"description\":\n                            FileSystemMetric.READ_IO_SIZE.description\n                    },\n                    FileSystemMetric.WRITE_IO_SIZE.name: {\n                        \"unit\": FileSystemMetric.WRITE_IO_SIZE.unit,\n                        \"description\":\n                            FileSystemMetric.WRITE_IO_SIZE.description\n                    },\n                },\n\n            }\n\n        }\n\n    def list_storage_host_initiators(self, ctx):\n        rd_storage_host_initiators_count = random.randint(\n            MIN_STORAGE_HOST_INITIATORS, MAX_STORAGE_HOST_INITIATORS)\n        LOG.info(\"###########fake_storage_host_initiators for %s: %d\"\n                 % (self.storage_id, rd_storage_host_initiators_count))\n        storage_host_initiators_list = []\n        for idx in range(rd_storage_host_initiators_count):\n            f = {\n                \"name\": \"storage_host_initiator_\" + str(idx),\n                \"description\": \"storage_host_initiator_\" + str(idx),\n                \"alias\": \"storage_host_initiator_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_storage_host_initiator_id\":\n                    \"storage_host_initiator_\" + str(idx),\n                \"wwn\": \"wwn_\" + str(idx),\n                \"status\": \"Normal\",\n                \"native_storage_host_id\": \"storage_host_\" + str(idx),\n            }\n            storage_host_initiators_list.append(f)\n        return storage_host_initiators_list\n\n    def list_storage_hosts(self, ctx):\n        rd_storage_hosts_count = self.rd_storage_hosts_count\n        
LOG.info(\"###########fake_storage_hosts for %s: %d\"\n                 % (self.storage_id, rd_storage_hosts_count))\n        storage_host_list = []\n        for idx in range(rd_storage_hosts_count):\n            f = {\n                \"name\": \"storage_host_\" + str(idx),\n                \"description\": \"storage_host_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_storage_host_id\": \"storage_host_\" + str(idx),\n                \"os_type\": \"linux\",\n                \"status\": \"Normal\",\n                \"ip_address\": \"1.2.3.\" + str(idx)\n            }\n            storage_host_list.append(f)\n        return storage_host_list\n\n    def list_storage_host_groups(self, ctx):\n        rd_storage_host_groups_count = random.randint(\n            MIN_STORAGE_HOST_GROUPS, MAX_STORAGE_HOST_GROUPS)\n        LOG.info(\"###########fake_storage_host_groups for %s: %d\"\n                 % (self.storage_id, rd_storage_host_groups_count))\n        storage_host_grp_list = []\n        for idx in range(rd_storage_host_groups_count):\n            # Create hosts in hosts group\n            host_name_list = []\n            storage_hosts_count = self.rd_storage_hosts_count - 1\n            if storage_hosts_count > 0:\n                for i in range(MAX_GROUP_RESOURCES_SIZE):\n                    host_name = \"storage_host_\" + str(\n                        random.randint(0, storage_hosts_count))\n                    if host_name not in host_name_list:\n                        host_name_list.append(host_name)\n\n            # Create comma separated list\n            storage_hosts = None\n            for host in host_name_list:\n                if storage_hosts:\n                    storage_hosts = storage_hosts + \",\" + host\n                else:\n                    storage_hosts = host\n\n            f = {\n                \"name\": \"storage_host_group_\" + str(idx),\n                \"description\": \"storage_host_group_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_storage_host_group_id\": \"storage_host_group_\"\n                                                + str(idx),\n                \"storage_hosts\": storage_hosts\n            }\n            storage_host_grp_list.append(f)\n\n        storage_host_grp_relation_list = []\n        for storage_host_group in storage_host_grp_list:\n            storage_hosts = storage_host_group.pop('storage_hosts', None)\n            if not storage_hosts:\n                continue\n            storage_hosts = storage_hosts.split(',')\n\n            for storage_host in storage_hosts:\n                storage_host_group_relation = {\n                    'storage_id': self.storage_id,\n                    'native_storage_host_group_id':\n                        storage_host_group['native_storage_host_group_id'],\n                    'native_storage_host_id': storage_host\n                }\n                storage_host_grp_relation_list \\\n                    .append(storage_host_group_relation)\n\n        result = {\n            'storage_host_groups': storage_host_grp_list,\n            'storage_host_grp_host_rels': storage_host_grp_relation_list\n        }\n\n        return result\n\n    def list_port_groups(self, ctx):\n        rd_port_groups_count = random.randint(MIN_PORT_GROUPS,\n                                              MAX_PORT_GROUPS)\n        LOG.info(\"###########fake_port_groups for %s: %d\"\n                 % (self.storage_id, rd_port_groups_count))\n  
      port_grp_list = []\n        for idx in range(rd_port_groups_count):\n            # Create ports in ports group\n            port_name_list = []\n            ports_count = self.rd_ports_count - 1\n            if ports_count > 0:\n                for i in range(MAX_GROUP_RESOURCES_SIZE):\n                    port_name = \"port_\" + str(\n                        random.randint(0, ports_count))\n                    if port_name not in port_name_list:\n                        port_name_list.append(port_name)\n\n            # Create comma separated list\n            ports = None\n            for port in port_name_list:\n                if ports:\n                    ports = ports + \",\" + port\n                else:\n                    ports = port\n\n            f = {\n                \"name\": \"port_group_\" + str(idx),\n                \"description\": \"port_group_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_port_group_id\": \"port_group_\" + str(idx),\n                \"ports\": ports\n            }\n\n            port_grp_list.append(f)\n\n        port_group_relation_list = []\n        for port_group in port_grp_list:\n            ports = port_group.pop('ports', None)\n            if not ports:\n                continue\n            ports = ports.split(',')\n\n            for port in ports:\n                port_group_relation = {\n                    'storage_id': self.storage_id,\n                    'native_port_group_id':\n                        port_group['native_port_group_id'],\n                    'native_port_id': port\n                }\n                port_group_relation_list.append(port_group_relation)\n        result = {\n            'port_groups': port_grp_list,\n            'port_grp_port_rels': port_group_relation_list\n        }\n        return result\n\n    def list_volume_groups(self, ctx):\n        rd_volume_groups_count = random.randint(MIN_VOLUME_GROUPS,\n                                                MAX_VOLUME_GROUPS)\n        LOG.info(\"###########fake_volume_groups for %s: %d\"\n                 % (self.storage_id, rd_volume_groups_count))\n        volume_grp_list = []\n        for idx in range(rd_volume_groups_count):\n            # Create volumes in volumes group\n            volume_name_list = []\n            volumes_count = self.rd_volumes_count - 1\n            if volumes_count > 0:\n                for i in range(MAX_GROUP_RESOURCES_SIZE):\n                    volume_name = \"volume_\" + str(\n                        random.randint(0, volumes_count))\n                    if volume_name not in volume_name_list:\n                        volume_name_list.append(volume_name)\n\n            # Create comma separated list\n            volumes = None\n            for volume in volume_name_list:\n                if volumes:\n                    volumes = volumes + \",\" + volume\n                else:\n                    volumes = volume\n\n            f = {\n                \"name\": \"volume_group_\" + str(idx),\n                \"description\": \"volume_group_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_volume_group_id\": \"volume_group_\" + str(idx),\n                \"volumes\": volumes\n            }\n            volume_grp_list.append(f)\n\n        volume_group_relation_list = []\n        for volume_group in volume_grp_list:\n            volumes = volume_group.pop('volumes', None)\n            if not volumes:\n                continue\n            volumes = 
volumes.split(',')\n\n            for volume in volumes:\n                volume_group_relation = {\n                    'storage_id': self.storage_id,\n                    'native_volume_group_id':\n                        volume_group['native_volume_group_id'],\n                    'native_volume_id': volume}\n                volume_group_relation_list.append(volume_group_relation)\n\n        result = {\n            'volume_groups': volume_grp_list,\n            'vol_grp_vol_rels': volume_group_relation_list\n        }\n        return result\n\n    def list_masking_views(self, ctx):\n        rd_masking_views_count = random.randint(MIN_MASKING_VIEWS,\n                                                MAX_MASKING_VIEWS)\n        LOG.info(\"##########fake_masking_views for %s: %d\"\n                 % (self.storage_id, rd_masking_views_count))\n        masking_view_list = []\n\n        for idx in range(rd_masking_views_count):\n            is_group_based = random.randint(NON_GROUP_BASED_MASKING,\n                                            GROUP_BASED_MASKING)\n            if is_group_based:\n                native_storage_host_group_id = \"storage_host_group_\" + str(idx)\n                native_volume_group_id = \"volume_group_\" + str(idx)\n                native_port_group_id = \"port_group_\" + str(idx)\n                native_storage_host_id = \"\"\n                native_volume_id = \"\"\n\n            else:\n                native_storage_host_group_id = \"\"\n                native_volume_group_id = \"\"\n                native_port_group_id = \"\"\n                native_storage_host_id = \"storage_host_\" + str(idx)\n                native_volume_id = \"volume_\" + str(idx)\n\n            f = {\n                \"name\": \"masking_view_\" + str(idx),\n                \"description\": \"masking_view_\" + str(idx),\n                \"storage_id\": self.storage_id,\n                \"native_masking_view_id\": \"masking_view_\" + str(idx),\n                \"native_storage_host_group_id\": native_storage_host_group_id,\n                \"native_volume_group_id\": native_volume_group_id,\n                \"native_port_group_id\": native_port_group_id,\n                \"native_storage_host_id\": native_storage_host_id,\n                \"native_volume_id\": native_volume_id,\n            }\n            masking_view_list.append(f)\n        return masking_view_list\n"
  },
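  {
    "path": "docs/sketches/fake_group_relations_sketch.py",
    "content": "# Editor's note: hypothetical illustrative sketch, not part of the delfin\n# source tree. It distills the pattern every fake-driver group listing\n# (hosts, ports, volumes) follows: members are first carried as a\n# comma-separated string on the group dict, then popped off and flattened\n# into one relation dict per (group, member) pair.\n\n\ndef build_group_relations(groups, storage_id, group_id_key, member_id_key,\n                          members_field):\n    \"\"\"Expand a comma-separated member list into per-member relations.\"\"\"\n    relations = []\n    for group in groups:\n        members = group.pop(members_field, None)\n        if not members:\n            continue\n        for member in members.split(','):\n            relations.append({\n                'storage_id': storage_id,\n                group_id_key: group[group_id_key],\n                member_id_key: member,\n            })\n    return relations\n\n\nif __name__ == '__main__':\n    groups = [{\n        'name': 'volume_group_0',\n        'native_volume_group_id': 'volume_group_0',\n        'volumes': 'volume_1,volume_3',\n    }]\n    rels = build_group_relations(groups, 'fake_storage_id',\n                                 'native_volume_group_id',\n                                 'native_volume_id', 'volumes')\n    assert rels[0]['native_volume_id'] == 'volume_1'\n    print(rels)\n"
  },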
  {
    "path": "delfin/drivers/fujitsu/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/fujitsu/eternus/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/fujitsu/eternus/cli_handler.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport hashlib\nimport re\nimport threading\n\nimport six\nfrom oslo_log import log\nfrom oslo_utils import units\n\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.drivers.fujitsu.eternus import consts\nfrom delfin.drivers.fujitsu.eternus.consts import DIGITAL_CONSTANT\nfrom delfin.drivers.fujitsu.eternus.eternus_ssh_client import \\\n    EternusSSHPool\nfrom delfin.drivers.utils.tools import Tools\n\nLOG = log.getLogger(__name__)\n\n\nclass CliHandler(object):\n    lock = None\n\n    def __init__(self, **kwargs):\n        self.lock = threading.RLock()\n        self.kwargs = kwargs\n        self.ssh_pool = EternusSSHPool(**kwargs)\n\n    def login(self):\n        \"\"\"Test the SSH connection.\"\"\"\n        try:\n            self.exec_command(consts.GET_STORAGE_STATUS)\n        except Exception as e:\n            error = six.text_type(e)\n            LOG.error(\"Login error: %s\", error)\n            raise e\n\n    def exec_command(self, command, exe_time=consts.DEFAULT_EXE_TIME):\n        try:\n            self.lock.acquire()\n            res = self.ssh_pool.do_exec_shell([\n                consts.SET_CLIENV_FORCE_UNLOCK, command], exe_time)\n        except Exception as e:\n            LOG.error(\"Execute command error: %s\", six.text_type(e))\n            raise e\n        finally:\n            self.lock.release()\n        if res:\n            if 'Error: ' in res:\n                LOG.info(res)\n                return None\n        return res\n\n    def common_data_encapsulation(self, command):\n        common_data_str = self.exec_command(command)\n        common_data_dict = dict()\n        if common_data_str:\n            common_data_arr = common_data_str.split('\\n')\n            for common_data_row in common_data_arr:\n                if '[' in common_data_row and ']' in common_data_row:\n                    name_start_index = common_data_row.index('[')\n                    name_end_index = common_data_row.index(']')\n                    key = common_data_row[:name_start_index].strip()\n                    value = common_data_row[name_start_index\n                                            + 1:name_end_index]\n                    common_data_dict[key] = value\n        return common_data_dict\n\n    def get_controllers(self):\n        controller_data_str = self.exec_command(consts.GET_STORAGE_CONTROLLER)\n        controller_info_list = []\n        try:\n            if controller_data_str:\n                result_data_arr = controller_data_str.split('\\n')\n                controller_info_map = {}\n                for common_data_row in result_data_arr:\n                    row_pattern = re.compile(consts.CONTROLLER_NEWLINE_PATTERN)\n                    row_search_obj = row_pattern.search(common_data_row)\n                    if row_search_obj:\n                        name = row_search_obj.group().split(' ')[0]\n                        if controller_info_map:\n
                            controller_info_list.append(controller_info_map)\n                            controller_info_map = {}\n                        controller_info_map['name'] = name\n                    pattern = re.compile(consts.COMMON_VALUE_PATTERN)\n                    search_obj = pattern.search(common_data_row)\n                    if search_obj:\n                        self.analysis_data_to_map(common_data_row,\n                                                  consts.COMMON_VALUE_PATTERN,\n                                                  controller_info_map)\n                if controller_info_map:\n                    controller_info_list.append(controller_info_map)\n        except Exception as e:\n            err_msg = \"get controller info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return controller_info_list\n\n    def analysis_data_to_map(self, source_info, pattern_str, obj_map):\n        \"\"\"Get the contents in brackets through regular expressions.\n           source_info: Source data, example: \"Memory Size   [4.0GB]\"\n           pattern_str: regular expression, example: \"\\\\[.*\\\\]\"\n        \"\"\"\n        object_info = ''\n        object_infos = re.findall(pattern_str, source_info)\n        if object_infos:\n            object_info = object_infos[0]\n            key = source_info.replace(object_info, '').strip()\n            value = object_info.replace('[', '').replace(']', '')\n            obj_map[key] = value\n        return object_info\n\n    def get_volumes_type(self, volume_id_dict=None, command=None):\n        if volume_id_dict is None:\n            volume_id_dict = {}\n        try:\n            volumes_type_str = self.exec_command(command)\n        except Exception as e:\n            LOG.error(\"Get %s info error: %s\" % (command, six.text_type(e)))\n            return volume_id_dict\n        block = True\n        if volumes_type_str:\n            volumes_type_arr = volumes_type_str.replace('\\r', '').split('\\n')\n            for volumes_type_row_str in volumes_type_arr:\n                if not volumes_type_row_str or \\\n                        consts.CLI_STR in volumes_type_row_str:\n                    continue\n                if consts.SPECIAL_CHARACTERS_TWO in volumes_type_row_str:\n                    block = False\n                    continue\n                if block:\n                    continue\n                volume_type_dict = {}\n                volumes_type_row_arr = volumes_type_row_str.split()\n                volume_id = volumes_type_row_arr[DIGITAL_CONSTANT.ZERO_INT]\n                volume_type = volumes_type_row_arr[\n                    DIGITAL_CONSTANT.MINUS_SIX_INT]\n                volume_type_dict['type'] = volume_type.lower() if \\\n                    volume_type else constants.VolumeType.THICK\n                volume_type_dict['used_capacity'] = int(\n                    volumes_type_row_arr[\n                        DIGITAL_CONSTANT.MINUS_ONE_INT]) * units.Mi\n                volume_id_dict[volume_id] = volume_type_dict\n        return volume_id_dict\n\n    def get_alerts(self, command, query_para, list_alert=None):\n        if not list_alert:\n            list_alert = []\n        events_error_str = self.exec_command(command, consts.ALERT_EXE_TIME)\n        if not events_error_str:\n            return list_alert\n        events_error_dict = self.get_event(events_error_str, query_para)\n        for events_error_dict_values in 
events_error_dict.values():\n            alerts_model = dict()\n            description = events_error_dict_values.get('description')\n            alerts_model['alert_id'] = events_error_dict_values.get('code')\n            severity = events_error_dict_values.get('severity')\n            alerts_model['severity'] = consts.SEVERITY_MAP.get(\n                events_error_dict_values.get('severity'),\n                constants.Severity.NOT_SPECIFIED)\n            alerts_model['category'] = constants.Category.FAULT\n            occur_time = events_error_dict_values.get('occur_time')\n            alerts_model['occur_time'] = occur_time\n            alerts_model['description'] = description\n            alerts_model['type'] = constants.EventType.EQUIPMENT_ALARM\n            alerts_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n            alerts_model['alert_name'] = description\n            alerts_model['match_key'] = hashlib.md5('{}{}{}'.format(\n                occur_time, severity, description).encode()).hexdigest()\n            list_alert.append(alerts_model)\n        return list_alert\n\n    @staticmethod\n    def get_event(events_error_str, query_para):\n        events_error_dict = dict()\n        events_error_arr = events_error_str.split('\\n')\n        for events_error_row_str in events_error_arr:\n            events_error_row_str = events_error_row_str.strip()\n            reg = re.compile(r\"(\\d{4}-\\d{1,2}-\\d{1,2})\")\n            if not re.match(reg, events_error_row_str):\n                continue\n            error_description_dict = dict()\n            time_stamp = Tools().time_str_to_timestamp(\n                events_error_row_str[:consts.OCCUR_TIME_RANGE].strip(),\n                consts.TIME_PATTERN)\n            if query_para is not None:\n                try:\n                    if time_stamp is None or time_stamp \\\n                            < int(query_para.get('begin_time')) or \\\n                            time_stamp > int(query_para.get('end_time')):\n                        continue\n                except Exception as e:\n                    LOG.error(e)\n            severity = events_error_row_str[consts.SEVERITY_RANGE_BEGIN:\n                                            consts.SEVERITY_RANGE_END].strip()\n            code = events_error_row_str[consts.CODE_RANGE_BEGIN:\n                                        consts.CODE_RANGE_END].strip()\n            description = events_error_row_str[consts.DESCRIPTION_RANGE:] \\\n                .strip()\n            key = '{}{}{}'.format(severity, code, description)\n            if events_error_dict.get(key):\n                continue\n            error_description_dict['severity'] = severity\n            error_description_dict['code'] = code\n            error_description_dict['description'] = description\n            error_description_dict['occur_time'] = time_stamp\n            events_error_dict[key] = error_description_dict\n        return events_error_dict\n\n    def format_data(self, command, storage_id, method, is_port=False):\n        data_info = self.exec_command(command)\n        data_list = []\n        if not data_info:\n            return data_list\n        data_array = data_info.split('\\n')\n        data_map = {}\n        for data in data_array:\n            if data and data not in '\\r':\n                temp_data = data.split('  ')\n                temp_data = list(\n                    filter(lambda s: s and s.strip(), temp_data))\n                if len(temp_data) >= 
consts.DATA_VALUE_INDEX:\n                    data_length = consts.DATA_VALUE_INDEX\n                    if is_port:\n                        data_length = len(temp_data)\n                    for i in range(consts.DATA_KEY_INDEX, data_length):\n                        key = temp_data[0].strip()\n                        value = temp_data[i].replace('[', '').replace(']',\n                                                                      '')\n                        value = value.strip()\n                        if data_map.get(i):\n                            data_map[i][key] = value\n                        else:\n                            data_map[i] = {\n                                key: value\n                            }\n            else:\n                data_list.extend(method(data_map, storage_id))\n                data_map = {}\n        if data_map:\n            data_list.extend(method(data_map, storage_id))\n        return data_list\n\n    @staticmethod\n    def format_fc_ports(port_map, storage_id):\n        port_list = []\n        for key in port_map:\n            speed = None\n            if port_map[key].get('Transfer Rate') and (\n                    'Gbit/s' in port_map[key].get('Transfer Rate')):\n                speed = port_map[key].get('Transfer Rate').replace('Gbit/s',\n                                                                   '')\n                speed = int(speed) * units.G\n            name = port_map[key].get('Port')\n            port_model = {\n                'name': name,\n                'storage_id': storage_id,\n                'native_port_id': port_map[key].get('Port'),\n                'location': port_map[key].get('Port'),\n                'type': constants.PortType.FC,\n                'speed': speed,\n            }\n            port_list.append(port_model)\n        return port_list\n\n    @staticmethod\n    def format_disks(disk_map, storage_id):\n        disk_list = []\n        for key in disk_map:\n            speed = None\n            if 'rpm' in disk_map[key].get('Speed'):\n                speed = int(disk_map[key].get('Speed').replace('rpm', ''))\n            size = Tools.get_capacity_size(disk_map[key].get('Size'))\n            physical_type = constants.DiskPhysicalType.UNKNOWN\n            if 'SSD' in disk_map[key].get('Type'):\n                physical_type = consts.DiskPhysicalTypeMap.get('SSD')\n            elif 'Nearline' in disk_map[key].get('Type'):\n                physical_type = consts.DiskPhysicalTypeMap.get('Nearline')\n            elif 'Online' in disk_map[key].get('Type'):\n                physical_type = consts.DiskPhysicalTypeMap.get('Online')\n            elif 'SAS' in disk_map[key].get('Type'):\n                physical_type = consts.DiskPhysicalTypeMap.get('SAS')\n            logical_type = \\\n                consts.DiskLogicalTypeMap.get(\n                    disk_map[key].get('Usage'),\n                    constants.DiskLogicalType.UNKNOWN\n                )\n            status = None\n            if disk_map[key].get('Status').split('('):\n                status = disk_map[key].get('Status').split('(')[0]\n                status = \\\n                    consts.DISK_STATUS_MAP.get(\n                        status.strip(),\n                        constants.DiskStatus.OFFLINE)\n            disk_model = {\n                'name': disk_map[key].get('Location'),\n                'storage_id': storage_id,\n                'native_disk_id': disk_map[key].get('Location'),\n                'serial_number': 
disk_map[key].get('Serial Number'),\n                'manufacturer': disk_map[key].get('Vendor ID'),\n                'model': disk_map[key].get('Type'),\n                'firmware': disk_map[key].get('Firmware Revision'),\n                'location': disk_map[key].get('Location'),\n                'speed': speed,\n                'capacity': size,\n                'status': status,\n                'physical_type': physical_type,\n                'logical_type': logical_type\n            }\n            disk_list.append(disk_model)\n        return disk_list\n\n    def get_volumes_or_pool(self, command, str_pattern):\n        data_str = self.exec_command(command)\n        pool_info_list = []\n        try:\n            if data_str:\n                result_data_arr = data_str.replace('\\r', '').split('\\n')\n                titles = []\n                for common_data_row in result_data_arr:\n                    title_pattern = re.compile(str_pattern)\n                    title_search_obj = title_pattern.search(common_data_row)\n                    if title_search_obj:\n                        titles = common_data_row.split(\",\")\n                    else:\n                        if common_data_row:\n                            values = common_data_row.split(\",\")\n                            if values and len(values) == len(titles):\n                                obj_model = {}\n                                for num in range(len(values)):\n                                    key = titles[num].lower() \\\n                                        .replace(' ', '') \\\n                                        .replace('[', '') \\\n                                        .replace(']', '')\n                                    obj_model[key] = values[num]\n                                if obj_model:\n                                    pool_info_list.append(obj_model)\n        except Exception as e:\n            err_msg = \"execution {}: error: {}\".format(command,\n                                                       six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return pool_info_list\n\n    def get_ports_status(self):\n        port_data_str = self.exec_command(consts.GET_STORAGE_CONTROLLER)\n        port_info_dict = {}\n        try:\n            if port_data_str:\n                result_data_arr = port_data_str.split('\\n')\n                port_info_map = {}\n                name = None\n                for common_data_row in result_data_arr:\n                    row_pattern = re.compile(consts.PORT_NEWLINE_PATTERN)\n                    row_search_obj = row_pattern.search(common_data_row)\n                    if row_search_obj:\n                        name = row_search_obj.group().replace(\n                            ' Information', '')\n                        port_info_map['name'] = name\n                        continue\n                    elif port_info_map:\n                        pattern = re.compile(consts.COMMON_VALUE_PATTERN)\n                        search_obj = pattern.search(common_data_row)\n                        if search_obj:\n                            self.analysis_data_to_map(\n                                common_data_row, consts.COMMON_VALUE_PATTERN,\n                                port_info_map)\n                        if 'WWN' in common_data_row:\n                            port_info_dict[name] = port_info_map\n                            port_info_map = {}\n        except Exception as 
e:\n            err_msg = \"get fc port info error: %s\" % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return port_info_dict\n"
  },
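  {
    "path": "docs/sketches/eternus_cli_bracket_parse_sketch.py",
    "content": "# Editor's note: hypothetical illustrative sketch, not part of the delfin\n# source tree. It shows, self-contained, how cli_handler.py turns ETERNUS\n# CLI rows such as 'Memory Size   [4.0GB]' into key/value pairs: the\n# bracketed token is matched with the same regex as\n# consts.COMMON_VALUE_PATTERN, the remainder of the row becomes the key,\n# and the brackets are stripped from the value.\nimport re\n\nCOMMON_VALUE_PATTERN = '\\\\[.*\\\\]'\n\n\ndef parse_row(row, obj_map):\n    \"\"\"Mirrors CliHandler.analysis_data_to_map for a single CLI row.\"\"\"\n    matches = re.findall(COMMON_VALUE_PATTERN, row)\n    if matches:\n        bracketed = matches[0]\n        key = row.replace(bracketed, '').strip()\n        obj_map[key] = bracketed.replace('[', '').replace(']', '')\n\n\nif __name__ == '__main__':\n    info = {}\n    parse_row('Memory Size   [4.0GB]', info)\n    assert info == {'Memory Size': '4.0GB'}\n    print(info)\n"
  },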
  {
    "path": "delfin/drivers/fujitsu/eternus/consts.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2016 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nfrom delfin.common import constants\n\n# get_storage function part\nGET_STORAGE_NAME = 'show storage-system-name'\nGET_STORAGE_VENDOR = 'FUJITSU'\nGET_ENCLOSURE_STATUS = 'show enclosure-status'\nGET_STORAGE_STATUS = 'show status'\nGET_STORAGE_SERIAL_NUMBER = 'show boxid'\nGET_STORAGE_FIRMWARE_VERSION = 'show firmware-version'\nGET_STORAGE_TOTAL_CAPACITY = 'show storage-cluster-license'\nGET_STORAGE_CONTROLLER = 'show fru-ce'\nGET_STORAGE_CONTROLLER_STATUS = 'show enclosure-status -type all'\nSET_CLIENV_FORCE_UNLOCK = 'set clienv-force-unlock'\nFIRMWARE_VERSION_CURRENT_COUNT = 3\nFIRMWARE_VERSION_LENGTH = 4\nCURRENT = 'Current'\nFIRMWARE_VERSION_NUMBER = 1\n\n# list_volume  function part\nGET_LIST_VOLUMES = 'show volumes'\nGET_LIST_VOLUMES_MODE_UID = 'show volumes -mode uid'\nGET_LIST_VOLUMES_TYPE_TPV = 'show volumes -type tpv'\nGET_LIST_VOLUMES_TYPE_FTV = 'show volumes -type ftv'\nCLI_STR = 'CLI>'\nSPECIAL_CHARACTERS_ONE = '^'\nSPECIAL_CHARACTERS_TWO = '--'\nVOLUME_TYPE_OPEN = 'open'\nVOLUME_ID_COUNT = 0\nVOLUME_NAME_COUNT = 1\nVOLUME_STATUS_COUNT = 2\nVOLUME_TYPE_COUNT = 3\nNATIVE_STORAGE_POOL_ID_COUNT = 5\nTOTAL_CAPACITY_COUNT = 7\nDEFAULT_USED_CAPACITY = 0\nDEFAULT_FREE_CAPACITY = 0\nVOLUMES_CYCLE = 5\nVOLUMES_LENGTH = 6\n\n# get_volumes_model function part\nGET_VOLUMES_MODEL_VOLUME_ID_COUNT = 0\nGET_VOLUMES_MODEL_VOLUME_NAME_COUNT = 1\nGET_VOLUMES_MODEL_VOLUME_STATUS_COUNT = 2\nGET_VOLUMES_MODEL_POOL_ID_COUNT = 4\nGET_VOLUMES_MODEL_TOTAL_CAPACITY_COUNT = 8\nGET_VOLUMES_MODEL_WWN_COUNT = 9\n\n# list_storage_pools function part\nGET_STORAGE_POOL_CSV = 'show raid-groups -csv'\nGET_STORAGE_POOL = 'show raid-groups'\nPOOL_ID_COUNT = 0\nPOOL_NAME_COUNT = 1\nPOOL_STATUS_COUNT = 4\nPOOL_TOTAL_CAPACITY_COUNT = 5\nPOOL_FREE_CAPACITY_COUNT = 6\nPOOL_CYCLE = 5\nPOOL_LENGTH = 6\n\nGET_DISK_COMMAND = 'show disks -disk all'\n\n# port\nGET_PORT_FC_PARAMETERS = 'show fc-parameters'\nGET_PORT_FCOE_PARAMETERS = 'show fcoe-parameters'\nPORT_NEWLINE_PATTERN = 'CM#\\\\d.*Port#\\\\d Information'\nDATA_KEY_INDEX = 1\nDATA_VALUE_INDEX = 2\n\nCONTROLLER_NEWLINE_PATTERN = 'CM#\\\\d Information'\nCOMMON_VALUE_PATTERN = '\\\\[.*\\\\]'\nSIZE_PATTERN = \"\\\\d+(?:\\\\.\\\\d+)?\"\nPOOL_TITLE_PATTERN = \"^\\\\[RAID Group No\\\\.\\\\],\\\\[RAID Group Name\"\nVOLUME_TITLE_PATTERN = \"^\\\\[Volume No\\\\.\\\\],\\\\[Volume Name]\"\nCONTROLLER_STATUS_PATTERN = 'Controller Module Status/Status Code'\nCONTROLLER_STATUS_NORMAL_KEY = 'Normal'\n\n# list_disk function part\nSPECIFIC_CHARACTER_ONE = '['\nSPECIFIC_CHARACTER_TWO = ']'\n\n# list_alert function\nSHOW_EVENTS_SEVERITY_WARNING = 'show events -severity warning'\nSHOW_EVENTS_SEVERITY_ERROR = 'show events -severity error'\nSHOW_EVENTS_LEVEL_WARNING = 'show events -level warning'\nSHOW_EVENTS_LEVEL_ERROR = 'show events -level error'\nOCCUR_TIME_RANGE = 
19\nSEVERITY_RANGE_BEGIN = 22\nSEVERITY_RANGE_END = 34\nCODE_RANGE_BEGIN = 38\nCODE_RANGE_END = 46\nDESCRIPTION_RANGE = 48\nTIME_PATTERN = '%Y-%m-%d %H:%M:%S'\nALERT_EXE_TIME = 5\nDEFAULT_EXE_TIME = 0.5\n\n\nclass DIGITAL_CONSTANT(object):\n    ZERO_INT = 0\n    ONE_INT = 1\n    MINUS_ONE_INT = -1\n    TWO_INT = 2\n    THREE_INT = 3\n    FIVE_INT = 5\n    SIX_INT = 6\n    MINUS_SIX_INT = -6\n    SEVEN_INT = 7\n    THOUSAND_INT = 1000\n\n\nSTORAGE_STATUS_MAP = {'normal': constants.StorageStatus.NORMAL,\n                      'offline': constants.StorageStatus.OFFLINE,\n                      'abnormal': constants.StorageStatus.ABNORMAL,\n                      'degraded': constants.StorageStatus.DEGRADED,\n                      'Empty': constants.StorageStatus.OFFLINE,\n                      'Normal': constants.StorageStatus.NORMAL,\n                      'Pinned Data': constants.StorageStatus.OFFLINE,\n                      'Unused': constants.StorageStatus.OFFLINE,\n                      'Warning': constants.StorageStatus.OFFLINE,\n                      'Maintenance': constants.StorageStatus.ABNORMAL,\n                      'Error': constants.StorageStatus.ABNORMAL,\n                      'Loop Down': constants.StorageStatus.OFFLINE,\n                      'Not Ready': constants.StorageStatus.ABNORMAL,\n                      'Subsystem Down': constants.StorageStatus.ABNORMAL,\n                      'Change Assigned CM': constants.StorageStatus.ABNORMAL}\n\nSTORAGE_POOL_STATUS_MAP = {'Available': constants.StoragePoolStatus.NORMAL,\n                           'Spare in Use': constants.StoragePoolStatus.NORMAL,\n                           'Readying': constants.StoragePoolStatus.NORMAL,\n                           'Rebuild': constants.StoragePoolStatus.NORMAL,\n                           'Copyback': constants.StoragePoolStatus.NORMAL,\n                           'Redundant Copy':\n                               constants.StoragePoolStatus.NORMAL,\n                           'Partially Exposed Rebuild':\n                               constants.StoragePoolStatus.ABNORMAL,\n                           'Exposed Rebuild':\n                               constants.StoragePoolStatus.ABNORMAL,\n                           'Exposed': constants.StoragePoolStatus.ABNORMAL,\n                           'Partially Exposed':\n                               constants.StoragePoolStatus.ABNORMAL,\n                           'No Disk Path':\n                               constants.StoragePoolStatus.ABNORMAL,\n                           'SED Locked': constants.StoragePoolStatus.ABNORMAL,\n                           'Broken': constants.StoragePoolStatus.ABNORMAL,\n                           'Unknown': constants.StoragePoolStatus.UNKNOWN}\n\nLIST_VOLUMES_STATUS_MAP = {\n    'normal': constants.StorageStatus.NORMAL,\n    'offline': constants.StorageStatus.OFFLINE,\n    'abnormal': constants.StorageStatus.ABNORMAL,\n    'degraded': constants.StorageStatus.DEGRADED,\n    'Available': constants.StorageStatus.NORMAL,\n    'Spare in Use': constants.StorageStatus.ABNORMAL,\n    'Readying': constants.StorageStatus.ABNORMAL,\n    'Rebuild': constants.StorageStatus.ABNORMAL,\n    'Copyback': constants.StorageStatus.ABNORMAL,\n    'Redundant Copy': constants.StorageStatus.ABNORMAL,\n    'Partially Exposed Rebuild': constants.StorageStatus.ABNORMAL,\n    'Exposed': constants.StorageStatus.ABNORMAL,\n    'Partially Exposed': constants.StorageStatus.ABNORMAL,\n    'Not Ready': constants.StorageStatus.ABNORMAL,\n    'Broken': 
constants.StorageStatus.ABNORMAL,\n    'Data Lost': constants.StorageStatus.ABNORMAL,\n    'Not Available': constants.StorageStatus.OFFLINE,\n    'Unknown': constants.StorageStatus.UNKNOWN,\n}\n\nSEVERITY_MAP = {\n    'Warning': constants.Severity.WARNING,\n    'warning': constants.Severity.WARNING,\n    'Error': constants.Severity.FATAL,\n    'error': constants.Severity.FATAL\n}\n\nDiskPhysicalTypeMap = {\n    'Nearline': constants.DiskPhysicalType.UNKNOWN,\n    'Online': constants.DiskPhysicalType.UNKNOWN,\n    'SSD': constants.DiskPhysicalType.SSD,\n    'SAS': constants.DiskPhysicalType.SAS,\n    'unknown': constants.DiskPhysicalType.UNKNOWN\n}\n\nDiskLogicalTypeMap = {\n    'Data': constants.DiskLogicalType.MEMBER,\n    'Spare': constants.DiskLogicalType.SPARE,\n    'unknown': constants.DiskLogicalType.UNKNOWN,\n}\n\nDISK_STATUS_MAP = {\n    'Available': constants.DiskStatus.NORMAL,\n    'Spare': constants.DiskStatus.NORMAL,\n    'Present': constants.DiskStatus.NORMAL,\n    'Readying': constants.DiskStatus.NORMAL,\n    'Rebuild/Copyback': constants.DiskStatus.NORMAL,\n    'Copyback': constants.DiskStatus.NORMAL,\n    'Rebuild': constants.DiskStatus.NORMAL,\n    'Redundant': constants.DiskStatus.NORMAL,\n    'Not Supported': constants.DiskStatus.ABNORMAL,\n    'Not Exist': constants.DiskStatus.ABNORMAL,\n    'Failed Usable': constants.DiskStatus.ABNORMAL,\n    'Broken': constants.DiskStatus.ABNORMAL,\n    'Not Available': constants.DiskStatus.ABNORMAL,\n    'Formatting': constants.DiskStatus.NORMAL,\n    'Not Format': constants.DiskStatus.NORMAL\n}\n\nPARSE_ALERT_ALERT_ID = '1.3.6.1.2.1.1.3.0'\nPARSE_ALERT_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'\nPARSE_ALERT_COMPONENT = '1.3.6.1.4.1.211.1.21.1.150.7.0'\nPARSE_ALERT_LOCATION = '1.3.6.1.4.1.211.1.21.1.150.1.1.0'\nPARSE_ALERT_DESCRIPTION = '1.3.6.1.4.1.211.1.21.1.150.11.0'\n\nPARSE_ALERT_SEVERITY_MAP = {\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.5': constants.Severity.WARNING,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.2': constants.Severity.FATAL,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.3': constants.Severity.WARNING,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.9': constants.Severity.INFORMATIONAL,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.12': constants.Severity.INFORMATIONAL,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.50': constants.Severity.MINOR,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.51': constants.Severity.WARNING,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.60': constants.Severity.MINOR,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.61': constants.Severity.MINOR,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.62': constants.Severity.MINOR,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.64': constants.Severity.WARNING,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.65': constants.Severity.WARNING,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.66': constants.Severity.INFORMATIONAL,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.67': constants.Severity.MINOR,\n    '1.3.6.1.4.1.211.4.1.1.126.1.150.0.68': constants.Severity.MINOR\n}\n\n# list_storage_hosts\nGET_HOST_WWN_NAMES = 'show host-wwn-names'\nGET_HOST_PATH_STATUS = 'show host-path-state'\nGET_HOST_ISCSI_NAMES = 'show host-iscsi-names'\nGET_HOST_ISCSI_NAMES_NUMBER = 'show host-iscsi-names -host-number {}'\nGET_HOST_SAS_ADDRESSES = 'show host-sas-addresses'\nHOST_PATH_STATUS_SPECIFIC_ONE = '----'\nHOST_PATH_STATUS_SPECIFIC_TWO = 'Online'\nHOST_ID_COUNT = 0\nHOST_NAME_COUNT = 1\nHOST_WWN_COUNT = 2\nHOST_TYPE_COUNT = 4\nHOST_FC_ENCAPSULATE_DATA_TOTAL = 5\nHOST_PATH_STATUS_NAME = 2\nHOST_PATH_STATUS = 3\nHOST_PATH_STATUS_TOTAL = 
4\nHOST_ISCSI_NAMES_ZERO = 0\nHOST_ISCSI_ONE = 1\nHOST_ISCSI_THREE = 3\nHOST_ISCSI_FOUR = 4\nHOST_ISCSI_NAMES_TWO = 2\nHOST_ISCSI_DETAIL_EIGHTEEN = 18\nHOST_ISCSI_NAMES_SEVEN = 7\nHOST_ISCSI_SPECIFIC_ONE = '*('\nHOST_SAS_ZERO = 0\nHOST_SAS_NAME = 1\nHOST_SAS_ADDRESS = 2\nHOST_SAS_OS = 4\nHOST_SAS_ENCAPSULATE_DATA_TOTAL = 5\n\n\n# list_storage_host_groups\nGET_HOST_GROUPS_ALL = 'show host-groups -all'\nHOST_GROUPS_SPECIFIC_ONE = '<Host List>'\nHOST_GROUPS_SPECIFIC_TWO = '----'\nHOST_GROUP_ZERO = 0\nHOST_GROUP_ONE = 1\nHOST_GROUP_TOTAL = 2\n\n# list_volume_groups\nGET_LUN_GROUPS = 'show lun-groups'\nLUN_GROUPS_SPECIFIC_TWO = '----'\nGET_LUN_GROUPS_LG_NUMBER = 'show lun-groups -lg-number {}'\nLUN_GROUPS_ID_COUNT = 0\nLUN_GROUPS_NAME_COUNT = 1\nLUN_VOLUME_ID = 1\nLUN_VOLUME_LENGTH = 3\n\n# list_masking_views\nGET_HOST_AFFINITY = 'show host-affinity'\nGET_PORT_GROUPS = 'show port-groups -all'\nGET_MAPPING = 'show mapping'\nPORT_GROUP_ARR_LENGTH = 2\nPORT_GROUP_ID_NUM = 0\nPORT_GROUP_NAME_NUM = 1\nHOST_NAME_NUM = 1\nHOST_GROUP_ID_NUM = 2\nLUN_GROUP_ID_NUM = 4\nLIST_MASKING_VIEWS_VOLUME_ID = 1\nPORT_GROUP_ROW_ARR_NUM = 0\nPORT_LIST_ROW_ARR_NUM = 1\nVIEWS_GROUP_NUM_ZERO = 0\nVIEWS_GROUP_ROW_KEY_LENGTH = 4\nVIEWS_HOST_ROW_KEY_LENGTH = 3\nVIEWS_GROUP_ROW_VALUE_LENGTH = 7\nLIST_MASKING_VIEWS_SPECIFIC_ONE = '---'\nLIST_MASKING_VIEWS_SPECIFIC_TWO = '<Port List>'\nLIST_MASKING_VIEWS_SPECIFIC_FOUR = '<Connection List>'\nLIST_MASKING_VIEWS_SPECIFIC_FIVE = 'CM#'\nLIST_MASKING_VIEWS_SPECIFIC_SIX = ' (Host'\nLIST_MASKING_VIEWS_SPECIFIC_SEVEN = 'LUN  Volume'\nVIEWS_REGULAR_SPECIFIC_ONE = '^Port Group'\nVIEWS_REGULAR_SPECIFIC_TWO = '^Host'\nLIST_MASKING_VIEWS_CONSTANT_ZERO = 0\nLIST_MASKING_VIEWS_CONSTANT_TWO = 2\n\nHOST_OS_TYPES_MAP = {\n    'linux': constants.HostOSTypes.LINUX,\n    'windows': constants.HostOSTypes.WINDOWS,\n    'solaris': constants.HostOSTypes.SOLARIS,\n    'solaris mpxio': constants.HostOSTypes.SOLARIS,\n    'hp-ux': constants.HostOSTypes.HP_UX,\n    'aix': constants.HostOSTypes.AIX,\n    'aix vxvm': constants.HostOSTypes.AIX,\n    'xenserver': constants.HostOSTypes.XEN_SERVER,\n    'vmware esx': constants.HostOSTypes.VMWARE_ESX,\n    'linux_vis': constants.HostOSTypes.LINUX_VIS,\n    'windows server 2012': constants.HostOSTypes.WINDOWS_SERVER_2012,\n    'oracle vm': constants.HostOSTypes.ORACLE_VM,\n    'open vms': constants.HostOSTypes.OPEN_VMS,\n    'unknown': constants.HostOSTypes.UNKNOWN\n}\n"
  },
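  {
    "path": "docs/sketches/eternus_event_row_slicing_sketch.py",
    "content": "# Editor's note: hypothetical illustrative sketch, not part of the delfin\n# source tree. It demonstrates the fixed-width slicing CliHandler.get_event\n# applies to 'show events' rows, using the column offsets defined in\n# consts.py (OCCUR_TIME_RANGE, SEVERITY_RANGE_*, CODE_RANGE_*,\n# DESCRIPTION_RANGE). The sample row below is fabricated for illustration.\nOCCUR_TIME_RANGE = 19\nSEVERITY_RANGE_BEGIN = 22\nSEVERITY_RANGE_END = 34\nCODE_RANGE_BEGIN = 38\nCODE_RANGE_END = 46\nDESCRIPTION_RANGE = 48\n\n\ndef slice_event_row(row):\n    # Each field occupies a fixed column range of the CLI output row.\n    return {\n        'occur_time': row[:OCCUR_TIME_RANGE].strip(),\n        'severity': row[SEVERITY_RANGE_BEGIN:SEVERITY_RANGE_END].strip(),\n        'code': row[CODE_RANGE_BEGIN:CODE_RANGE_END].strip(),\n        'description': row[DESCRIPTION_RANGE:].strip(),\n    }\n\n\nif __name__ == '__main__':\n    sample = ('2022-01-02 03:04:05   Warning         '\n              'C1234567  Fan failure detected')\n    event = slice_event_row(sample)\n    assert event['severity'] == 'Warning'\n    assert event['code'] == 'C1234567'\n    print(event)\n"
  },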
  {
    "path": "delfin/drivers/fujitsu/eternus/eternus_ssh_client.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2011 OpenStack LLC\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport time\n\nimport paramiko\nimport six\nfrom oslo_log import log as logging\nfrom cryptography.hazmat.primitives.asymmetric import dsa\n\nfrom delfin import cryptor\nfrom delfin import exception, utils\nfrom delfin.drivers.utils.ssh_client import SSHPool\n\nLOG = logging.getLogger(__name__)\n\n\ndef override_check_dsa_parameters(parameters):\n    if parameters.p.bit_length() not in [512, 1024, 2048, 3072, 4096]:\n        raise ValueError(\n            \"p must be exactly 512, 1024, 2048, 3072, or 4096 bits long\"\n        )\n    if parameters.q.bit_length() not in [160, 224, 256]:\n        raise ValueError(\"q must be exactly 160, 224, or 256 bits long\")\n\n    if not (1 < parameters.g < parameters.p):\n        raise ValueError(\"g, p don't satisfy 1 < g < p.\")\n\n\n# Relax cryptography's DSA parameter validation so that host keys with\n# legacy sizes (e.g. a 512-bit p) presented by the array are accepted.\ndsa._check_dsa_parameters = override_check_dsa_parameters\n\n\nclass EternusSSHPool(SSHPool):\n    def create(self):\n        ssh = paramiko.SSHClient()\n        try:\n            if self.ssh_pub_key is None:\n                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n            else:\n                host_key = '%s %s %s' % \\\n                           (self.ssh_host, self.ssh_pub_key_type,\n                            self.ssh_pub_key)\n                self.set_host_key(host_key, ssh)\n            try:\n                ssh.connect(hostname=self.ssh_host, port=self.ssh_port,\n                            username=self.ssh_username,\n                            password=cryptor.decode(self.ssh_password),\n                            timeout=self.ssh_conn_timeout)\n            except Exception as e:\n                if 'Authentication failed' in six.text_type(e):\n                    ssh.connect(hostname=self.ssh_host, port=self.ssh_port,\n                                username=self.ssh_username,\n                                password=cryptor.decode(self.ssh_password),\n                                timeout=self.ssh_conn_timeout,\n                                look_for_keys=False)\n                else:\n                    raise e\n            if self.conn_timeout:\n                transport = ssh.get_transport()\n                transport.set_keepalive(self.SOCKET_TIMEOUT)\n            return ssh\n        except Exception as e:\n            err = six.text_type(e)\n            LOG.error(err)\n            if 'timed out' in err:\n                raise exception.InvalidIpOrPort()\n            elif 'No authentication methods available' in err \\\n                    or 'Authentication failed' in err:\n                raise exception.InvalidUsernameOrPassword()\n            elif 'not a valid RSA private key file' in err:\n                raise exception.InvalidPrivateKey()\n            elif 'not found in known_hosts' in err:\n                raise exception.SSHNotFoundKnownHosts(self.ssh_host)\n            else:\n        
        raise exception.SSHException(err)\n\n    def do_exec_shell(self, command_list, exe_time):\n        result = ''\n        try:\n            with self.item() as ssh:\n                if command_list and ssh:\n                    channel = ssh.invoke_shell()\n                    for command in command_list:\n                        utils.check_ssh_injection(command)\n                        channel.send(command + '\\r\\n')\n                        time.sleep(exe_time)\n                    channel.send(\"exit\" + \"\\r\\n\")\n                    channel.close()\n                    while True:\n                        resp = channel.recv(9999).decode('utf8')\n                        if not resp:\n                            time.sleep(exe_time)\n                            break\n                        result += resp\n            if 'is not a recognized command' in result \\\n                    or 'Unknown command' in result:\n                raise exception.StorageBackendException(result)\n        except paramiko.AuthenticationException as ae:\n            LOG.error('doexec Authentication error:{}'.format(ae))\n            raise exception.InvalidUsernameOrPassword()\n        except Exception as e:\n            err = six.text_type(e)\n            LOG.error(err)\n            if 'timed out' in err \\\n                    or 'SSH connect timeout' in err:\n                raise exception.SSHConnectTimeout()\n            elif 'No authentication methods available' in err \\\n                    or 'Authentication failed' in err \\\n                    or 'Invalid username or password' in err:\n                raise exception.InvalidUsernameOrPassword()\n            elif 'not a valid RSA private key file' in err \\\n                    or 'not a valid RSA private key' in err:\n                raise exception.InvalidPrivateKey()\n            elif 'Unable to connect to port' in err \\\n                    or 'Invalid ip or port' in err:\n                raise exception.InvalidIpOrPort()\n            else:\n                raise exception.SSHException(err)\n        return result\n"
  },
  {
    "path": "delfin/drivers/fujitsu/eternus/eternus_stor.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport hashlib\nimport re\n\nimport six\nfrom oslo_log import log\nfrom oslo_utils import units\n\nfrom delfin import exception, utils\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom delfin.drivers.fujitsu.eternus import cli_handler, consts\nfrom delfin.drivers.fujitsu.eternus.consts import DIGITAL_CONSTANT\nfrom delfin.drivers.utils.tools import Tools\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\nclass EternusDriver(driver.StorageDriver):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.cli_handler = cli_handler.CliHandler(**kwargs)\n        self.login = self.cli_handler.login()\n\n    def list_volumes(self, context):\n        list_volumes = self.get_volumes_model()\n        if not list_volumes:\n            list_volumes = self.get_volumes_old()\n        return list_volumes\n\n    def get_volumes_model(self):\n        list_volumes = []\n        volumes_str = self.cli_handler.exec_command(\n            consts.GET_LIST_VOLUMES_MODE_UID)\n        volume_id_dict = self.cli_handler.get_volumes_type(\n            command=consts.GET_LIST_VOLUMES_TYPE_TPV)\n        volume_id_dict = self.cli_handler.get_volumes_type(\n            volume_id_dict, consts.GET_LIST_VOLUMES_TYPE_TPV)\n        block = True\n        if volumes_str:\n            volumes_arr = volumes_str.replace('\\r', '').split('\\n')\n            for volumes_row_str in volumes_arr:\n                if not volumes_row_str or \\\n                        consts.CLI_STR in volumes_row_str:\n                    continue\n                if consts.SPECIAL_CHARACTERS_TWO in volumes_row_str:\n                    block = False\n                    continue\n                if block:\n                    continue\n                volumes_row_arr = volumes_row_str.split()\n                volume_id = volumes_row_arr[\n                    consts.GET_VOLUMES_MODEL_VOLUME_ID_COUNT]\n                type_capacity = volume_id_dict.get(volume_id, {})\n                volume_type = type_capacity.get('type',\n                                                constants.VolumeType.THICK)\n                used_capacity = type_capacity.get('used_capacity',\n                                                  DIGITAL_CONSTANT.ZERO_INT)\n                volume_name = volumes_row_arr[\n                    consts.GET_VOLUMES_MODEL_VOLUME_NAME_COUNT]\n                volume_status = volumes_row_arr[\n                    consts.GET_VOLUMES_MODEL_VOLUME_STATUS_COUNT]\n                pool_id = volumes_row_arr[\n                    consts.GET_VOLUMES_MODEL_POOL_ID_COUNT]\n                total_capacity = \\\n                    int(volumes_row_arr[consts.\n                        GET_VOLUMES_MODEL_TOTAL_CAPACITY_COUNT]) * units.Mi\n                wwn = volumes_row_arr[consts.GET_VOLUMES_MODEL_WWN_COUNT]\n                volume = {\n                    'name': volume_name,\n             
       'storage_id': self.storage_id,\n                    'status': consts.LIST_VOLUMES_STATUS_MAP.get(\n                        volume_status),\n                    'native_volume_id': volume_id,\n                    'native_storage_pool_id': pool_id,\n                    'type': volume_type,\n                    'wwn': wwn,\n                    'total_capacity': total_capacity,\n                    'used_capacity': used_capacity,\n                    'free_capacity': total_capacity - used_capacity\n                }\n                list_volumes.append(volume)\n        return list_volumes\n\n    def get_volumes_old(self):\n        list_volumes = []\n        volumes_str = self.cli_handler.exec_command(consts.GET_LIST_VOLUMES)\n        volumes_arr = volumes_str.split('\\n')\n        if len(volumes_arr) < consts.VOLUMES_LENGTH:\n            return list_volumes\n        for volumes_num in range(consts.VOLUMES_CYCLE, len(volumes_arr)):\n            volumes_row_str = volumes_arr[volumes_num]\n            if not volumes_row_str or \\\n                    consts.CLI_STR in volumes_row_str.strip():\n                continue\n            volumes_row_arr = volumes_row_str.split()\n            volume_id = volumes_row_arr[consts.VOLUME_ID_COUNT]\n            volume_name = volumes_row_arr[consts.VOLUME_NAME_COUNT]\n            volume_status = volumes_row_arr[consts.VOLUME_STATUS_COUNT]\n            volume_type = volumes_row_arr[consts.VOLUME_TYPE_COUNT]\n            pool_id = volumes_row_arr[consts.NATIVE_STORAGE_POOL_ID_COUNT]\n            total_capacity = volumes_row_arr[consts.TOTAL_CAPACITY_COUNT]\n            volume_results = {\n                'name': volume_name,\n                'storage_id': self.storage_id,\n                'status': consts.LIST_VOLUMES_STATUS_MAP.get(\n                    volume_status),\n                'native_volume_id': volume_id,\n                'native_storage_pool_id': pool_id,\n                'type': constants.VolumeType.THIN if\n                volume_type and consts.VOLUME_TYPE_OPEN in volume_type else\n                constants.VolumeType.THICK,\n                'total_capacity': int(total_capacity) * units.Mi,\n                'used_capacity': consts.DEFAULT_USED_CAPACITY,\n                'free_capacity': consts.DEFAULT_FREE_CAPACITY\n            }\n            list_volumes.append(volume_results)\n        return list_volumes\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def clear_alert(self, context, alert):\n        pass\n\n    def get_storage(self, context):\n        storage_name_dict = self.cli_handler.common_data_encapsulation(\n            consts.GET_STORAGE_NAME)\n        storage_name = storage_name_dict.get('Name')\n        storage_description = storage_name_dict.get('Description')\n        storage_location = storage_name_dict.get('Installation Site')\n\n        enclosure_status = self.cli_handler.common_data_encapsulation(\n            consts.GET_ENCLOSURE_STATUS)\n        storage_model = enclosure_status.get('Model Name')\n        storage_serial_number = enclosure_status.get('Serial Number')\n        storage_firmware_version = enclosure_status.get('Firmware Version')\n\n        storage_status_dict = self.cli_handler.common_data_encapsulation(\n            consts.GET_STORAGE_STATUS)\n        storage_status = consts.STORAGE_STATUS_MAP.get(\n            storage_status_dict.get('Summary Status'))\n\n        raw_capacity = consts.DIGITAL_CONSTANT.ZERO_INT\n        list_disks = self.list_disks(context)\n        if 
list_disks:\n            for disks in list_disks:\n                raw_capacity += disks.get('capacity',\n                                          consts.DIGITAL_CONSTANT.ZERO_INT)\n        total_capacity = consts.DIGITAL_CONSTANT.ZERO_INT\n        used_capacity = consts.DIGITAL_CONSTANT.ZERO_INT\n        free_capacity = consts.DIGITAL_CONSTANT.ZERO_INT\n        list_storage_pools = self.list_storage_pools(context)\n        if list_storage_pools:\n            for pools in list_storage_pools:\n                total_capacity += pools.get('total_capacity')\n                used_capacity += pools.get('used_capacity')\n                free_capacity += pools.get('free_capacity')\n        storage = {\n            'name': storage_name,\n            'vendor': consts.GET_STORAGE_VENDOR,\n            'description': storage_description,\n            'model': storage_model,\n            'status': storage_status,\n            'serial_number': storage_serial_number,\n            'firmware_version': storage_firmware_version,\n            'location': storage_location,\n            'raw_capacity': raw_capacity,\n            'total_capacity': total_capacity,\n            'used_capacity': used_capacity,\n            'free_capacity': free_capacity\n        }\n        return storage\n\n    def list_controllers(self, context):\n        controllers = self.cli_handler.get_controllers()\n        controllers_status = self.cli_handler.common_data_encapsulation(\n            consts.GET_STORAGE_CONTROLLER_STATUS)\n        controller_list = []\n        for controller in (controllers or []):\n            name = controller.get('name')\n            status = constants.ControllerStatus.FAULT\n            if controllers_status and controllers_status.get(name):\n                status_value = controllers_status.get(name)\n                if status_value and \\\n                        consts.CONTROLLER_STATUS_NORMAL_KEY in status_value:\n                    status = constants.ControllerStatus.NORMAL\n            controller_model = {\n                'name': controller.get('name'),\n                'storage_id': self.storage_id,\n                'native_controller_id': controller.get('Serial Number'),\n                'status': status,\n                'location': controller.get('name'),\n                'soft_version': controller.get('Hard Revision'),\n                'cpu_info': controller.get('CPU Clock'),\n                'cpu_count': consts.DIGITAL_CONSTANT.ONE_INT,\n                'memory_size': str(int(\n                    Tools.get_capacity_size(controller.get('Memory Size'))))\n            }\n            controller_list.append(controller_model)\n        return controller_list\n\n    def list_disks(self, context):\n        try:\n            disk_list = \\\n                self.cli_handler.format_data(\n                    consts.GET_DISK_COMMAND,\n                    self.storage_id,\n                    self.cli_handler.format_disks,\n                    False)\n            return disk_list\n        except Exception as e:\n            error = six.text_type(e)\n            LOG.error(\"Failed to get disk from fujitsu eternus %s\" % error)\n            raise exception.InvalidResults(error)\n\n    def list_ports(self, context):\n        port_list = self.cli_handler.format_data(\n            consts.GET_PORT_FC_PARAMETERS, self.storage_id,\n            self.cli_handler.format_fc_ports, True)\n        ports_status = self.cli_handler.get_ports_status()\n        for port in port_list:\n            name = port.get('name')\n   
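         # attach speed, connection and health parsed from the port status\n   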
         status_dict = ports_status.get(name, {})\n            if status_dict:\n                link_status = status_dict.get('Link Status') or ''\n                connection_status = constants.PortConnectionStatus.UNKNOWN\n                if 'Gbit/s' in link_status:\n                    reality = link_status.split()[0].replace('Gbit/s', '')\n                    speed = int(reality) * units.G\n                    port['speed'] = speed\n                if 'Link Up' in link_status:\n                    connection_status = \\\n                        constants.PortConnectionStatus.CONNECTED\n                if 'Link Down' in link_status:\n                    connection_status = \\\n                        constants.PortConnectionStatus.DISCONNECTED\n\n                status_keys = status_dict.keys()\n                status_dicts = {}\n                for status_key in status_keys:\n                    if status_key and 'Status/Status Code' in status_key:\n                        status_dicts['Status/Status Code'] = status_dict.get(\n                            status_key)\n                    if status_key and status_key in 'Port WWN':\n                        status_dicts['WWN'] = status_dict.get(status_key)\n                status = status_dicts.get('Status/Status Code') or ''\n                health_status = constants.PortHealthStatus.UNKNOWN\n                if 'Normal' in status or 'normal' in status:\n                    health_status = constants.PortHealthStatus.NORMAL\n                elif 'Unconnected' in status or 'unconnected' in status:\n                    health_status = constants.PortHealthStatus.UNKNOWN\n                elif 'Error' in status or 'error' in status:\n                    health_status = constants.PortHealthStatus.ABNORMAL\n                port['connection_status'] = connection_status\n                port['wwn'] = status_dicts.get('WWN')\n                port['health_status'] = health_status\n        return port_list\n\n    def list_storage_pools(self, context):\n        pool_list = self.get_list_pools()\n        if not pool_list:\n            pool_list = self.get_list_pools_old(pool_list)\n        return pool_list\n\n    def get_list_pools_old(self, pool_list):\n        pools_str = self.cli_handler.exec_command(consts.GET_STORAGE_POOL)\n        if not pools_str:\n            return pool_list\n        pools_row_str = pools_str.split('\\n')\n        if len(pools_row_str) < consts.POOL_LENGTH:\n            return pool_list\n        for pools_row_num in range(consts.POOL_CYCLE, len(pools_row_str)):\n            pools_row_arr = pools_row_str[pools_row_num].strip()\n            if pools_row_arr in consts.CLI_STR or \\\n                    pools_row_arr in consts.SPECIAL_CHARACTERS_ONE:\n                continue\n            pools_arr = pools_row_arr.split()\n            pool_id = pools_arr[consts.POOL_ID_COUNT]\n            pool_name = pools_arr[consts.POOL_NAME_COUNT]\n            pool_status = consts.STORAGE_POOL_STATUS_MAP.get(\n                pools_arr[consts.POOL_STATUS_COUNT],\n                constants.StoragePoolStatus.UNKNOWN)\n            try:\n                total_capacity = int(\n                    pools_arr[consts.POOL_TOTAL_CAPACITY_COUNT]) * units.Mi\n                free_capacity = int(\n                    pools_arr[consts.POOL_FREE_CAPACITY_COUNT]) * units.Mi\n            except Exception as e:\n                LOG.info('Capacity conversion error: %s' % six.text_type(e))\n                return pool_list\n            pool_model = {\n                'name': 
pool_name,\n                'storage_id': self.storage_id,\n                'native_storage_pool_id': str(pool_id),\n                'status': pool_status,\n                'storage_type': constants.StorageType.BLOCK,\n                'total_capacity': total_capacity,\n                'used_capacity': total_capacity - free_capacity,\n                'free_capacity': free_capacity\n            }\n            pool_list.append(pool_model)\n        return pool_list\n\n    def get_list_pools(self):\n        pool_list = []\n        pools = self.cli_handler.get_volumes_or_pool(\n            consts.GET_STORAGE_POOL_CSV, consts.POOL_TITLE_PATTERN)\n        for pool in (pools or []):\n            free_cap = float(\n                pool.get(\"freecapacity(mb)\")) * units.Mi\n            total_cap = float(\n                pool.get(\"totalcapacity(mb)\")) * units.Mi\n            used_cap = total_cap - free_cap\n            status = consts.STORAGE_POOL_STATUS_MAP.get(\n                pool.get('status'),\n                constants.StoragePoolStatus.UNKNOWN)\n            pool_model = {\n                'name': pool.get('raidgroupname'),\n                'storage_id': self.storage_id,\n                'native_storage_pool_id': str(pool.get('raidgroupno.')),\n                'status': status,\n                'storage_type': constants.StorageType.BLOCK,\n                'total_capacity': int(total_cap),\n                'used_capacity': int(used_cap),\n                'free_capacity': int(free_cap)\n            }\n            pool_list.append(pool_model)\n        return pool_list\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    def reset_connection(self, context, **kwargs):\n        pass\n\n    def list_alerts(self, context, query_para=None):\n        list_alert = self.cli_handler.get_alerts(\n            consts.SHOW_EVENTS_SEVERITY_WARNING, query_para)\n        list_alert = self.cli_handler.get_alerts(\n            consts.SHOW_EVENTS_SEVERITY_ERROR, query_para, list_alert)\n        if not list_alert:\n            list_alert = self.cli_handler.get_alerts(\n                consts.SHOW_EVENTS_LEVEL_WARNING, query_para)\n            list_alert = self.cli_handler.get_alerts(\n                consts.SHOW_EVENTS_LEVEL_ERROR, query_para, list_alert)\n        return list_alert\n\n    @staticmethod\n    def parse_alert(context, alert):\n        try:\n            if consts.PARSE_ALERT_DESCRIPTION in alert.keys():\n                alert_model = dict()\n                alert_model['alert_id'] = alert.get(\n                    consts.PARSE_ALERT_ALERT_ID)\n                alert_model['severity'] = consts.PARSE_ALERT_SEVERITY_MAP.get(\n                    alert.get(consts.PARSE_ALERT_SEVERITY),\n                    constants.Severity.NOT_SPECIFIED)\n                alert_model['category'] = constants.Category.FAULT\n                alert_model['occur_time'] = utils.utcnow_ms()\n                alert_model['description'] = alert.get(\n                    consts.PARSE_ALERT_DESCRIPTION)\n                alert_model['location'] = '{}{}'.format(alert.get(\n                    consts.PARSE_ALERT_LOCATION),\n                    alert.get(consts.PARSE_ALERT_COMPONENT))\n                alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n                alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n                alert_model['alert_name'] = alert.get(\n                    consts.PARSE_ALERT_DESCRIPTION)\n                alert_model['match_key'] = 
hashlib.md5(str(alert.get(\n                    consts.PARSE_ALERT_ALERT_ID)).encode()).hexdigest()\n                return alert_model\n        except Exception as e:\n            LOG.error(e)\n            msg = (_(\"Failed to build alert model as some attributes missing\"))\n            raise exception.InvalidResults(msg)\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}'\n\n    def list_storage_host_initiators(self, ctx):\n        initiator_list = []\n        host_status = self.get_host_status()\n        self.get_fc_sas_initiator(host_status, initiator_list,\n                                  consts.GET_HOST_WWN_NAMES,\n                                  consts.HOST_FC_ENCAPSULATE_DATA_TOTAL,\n                                  consts.HOST_NAME_COUNT,\n                                  consts.HOST_WWN_COUNT,\n                                  constants.InitiatorType.FC)\n        self.get_iscsi_initiator(host_status, initiator_list)\n        self.get_fc_sas_initiator(host_status, initiator_list,\n                                  consts.GET_HOST_SAS_ADDRESSES,\n                                  consts.HOST_SAS_ENCAPSULATE_DATA_TOTAL,\n                                  consts.HOST_SAS_NAME,\n                                  consts.HOST_SAS_ADDRESS,\n                                  constants.InitiatorType.SAS)\n        return initiator_list\n\n    def get_fc_sas_initiator(self, host_status, initiator_list, command,\n                             encapsulate_data_total, name_count,\n                             wwn_count, initiator_type):\n        host_fc_list = self.get_data(command)\n        for host_fc in host_fc_list:\n            if len(host_fc) < encapsulate_data_total:\n                continue\n            fc_name = host_fc[name_count]\n            fc_wwn = host_fc[wwn_count]\n            state = host_status.get(fc_name)\n            initiator_item = self.initiator_dict(\n                fc_wwn, fc_name, state, initiator_type)\n            initiator_list.append(initiator_item)\n\n    def initiator_dict(self, wwn, host_id, state, initiator_type):\n        status = constants.InitiatorStatus.OFFLINE\n        if state is not None and state == consts.HOST_PATH_STATUS_SPECIFIC_TWO:\n            status = constants.InitiatorStatus.ONLINE\n        initiator_item = {\n            \"name\": wwn,\n            \"storage_id\": self.storage_id,\n            \"native_storage_host_initiator_id\": wwn,\n            \"wwn\": wwn,\n            \"status\": status,\n            \"native_storage_host_id\": host_id,\n            'type': initiator_type\n        }\n        return initiator_item\n\n    def get_iscsi_initiator(self, host_status, initiator_list):\n        host_iscsi_list = self.get_iscsi_host_data()\n        for host_iscsi in host_iscsi_list:\n            iscsi_name = host_iscsi.get('name')\n            state = host_status.get(iscsi_name)\n            iqn = host_iscsi.get('iqn')\n            initiator_item = self.initiator_dict(\n                iqn, iscsi_name, state, constants.InitiatorType.ISCSI)\n            initiator_item['alias'] = host_iscsi.get('alias')\n            initiator_list.append(initiator_item)\n\n    def list_storage_hosts(self, ctx):\n        host_list = []\n        host_status = self.get_host_status()\n        self.get_fc_sas_host(host_list, host_status, consts.GET_HOST_WWN_NAMES,\n                             consts.HOST_FC_ENCAPSULATE_DATA_TOTAL,\n                             consts.HOST_NAME_COUNT, consts.HOST_TYPE_COUNT)\n        
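# iSCSI and SAS attached hosts are appended to the same list\n        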
self.get_iscsi_host(host_list, host_status)\n        self.get_fc_sas_host(host_list, host_status,\n                             consts.GET_HOST_SAS_ADDRESSES,\n                             consts.HOST_SAS_ENCAPSULATE_DATA_TOTAL,\n                             consts.HOST_SAS_NAME, consts.HOST_SAS_OS)\n        return host_list\n\n    def get_fc_sas_host(self, host_list, host_status, command,\n                        encapsulate_data_total, name_count, type_count):\n        host_fc_list = self.get_data(command)\n        for host_fc in host_fc_list:\n            if len(host_fc) < encapsulate_data_total:\n                continue\n            fc_name = host_fc[name_count]\n            os = host_fc[type_count].lower()\n            state = host_status.get(fc_name)\n            status = constants.HostStatus.OFFLINE\n            if state is not None and state == \\\n                    consts.HOST_PATH_STATUS_SPECIFIC_TWO:\n                status = constants.HostStatus.NORMAL\n            host_d = {\n                \"name\": fc_name,\n                \"storage_id\": self.storage_id,\n                \"native_storage_host_id\": fc_name,\n                \"os_type\": consts.HOST_OS_TYPES_MAP.get(\n                    os, constants.HostOSTypes.UNKNOWN),\n                \"status\": status\n            }\n            host_list.append(host_d)\n\n    def get_iscsi_host(self, host_list, host_status):\n        host_iscsi_list = self.get_iscsi_host_data()\n        for host_iscsi in host_iscsi_list:\n            iscsi_name = host_iscsi.get('name')\n            state = host_status.get(iscsi_name)\n            os = host_iscsi.get('os')\n            os = os.lower() if os else None\n            status = constants.HostStatus.OFFLINE\n            if state is not None and state ==\\\n                    consts.HOST_PATH_STATUS_SPECIFIC_TWO:\n                status = constants.HostStatus.NORMAL\n            host_d = {\n                \"name\": iscsi_name,\n                \"storage_id\": self.storage_id,\n                \"native_storage_host_id\": iscsi_name,\n                \"os_type\": consts.HOST_OS_TYPES_MAP.get(\n                    os, constants.HostOSTypes.UNKNOWN),\n                \"status\": status,\n                'ip_address': host_iscsi.get('address')\n            }\n            host_list.append(host_d)\n\n    def get_data(self, command):\n        host_list = []\n        host_str = self.cli_handler.exec_command(command)\n        block = True\n        length_list = []\n        if host_str:\n            host_arr = host_str.strip().replace('\\r', '').split('\\n')\n            for host_row_str in host_arr:\n                if not host_row_str or \\\n                        consts.CLI_STR in host_row_str:\n                    continue\n                if consts.SPECIAL_CHARACTERS_TWO in host_row_str:\n                    length_list.extend(\n                        [len(identify) for identify in host_row_str.split()])\n                    block = False\n                    continue\n                if block:\n                    continue\n                volume_list = []\n                key_length = DIGITAL_CONSTANT.ZERO_INT\n                for length_key in length_list:\n                    volume = host_row_str[key_length:\n                                          key_length + length_key].strip()\n                    volume_list.append(volume)\n                    key_length =\\\n                        key_length + length_key + DIGITAL_CONSTANT.ONE_INT\n                
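# one positional record is stored per parsed data row\n                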
host_list.append(volume_list)\n        return host_list\n\n    def get_iscsi_host_data(self):\n        iscsi_list = []\n        iscsi_ids_str = self.cli_handler.exec_command(\n            consts.GET_HOST_ISCSI_NAMES)\n        block = True\n        if iscsi_ids_str:\n            iscsi_ids_arr = iscsi_ids_str.strip().replace('\\r', '').split('\\n')\n            for iscsi_ids_row_str in iscsi_ids_arr:\n                if not iscsi_ids_row_str or \\\n                        consts.CLI_STR in iscsi_ids_row_str:\n                    continue\n                if consts.HOST_PATH_STATUS_SPECIFIC_ONE in iscsi_ids_row_str:\n                    block = False\n                    continue\n                if block:\n                    continue\n                iscsi_ids_row_arr = iscsi_ids_row_str.strip().split()\n                if len(iscsi_ids_row_arr) < consts.HOST_ISCSI_NAMES_SEVEN:\n                    continue\n                details = self.get_iscsi_details(\n                    iscsi_ids_row_arr[consts.HOST_ISCSI_NAMES_ZERO])\n                iscsi_d = {\n                    'iscsi_id': details.get('Host No.'),\n                    'name': details.get('Host Name'),\n                    'iqn': details.get('iSCSI Name'),\n                    'alias': details.get('Alias Name'),\n                    'address': None if\n                    consts.HOST_ISCSI_SPECIFIC_ONE in\n                    details.get('IP Address') else details.get('IP Address'),\n                    'os': details.get('Host Response Name')\n                }\n                iscsi_list.append(iscsi_d)\n        return iscsi_list\n\n    def get_iscsi_details(self, number):\n        details = {}\n        iscsi_details_str = self.cli_handler.exec_command(\n            consts.GET_HOST_ISCSI_NAMES_NUMBER.format(number))\n        if iscsi_details_str:\n            iscsi_ids_arr = iscsi_details_str.strip().replace('\\r', '') \\\n                .split('\\n')\n            for row_str in iscsi_ids_arr:\n                if not row_str or consts.CLI_STR in row_str:\n                    continue\n                iscsi_details_row_arr = row_str.strip().split('   ')\n                if len(iscsi_details_row_arr) < consts.HOST_ISCSI_NAMES_TWO:\n                    continue\n                key = row_str[:consts.HOST_ISCSI_DETAIL_EIGHTEEN].strip()\n                value = row_str[consts.HOST_ISCSI_DETAIL_EIGHTEEN:].strip()\n                details[key] = value\n        return details\n\n    def get_host_status(self):\n        status_d = {}\n        status_list = self.get_data(consts.GET_HOST_PATH_STATUS)\n        for status_row in status_list:\n            if len(status_row) < consts.HOST_PATH_STATUS_TOTAL:\n                continue\n            host_name = status_row[consts.HOST_PATH_STATUS_NAME]\n            path_state = status_row[consts.HOST_PATH_STATUS]\n            status_d[host_name] = path_state\n        return status_d\n\n    def list_storage_host_groups(self, ctx):\n        host_group_list = []\n        host_group_all = self.cli_handler.exec_command(\n            consts.GET_HOST_GROUPS_ALL)\n        if host_group_all:\n            host_group_all_arr = host_group_all.replace('\\r', '').split('\\n\\n')\n            for host_group_str in host_group_all_arr:\n                host_group_arr = host_group_str.split(\n                    consts.HOST_GROUPS_SPECIFIC_ONE)\n                host_group_row_arr = host_group_arr[\n                    consts.HOST_GROUP_ZERO].strip().split('\\n')\n                host_group_id = None\n              
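  # filled in from the table rows parsed below\n              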
  host_group_name = None\n                block = True\n                for host_group_row_str in host_group_row_arr:\n                    if not host_group_row_str or \\\n                            consts.CLI_STR in host_group_row_str:\n                        continue\n                    if consts.HOST_GROUPS_SPECIFIC_TWO in host_group_row_str:\n                        block = False\n                        continue\n                    if block:\n                        continue\n                    host_group = host_group_row_str.split()\n                    host_group_id = host_group[consts.HOST_GROUP_ZERO]\n                    host_group_name = host_group[consts.HOST_GROUP_ONE]\n                storage_hosts = self.get_storage_hosts(host_group_arr)\n                host_g = {\n                    'name': host_group_name,\n                    'storage_id': self.storage_id,\n                    'native_storage_host_group_id': host_group_id,\n                    'storage_hosts': storage_hosts,\n                }\n                host_group_list.append(host_g)\n        storage_host_grp_relation_list = []\n        for storage_host_group in host_group_list:\n            storage_hosts = storage_host_group.pop('storage_hosts', None)\n            if not storage_hosts:\n                continue\n            storage_hosts = storage_hosts.split(',')\n\n            for storage_host in storage_hosts:\n                storage_host_group_relation = {\n                    'storage_id': self.storage_id,\n                    'native_storage_host_group_id': storage_host_group.get(\n                        'native_storage_host_group_id'),\n                    'native_storage_host_id': storage_host\n                }\n                storage_host_grp_relation_list \\\n                    .append(storage_host_group_relation)\n        result = {\n            'storage_host_groups': host_group_list,\n            'storage_host_grp_host_rels': storage_host_grp_relation_list\n        }\n        return result\n\n    @staticmethod\n    def get_storage_hosts(host_group_arr):\n        storage_hosts = None\n        if len(host_group_arr) == consts.HOST_GROUP_TOTAL:\n            host_row_arr = host_group_arr[consts.HOST_GROUP_ONE].split('\\n')\n            block = True\n            for host_row_str in host_row_arr:\n                if not host_row_str or consts.CLI_STR in host_row_str:\n                    continue\n                if consts.HOST_GROUPS_SPECIFIC_TWO in host_row_str:\n                    block = False\n                    continue\n                if block:\n                    continue\n                host_arr = host_row_str.split()\n                host_id = host_arr[consts.HOST_GROUP_ONE]\n                if storage_hosts:\n                    storage_hosts = \"{0},{1}\".format(storage_hosts,\n                                                     host_id)\n                else:\n                    storage_hosts = \"{0}\".format(host_id)\n        return storage_hosts\n\n    def list_volume_groups(self, ctx):\n        vol_group_list = []\n        storage_id = self.storage_id\n        lun_groups_list = self.get_data(consts.GET_LUN_GROUPS)\n        for lun in lun_groups_list:\n            lun_groups_id = lun[consts.LUN_GROUPS_ID_COUNT]\n            lun_groups_name = lun[consts.LUN_GROUPS_NAME_COUNT]\n            volumes_str = self.get_lun_group_details(lun_groups_id)\n            vol_g = {\n                'name': lun_groups_name,\n                'storage_id': storage_id,\n                
'native_volume_group_id': lun_groups_id,\n                'volumes': volumes_str\n            }\n            vol_group_list.append(vol_g)\n        vol_grp_vol_relation_list = []\n        for vol_group in vol_group_list:\n            volumes = vol_group.pop('volumes', None)\n            if not volumes:\n                continue\n            for volume_id in volumes.split(','):\n                storage_volume_group_relation = {\n                    'storage_id': storage_id,\n                    'native_volume_group_id': vol_group.get(\n                        'native_volume_group_id'),\n                    'native_volume_id': volume_id\n                }\n                vol_grp_vol_relation_list \\\n                    .append(storage_volume_group_relation)\n        result = {\n            'volume_groups': vol_group_list,\n            'vol_grp_vol_rels': vol_grp_vol_relation_list\n        }\n        return result\n\n    def get_lun_group_details(self, lun_groups_id):\n        lun_group_details_str = self.cli_handler.exec_command(\n            consts.GET_LUN_GROUPS_LG_NUMBER.format(lun_groups_id))\n        volumes_str = None\n        if lun_group_details_str:\n            lun_group_details_arr = lun_group_details_str.strip(\n            ).replace('\\r', '').split('\\n')\n            block = True\n            for lun_details_row_str in lun_group_details_arr:\n                if not lun_details_row_str or \\\n                        consts.CLI_STR in lun_details_row_str:\n                    continue\n                if consts.LUN_GROUPS_SPECIFIC_TWO in lun_details_row_str:\n                    block = False\n                    continue\n                if block:\n                    continue\n                lun_details_arr = lun_details_row_str.strip().split()\n                volume_id = lun_details_arr[consts.LUN_VOLUME_ID]\n                if volumes_str:\n                    volumes_str = \"{0},{1}\".format(volumes_str, volume_id)\n                else:\n                    volumes_str = \"{0}\".format(volume_id)\n        return volumes_str\n\n    def list_port_groups(self, ctx):\n        port_group_list = []\n        storage_id = self.storage_id\n        port_groups_str = self.cli_handler.exec_command(consts.GET_PORT_GROUPS)\n        if port_groups_str:\n            port_groups_arr = port_groups_str.strip().replace('\\r', '').split(\n                '\\n\\n')\n            for port_group_str in port_groups_arr:\n                port_group_arr = port_group_str.split(\n                    consts.LIST_MASKING_VIEWS_SPECIFIC_TWO)\n                port_g_row_arr = port_group_arr[\n                    consts.PORT_GROUP_ROW_ARR_NUM].split('\\n')\n                port_group_id = None\n                port_group_name = None\n                block = True\n                for port_g_row_str in port_g_row_arr:\n                    if not port_g_row_str or \\\n                            consts.CLI_STR in port_g_row_str:\n                        continue\n                    if consts.LIST_MASKING_VIEWS_SPECIFIC_ONE \\\n                            in port_g_row_str:\n                        block = False\n                        continue\n                    if block:\n                        continue\n                    port_group = port_g_row_str.strip().split()\n                    port_group_id = port_group[consts.PORT_GROUP_ID_NUM]\n                    port_group_name = port_group[consts.PORT_GROUP_NAME_NUM]\n                    break\n                ports_str = None\n                if 
len(port_group_arr) == consts.PORT_GROUP_ARR_LENGTH:\n                    port_list_row_arr = port_group_arr[\n                        consts.PORT_LIST_ROW_ARR_NUM].strip().split('\\n')\n                    for port in port_list_row_arr:\n                        port_id = port.strip()\n                        if port_id in consts.CLI_STR:\n                            continue\n                        if ports_str:\n                            ports_str = \"{0},{1}\".format(ports_str, port_id)\n                        else:\n                            ports_str = \"{0}\".format(port_id)\n                port_g = {\n                    'name': port_group_name,\n                    'storage_id': storage_id,\n                    'native_port_group_id': port_group_id,\n                    'ports': ports_str\n                }\n                port_group_list.append(port_g)\n        port_grp_port_relation_list = []\n        for port_group in port_group_list:\n            ports = port_group.pop('ports', None)\n            if not ports:\n                continue\n            ports = ports.split(',')\n            for ports_id in ports:\n                port_groups_relation = {\n                    'storage_id': storage_id,\n                    'native_port_group_id': port_group.get(\n                        'native_port_group_id'),\n                    'native_port_id': ports_id\n                }\n                port_grp_port_relation_list \\\n                    .append(port_groups_relation)\n        result = {\n            'port_groups': port_group_list,\n            'port_grp_port_rels': port_grp_port_relation_list\n        }\n        return result\n\n    def list_masking_views(self, ctx):\n        list_masking_views = []\n        view_id_dict = {}\n        views_str = self.cli_handler.exec_command(consts.GET_HOST_AFFINITY)\n        if views_str:\n            views_arr = views_str.strip().replace('\\r', '').split('\\n\\n')\n            for views_group_str in views_arr:\n                if consts.LIST_MASKING_VIEWS_SPECIFIC_FOUR \\\n                        in views_group_str:\n                    self.get_host_group_views(\n                        view_id_dict, list_masking_views, views_group_str)\n                else:\n                    self.get_host_views(list_masking_views,\n                                        views_group_str, view_id_dict)\n        return list_masking_views\n\n    def get_host_views(self, list_masking_views,\n                       views_group_str, view_id_dict):\n        views_row_arr = views_group_str.strip().split('\\n')\n        block = True\n        key = []\n        port_id = None\n        for views_row_str in views_row_arr:\n            if not views_row_str or \\\n                    consts.CLI_STR in views_row_str:\n                continue\n            if consts.LIST_MASKING_VIEWS_SPECIFIC_FIVE in views_row_str:\n                port_id = views_row_str.split(\n                    consts.LIST_MASKING_VIEWS_SPECIFIC_SIX)[\n                    consts.LIST_MASKING_VIEWS_CONSTANT_ZERO]\n            self.get_group_key(views_row_str,\n                               consts.VIEWS_REGULAR_SPECIFIC_TWO, key)\n            if consts.LIST_MASKING_VIEWS_SPECIFIC_ONE in views_row_str:\n                block = False\n                continue\n            if block:\n                continue\n            if len(key) != consts.VIEWS_HOST_ROW_KEY_LENGTH:\n                continue\n            views_arr = views_row_str.strip().split()\n            volume_group_id = 
views_arr[consts.LIST_MASKING_VIEWS_CONSTANT_TWO]\n            host_name = views_arr[consts.HOST_NAME_NUM]\n            view_id = '{}{}{}{}'.format(\n                'host_group_id', volume_group_id, host_name, 'volume_id')\n            if view_id_dict.get(view_id):\n                continue\n            view_id_dict[view_id] = view_id\n            view = {\n                'native_masking_view_id': view_id,\n                'name': view_id,\n                'native_storage_host_id': host_name,\n                'native_volume_group_id': volume_group_id,\n                'native_port_id': port_id,\n                'storage_id': self.storage_id,\n            }\n            list_masking_views.append(view)\n\n    def get_host_group_views(self, view_id_dict, list_masking_views,\n                             views_group_str):\n        views_group_arr = views_group_str.strip().split(\n            consts.LIST_MASKING_VIEWS_SPECIFIC_FOUR)\n        views_group_row_arr = views_group_arr[\n            consts.VIEWS_GROUP_NUM_ZERO].strip().split('\\n')\n        block = True\n        group_key = []\n        for views_group_row in views_group_row_arr:\n            if not views_group_row or \\\n                    consts.CLI_STR in views_group_row:\n                continue\n            self.get_group_key(views_group_row,\n                               consts.VIEWS_REGULAR_SPECIFIC_ONE, group_key)\n            if consts.LIST_MASKING_VIEWS_SPECIFIC_ONE in views_group_row:\n                block = False\n                continue\n            if block:\n                continue\n            views_row_arr = views_group_row.strip().split()\n            if len(views_row_arr) != consts.VIEWS_GROUP_ROW_VALUE_LENGTH \\\n                    or len(group_key) != consts.VIEWS_GROUP_ROW_KEY_LENGTH:\n                continue\n            host_group_id = views_row_arr[consts.HOST_GROUP_ID_NUM]\n            volume_group_id = views_row_arr[consts.LUN_GROUP_ID_NUM]\n            view_id = '{}{}{}{}'.format(host_group_id, volume_group_id,\n                                        'host_id', 'volume_id')\n            if view_id_dict.get(view_id):\n                continue\n            view_id_dict[view_id] = view_id\n            view = {\n                'native_masking_view_id': view_id,\n                'name': view_id,\n                'native_storage_host_group_id': host_group_id,\n                'native_port_group_id': views_row_arr[\n                    consts.PORT_GROUP_ID_NUM],\n                'native_volume_group_id': volume_group_id,\n                'storage_id': self.storage_id,\n            }\n            list_masking_views.append(view)\n\n    @staticmethod\n    def get_group_key(views_group_row, regular_str, key):\n        title_pattern = re.compile(regular_str)\n        title_search_obj = title_pattern.search(views_group_row)\n        if title_search_obj:\n            views_row_arr = views_group_row.strip().split('  ')\n            for views in views_row_arr:\n                if views:\n                    key.append(views.strip())\n        return key\n"
  },
  {
    "path": "delfin/drivers/h3c/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/h3c/unistor_cf/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/h3c/unistor_cf/unistor_cf.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.drivers.hpe.hpe_3par.hpe_3parstor import Hpe3parStorDriver\n\n\nclass H3cUniStorCfDriver(Hpe3parStorDriver):\n    def get_storage(self, context):\n        storage_info = super().get_storage(context)\n        storage_info['vendor'] = 'H3C'\n        return storage_info\n"
  },
  {
    "path": "delfin/drivers/helper.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_log import log\n\nfrom delfin import cryptor\nfrom delfin import db\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\ndef encrypt_password(context, access_info):\n    for access in constants.ACCESS_TYPE:\n        if access_info.get(access):\n            access_info[access]['password'] = cryptor.encode(\n                access_info[access]['password'])\n\n\ndef check_storage_repetition(context, storage):\n    if not storage:\n        raise exception.StorageBackendNotFound()\n\n    if not storage.get('serial_number'):\n        msg = _(\"Serial number should be provided by storage.\")\n        raise exception.InvalidResults(msg)\n\n    filters = dict(serial_number=storage['serial_number'])\n    storage_list = db.storage_get_all(context, filters=filters)\n    if storage_list:\n        msg = (_(\"Failed to register storage. Reason: same serial number: \"\n                 \"%s detected.\") % storage['serial_number'])\n        LOG.error(msg)\n        raise exception.StorageAlreadyExists()\n\n\ndef check_storage_consistency(context, storage_id, storage_new):\n    \"\"\"Check storage response returned by driver whether it matches the\n    storage stored in database.\n\n    :param context: The context of delfin.\n    :type context: delfin.context.RequestContext\n    :param storage_id: The uuid of storage in database.\n    :type storage_id: string\n    :param storage_new: The storage response returned by driver.\n    :type storage_new: dict\n    \"\"\"\n    if not storage_new:\n        raise exception.StorageBackendNotFound()\n\n    if not storage_new.get('serial_number'):\n        msg = _(\"Serial number should be provided by storage.\")\n        raise exception.InvalidResults(msg)\n\n    storage_present = db.storage_get(context, storage_id)\n    if storage_new['serial_number'] != storage_present['serial_number']:\n        msg = (_(\"Serial number %s does not match \"\n                 \"the existing storage serial number %s.\") %\n               (storage_new['serial_number'],\n                storage_present['serial_number']))\n        raise exception.StorageSerialNumberMismatch(msg)\n"
  },
  {
    "path": "delfin/drivers/hitachi/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/hitachi/hnas/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/hitachi/hnas/constants.py",
    "content": "# Copyright 2021 The SODA Authors.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport re\n\nfrom delfin.common import constants\n\nDATA_HEAD_PATTERN = re.compile('[-]{3,}')\nALERT_HEAD_PATTERN = re.compile('[*]{3,}')\nSTORAGE_VENDOR = 'Hitachi'\nTIME_TYPE = '%Y-%m-%d %H:%M:%S'\n\nOID_TRAP_DATA = '1.3.6.1.4.1.11096.6.1.1'\n\nSTORAGE_INFO_COMMAND = \"cluster-show\"\nSTORAGE_MODEL_COMMAND = \"ver\"\nLOCATION_COMMAND = 'system-information-get'\n\nDISK_INFO_COMMAND = \"sd-list --scsi\"\n\nPOOL_INFO_COMMAND = \"span-list\"\nPOOL_SIZE_COMMAND = \"span-space-distribution\"\n\nCONTROLLER_INFO_COMMAND = \"cluster-show -y\"\n\nALERT_INFO_COMMAND = \"event-log-show -w -s -x\"\nALERT_TIME = \" --from '%s'\"\nALERT_FORMAT_TIME = \"%Y-%m-%d %H:%M:%S\"\n\nFC_PORT_COMMAND = \"fc-hports\"\nFC_SPEED_COMMAND = \"fc-link-speed\"\nETH_PORT_COMMAND = \"ifconfig\"\n\nFS_INFO_COMMAND = 'df -k'\nFS_STATUS_COMMAND = 'filesystem-list'\n\nCHECK_EVS = 'evs-select %s'\nQUOTA_INFO_COMMAND = \"quota list %s\"\n\nTREE_INFO_COMMAND = 'virtual-volume list --verbose %s'\n\nCIFS_SHARE_COMMAND = 'cifs-share list'\n\nNFS_SHARE_COMMAND = \"nfs-export list\"\n\nCLUSTER_STATUS = {\n    'Robust': constants.StorageStatus.NORMAL,\n    'Degraded': constants.StorageStatus.DEGRADED,\n    'Critical': constants.StorageStatus.ABNORMAL,\n    'OK': constants.StorageStatus.NORMAL,\n    'Failure(s)': constants.StorageStatus.ABNORMAL\n}\n\nSEVERITY_MAP = {\n    'Severe': constants.Severity.FATAL,\n    'Warning': constants.Severity.WARNING,\n    'Information': constants.Severity.INFORMATIONAL\n}\n\nFS_STATUS_MAP = {\n    'Fail!': constants.FilesystemStatus.FAULTY,\n    'OK': constants.FilesystemStatus.NORMAL,\n    'NoEVS': constants.FilesystemStatus.NORMAL,\n    'EVS-D': constants.FilesystemStatus.NORMAL,\n    'Hiddn': constants.FilesystemStatus.NORMAL,\n    'Clust': constants.FilesystemStatus.FAULTY,\n    'Unavl': constants.FilesystemStatus.NORMAL,\n    'Check': constants.FilesystemStatus.NORMAL,\n    'Fixng': constants.FilesystemStatus.NORMAL,\n    'Mount': constants.FilesystemStatus.NORMAL,\n    'MntRO': constants.FilesystemStatus.NORMAL,\n    'SysLk': constants.FilesystemStatus.NORMAL,\n    'SysRO': constants.FilesystemStatus.NORMAL,\n    'RepTg': constants.FilesystemStatus.NORMAL,\n    'Rcvry': constants.FilesystemStatus.NORMAL,\n    'UnMnt': constants.FilesystemStatus.FAULTY,\n    'Mntg': constants.FilesystemStatus.NORMAL,\n    'Formt': constants.FilesystemStatus.NORMAL,\n    'Failg': constants.FilesystemStatus.FAULTY,\n    None: constants.FilesystemStatus.NORMAL,\n}\n\nFS_INDEX = {\n    'status_len': 6,\n    'id_index': 1,\n    'pool_index': 2,\n    'status_index': 3,\n    'detail_len': 8,\n    'total_index': 3,\n    'used_index': 4,\n    'free_index': 7,\n    'type_index': 8,\n}\n\nETH_INDEX = {\n    'name_len': 1,\n    'name_index': 0,\n    'status_len': 2,\n    'status_index': 0,\n    'ip_len': 2,\n    'ip_index': 1,\n    'mask_index': 3\n}\n\nALERT_INDEX = {\n 
   'alert_len': 4,\n    'table_head': 0,\n    'severity_index': 1,\n    'year_index': 2,\n    'time_index': 3,\n    'id_index': 0\n}\n\nNODE_INDEX = {\n    'node_len': 2,\n    'status_index': 2,\n    'name_index': 1,\n    'id_index': 0\n}\n\nPOOL_INDEX = {\n    'pool_len': 6,\n    'total_index': 3,\n    'free_index': 0,\n    'status_index': 1,\n    'name_index': 0,\n}\n\nDISK_INDEX = {\n    'type_len': 2,\n    'model_index': 1,\n    'vendor_index': 0,\n    'version_index': 2\n}\n"
  },
  {
    "path": "delfin/drivers/hitachi/hnas/hds_nas.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.drivers import driver\nfrom delfin.drivers.hitachi.hnas import nas_handler\n\n\nclass HitachiHNasDriver(driver.StorageDriver):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.nas_handler = nas_handler.NasHandler(**kwargs)\n        self.nas_handler.login()\n\n    def reset_connection(self, context, **kwargs):\n        self.nas_handler.login()\n\n    def get_storage(self, context):\n        return self.nas_handler.get_storage()\n\n    def list_storage_pools(self, context):\n        return self.nas_handler.get_pool(self.storage_id)\n\n    def list_volumes(self, context):\n        return []\n\n    def list_controllers(self, context):\n        return self.nas_handler.list_controllers(self.storage_id)\n\n    def list_ports(self, context):\n        return self.nas_handler.list_ports(self.storage_id)\n\n    def list_disks(self, context):\n        return self.nas_handler.get_disk(self.storage_id)\n\n    def list_alerts(self, context, query_para=None):\n        return self.nas_handler.list_alerts(query_para)\n\n    def list_qtrees(self, context):\n        return self.nas_handler.list_qtrees(self.storage_id)\n\n    def list_quotas(self, context):\n        return self.nas_handler.list_quotas(self.storage_id)\n\n    def list_filesystems(self, context):\n        return self.nas_handler.list_filesystems(self.storage_id)\n\n    def list_shares(self, context):\n        return self.nas_handler.list_shares(self.storage_id)\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return nas_handler.NasHandler.parse_alert(alert)\n\n    def clear_alert(self, context, alert):\n        pass\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}'\n"
  },
  {
    "path": "delfin/drivers/hitachi/hnas/nas_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WarrayANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport hashlib\nimport time\n\nimport eventlet\nimport six\n\nfrom oslo_log import log as logging\n\nfrom oslo_utils import units\nfrom delfin import exception, utils\nfrom delfin.common import constants\nfrom delfin.drivers.utils import ssh_client\nfrom delfin.drivers.hitachi.hnas import constants as constant\nfrom delfin.drivers.utils.tools import Tools\n\nLOG = logging.getLogger(__name__)\n\n\nclass NasHandler(object):\n\n    def __init__(self, **kwargs):\n        self.ssh_pool = ssh_client.SSHPool(**kwargs)\n        self.evs_list = []\n\n    def ssh_do_exec(self, command_list):\n        res = None\n        with eventlet.Timeout(60, False):\n            res = self.ssh_pool.do_exec_shell(command_list)\n            while 'Failed to establish SSC connection' in res:\n                res = self.ssh_pool.do_exec_shell(command_list)\n        if res:\n            return res\n        else:\n            raise \\\n                exception.ConnectTimeout(\n                    'Failed to establish SSC connection from hitachi hnas'\n                )\n\n    def login(self):\n        try:\n            result = self.ssh_do_exec(['cluster-show -y'])\n            if 'EVS' not in result:\n                raise exception.InvalidIpOrPort()\n        except Exception as e:\n            LOG.error(\"Failed to login hnas %s\" %\n                      (six.text_type(e)))\n            raise e\n\n    @staticmethod\n    def format_data_to_map(\n            value_info,\n            value_key,\n            line='\\r\\n',\n            split=\":\",\n            split_key=None):\n        map_list = []\n        detail_array = value_info.split(line)\n        value_map = {}\n        for detail in detail_array:\n            if detail:\n                string_info = detail.split(split)\n                key = string_info[0].replace(' ', '')\n                value = ''\n                if len(string_info) > 1:\n                    for string in string_info[1:]:\n                        value += string.\\\n                            replace('\"\"', '').\\\n                            replace('\\'', '').\\\n                            replace(' ', '')\n                if value_map.get(key):\n                    value_map[key + '1'] = value\n                else:\n                    value_map[key] = value\n            else:\n                if value_key in value_map:\n                    map_list.append(value_map)\n                value_map = {}\n            if split_key and split_key in detail:\n                if value_key in value_map:\n                    map_list.append(value_map)\n                value_map = {}\n        if value_key in value_map:\n            map_list.append(value_map)\n        return map_list\n\n    @staticmethod\n    def get_table_data(values, is_alert=False):\n        header_index = 0\n        table = values.split('\\r\\n')\n        for i in range(len(table)):\n            
if constant.DATA_HEAD_PATTERN.search(table[i]):\n                header_index = i\n            if is_alert and constant.ALERT_HEAD_PATTERN.search(table[i]):\n                header_index = i\n                return table[(header_index + 1):]\n        return table[(header_index + 1):]\n\n    def format_storage_info(self, storage_map_list,\n                            model_map_list, version_map_list,\n                            location_map_list, serial_map_list):\n        if not storage_map_list:\n            raise exception.StorageBackendException(\n                'Failed to get HNAS storage')\n        model_map = model_map_list[-1] if model_map_list else {}\n        model = model_map.get('Model')\n        model = model.replace('HNAS', 'HNAS ')\n        version_map = version_map_list[-1] if version_map_list else {}\n        location_map = location_map_list[-1] if location_map_list else {}\n        serial_map = serial_map_list[-1] if serial_map_list else {}\n        version = version_map.get(\"Software\").split('(')\n        serial_number = serial_map.get(\"Hardware\").split('(')[-1]\n        storage_map = storage_map_list[-1]\n        disk_list = self.get_disk(None)\n        total_capacity = \\\n            raw_capacity = \\\n            used_capacity = \\\n            free_capacity = 0\n        for disk in disk_list:\n            raw_capacity += disk['capacity']\n        status = \\\n            constant.CLUSTER_STATUS.get(storage_map['ClusterHealth'])\n        pool_list = self.get_pool(None)\n        for pool in pool_list:\n            total_capacity += pool['total_capacity']\n            used_capacity += pool['used_capacity']\n            free_capacity += pool['free_capacity']\n        storage_model = {\n            \"name\": storage_map['ClusterName'],\n            \"vendor\": constant.STORAGE_VENDOR,\n            \"model\": model,\n            \"status\": status,\n            \"serial_number\": serial_number.replace(')', ''),\n            \"firmware_version\": version[0],\n            \"location\": location_map['Location'],\n            \"total_capacity\": total_capacity,\n            \"raw_capacity\": raw_capacity,\n            \"used_capacity\": used_capacity,\n            \"free_capacity\": free_capacity\n        }\n        return storage_model\n\n    def get_storage(self):\n        try:\n            storage_info = self.ssh_do_exec([constant.STORAGE_INFO_COMMAND])\n            model_info = self.ssh_do_exec([constant.STORAGE_MODEL_COMMAND])\n            location_info = self.ssh_do_exec(([constant.LOCATION_COMMAND]))\n            model_map_list = \\\n                self.format_data_to_map(model_info, 'Model')\n            storage_map_list = \\\n                self.format_data_to_map(\n                    storage_info, 'ClusterName', split=\"=\")\n            version_map_list = \\\n                self.format_data_to_map(model_info, 'Software')\n            location_map_list = \\\n                self.format_data_to_map(location_info, 'Location')\n            serial_map_list =\\\n                self.format_data_to_map(model_info, 'Hardware')\n            storage_model = \\\n                self.format_storage_info(\n                    storage_map_list, model_map_list, version_map_list,\n                    location_map_list, serial_map_list)\n            return storage_model\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(e.msg))\n            
LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_disk(self, storage_id):\n        try:\n            disk_info = self.ssh_do_exec([constant.DISK_INFO_COMMAND])\n            disk_map_list = \\\n                self.format_data_to_map(disk_info, 'Capacity')\n            disks_list = []\n            for disk_map in disk_map_list:\n                if 'Status' in disk_map:\n                    size = disk_map['Capacity'].split('GiB')[0] + \"GB\"\n                    status = constants.DiskStatus.NORMAL \\\n                        if disk_map['Status'] == 'OK' \\\n                        else constants.DiskStatus.ABNORMAL\n                    disk_type = disk_map['Type']\n                    type_array = disk_type.split(';')\n                    model = vendor = version = None\n                    if len(type_array) > constant.DISK_INDEX['type_len']:\n                        model = \\\n                            type_array[constant.DISK_INDEX[\n                                'model_index']].replace('Model', '')\n                        vendor = \\\n                            type_array[constant.DISK_INDEX[\n                                'vendor_index']].replace('Make', '')\n                        version = \\\n                            type_array[constant.DISK_INDEX[\n                                'version_index']].replace('Revision', '')\n                    pool_id = disk_map.get('Usedinspan')\n                    serial_number = disk_map['Luid'].split(']')[-1]\n                    if pool_id:\n                        pool_id = pool_id.split('(')[0]\n                    disk_model = {\n                        'name': disk_map['HDSdevname'],\n                        'storage_id': storage_id,\n                        'native_disk_id': disk_map['DeviceID'],\n                        'serial_number': serial_number,\n                        'manufacturer': vendor,\n                        'model': model,\n                        'firmware': version,\n                        'capacity': int(Tools.get_capacity_size(size)),\n                        'status': status,\n                        'native_disk_group_id': pool_id\n                    }\n                    disks_list.append(disk_model)\n            return disks_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get disk from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(e.msg))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get disk from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_pool_size(self):\n        size_info = self.ssh_do_exec([constant.POOL_SIZE_COMMAND])\n        size_array = size_info.split('\\r\\n')\n        size_map = {}\n        pool_name = None\n        for size in size_array:\n            if 'Span ' in size:\n                pool_name = size.split()[-1].replace(':', '')\n                size_map[pool_name] = 0\n            if '[Free space]' in size:\n                free_array = size.split()\n                if len(free_array) > 2:\n                    free_size = free_array[0].replace('GiB', 'GB')\n                    
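# A span may report several '[Free space]' chunks; sum them so\n                    # size_map holds each span's total free capacity.\n                    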
size_map[pool_name] += Tools.get_capacity_size(free_size)\n        return size_map\n\n    def get_pool(self, storage_id):\n        try:\n            pool_info = self.ssh_do_exec([constant.POOL_INFO_COMMAND])\n            pool_list = []\n            pool_array = self.get_table_data(pool_info)\n            size_map = self.get_pool_size()\n            for pool in pool_array:\n                value_array = pool.split()\n                if len(value_array) == constant.POOL_INDEX['pool_len']:\n                    total_capacity = \\\n                        Tools.get_capacity_size(\n                            value_array[constant.POOL_INDEX['total_index']] +\n                            'GB')\n                    free_capacity = \\\n                        size_map.get(\n                            value_array[constant.POOL_INDEX['free_index']],\n                            total_capacity)\n                    status = constants.StoragePoolStatus.NORMAL \\\n                        if value_array[\n                            constant.POOL_INDEX['status_index']] == 'Yes' \\\n                        else constants.StoragePoolStatus.ABNORMAL\n                    pool_model = {\n                        'name': value_array[constant.POOL_INDEX['name_index']],\n                        'storage_id': storage_id,\n                        'native_storage_pool_id': value_array[\n                            constant.POOL_INDEX['name_index']],\n                        'status': status,\n                        'storage_type': constants.StorageType.FILE,\n                        'total_capacity': total_capacity,\n                        'used_capacity': total_capacity - free_capacity,\n                        'free_capacity': free_capacity,\n                    }\n                    pool_list.append(pool_model)\n            return pool_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get pool from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(e.msg))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get pool from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_controllers(self, storage_id):\n        try:\n            controller_list = []\n            node_info = self.ssh_do_exec([constant.CONTROLLER_INFO_COMMAND])\n            nodes_array = self.get_table_data(node_info)\n            for nodes in nodes_array:\n                node = nodes.split()\n                if len(node) > constant.NODE_INDEX['node_len']:\n                    status = constants.ControllerStatus.NORMAL \\\n                        if node[\n                            constant.NODE_INDEX[\n                                'status_index']] == 'ONLINE' \\\n                        else constants.ControllerStatus.OFFLINE\n                    controller_model = {\n                        'name': node[constant.NODE_INDEX['name_index']],\n                        'storage_id': storage_id,\n                        'native_controller_id': node[\n                            constant.NODE_INDEX['id_index']],\n                        'status': status\n                    }\n                    controller_list.append(controller_model)\n            return controller_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get controllers from \" \\\n                      
\"hitachi nas: %s\" % (six.text_type(e.msg))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get controllers from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    @staticmethod\n    def format_alert_list(alert_array, query_para):\n        alert_list = []\n        alert_model = {}\n        for alert in alert_array:\n            if alert and 'CAUSE' not in alert:\n                alert_data = alert.split()\n                if len(alert_data) > constant.ALERT_INDEX['alert_len'] \\\n                        and alert_data[\n                    constant.ALERT_INDEX['severity_index']] \\\n                        in constant.SEVERITY_MAP:\n                    occur_time = \\\n                        alert_data[constant.ALERT_INDEX['year_index']] + \\\n                        ' ' + alert_data[constant.ALERT_INDEX[\n                            'time_index']].split(\"+\")[0]\n                    occur_time = \\\n                        int(time.mktime(time.strptime(\n                            occur_time, constant.TIME_TYPE))) * 1000\n                    if not query_para or \\\n                            (int(query_para['begin_time'])\n                             <= occur_time\n                             <= int(query_para['end_time'])):\n                        description = ''\n                        for i in range(4, len(alert_data)):\n                            description += alert_data[i] + ' '\n                        severity = \\\n                            constant.SEVERITY_MAP.get(\n                                alert_data[constant.ALERT_INDEX[\n                                    'severity_index']])\n                        alert_model['alert_id'] = \\\n                            alert_data[constant.ALERT_INDEX['id_index']]\n                        alert_model['alert_name'] = \\\n                            alert_data[constant.ALERT_INDEX['id_index']]\n                        alert_model['severity'] = severity\n                        alert_model['category'] = constants.Category.FAULT\n                        alert_model['type'] = \\\n                            constants.EventType.EQUIPMENT_ALARM\n                        alert_model['occur_time'] = occur_time\n                        alert_model['description'] = description.lstrip()\n                        alert_model['match_key'] = \\\n                            hashlib.md5(\n                                (alert_data[constant.ALERT_INDEX['id_index']]\n                                    + severity\n                                    + description).encode()).hexdigest()\n                        alert_model['resource_type'] = \\\n                            constants.DEFAULT_RESOURCE_TYPE\n            if alert and alert_model and 'CAUSE' in alert:\n                alert_data = alert.split(':')\n                alert_model['location'] = alert_data[-1]\n            if not alert:\n                alert_list.append(alert_model)\n                alert_model = {}\n        return alert_list\n\n    def list_alerts(self, query_para):\n        try:\n            command = constant.ALERT_INFO_COMMAND\n            if query_para and 'begin_time' in query_para:\n                timeArray = time.gmtime(int(query_para['begin_time']) / 1000)\n                begin_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n                command += 
constant.ALERT_TIME % begin_time\n            alert_info = self.ssh_do_exec([command])\n            alert_array = self.get_table_data(alert_info, True)\n            alert_list = self.format_alert_list(alert_array, query_para)\n            alert_list = \\\n                sorted(alert_list,\n                       key=lambda x: x['occur_time'], reverse=True)\n            return alert_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get alerts from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(e.msg))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get alerts from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    @staticmethod\n    def parse_alert(alert):\n        try:\n            alert_info = alert.get(constant.OID_TRAP_DATA)\n            alert_array = alert_info.split(':')\n            if len(alert_array) > 1:\n                description = alert_array[1]\n                alert = alert_array[0].split()\n                if len(alert) > 1:\n                    alert_id = alert[0]\n                    severity = constant.SEVERITY_MAP.get(alert[1])\n                    if severity == constant.SEVERITY_MAP.get('Information'):\n                        return\n                    alert_model = {\n                        'alert_id': alert_id,\n                        'alert_name': alert_id,\n                        'severity': severity,\n                        'category': constants.Category.FAULT,\n                        'type': constants.EventType.EQUIPMENT_ALARM,\n                        'occur_time': utils.utcnow_ms(),\n                        'description': description,\n                        'match_key': hashlib.md5(\n                            (alert_id + severity +\n                             description).encode()).hexdigest(),\n                        'resource_type': constants.DEFAULT_RESOURCE_TYPE,\n                        'location': ''\n                    }\n                    return alert_model\n        except exception.DelfinException as e:\n            err_msg = \"Failed to parse alert from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(e.msg))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to parse alert from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_ports(self, storage_id):\n        try:\n            ports_list = self.get_fc_port(storage_id)\n            return ports_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get ports from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(e.msg))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get ports from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_fc_port(self, storage_id):\n        try:\n            fc_info = self.ssh_do_exec([constant.FC_PORT_COMMAND])\n            fc_map_list = \\\n                self.format_data_to_map(fc_info, 'Portname')\n            fc_list = []\n            speed_info = 
self.ssh_do_exec([constant.FC_SPEED_COMMAND])\n            speed_map_list = \\\n                self.format_data_to_map(speed_info, 'FC1')\n            speed_map = speed_map_list[-1]\n            for value_map in fc_map_list:\n                if 'Portname' in value_map:\n                    status = value_map.get('Status')\n                    health = constants.PortHealthStatus.ABNORMAL\n                    if status == 'Good':\n                        health = constants.PortHealthStatus.NORMAL\n                    connection_status = \\\n                        constants.PortConnectionStatus.DISCONNECTED\n                    if 'FCLinkisup' in value_map:\n                        connection_status = \\\n                            constants.PortConnectionStatus.CONNECTED\n                    port_id = ''\n                    for key in value_map.keys():\n                        if 'HostPort' in key:\n                            port_id = key.replace('HostPort', '')\n                            break\n                    speed = \\\n                        int(speed_map.get('FC' + port_id).replace('Gbps', ''))\n                    fc_model = {\n                        'name': 'FC' + port_id,\n                        'storage_id': storage_id,\n                        'native_port_id': port_id,\n                        'connection_status': connection_status,\n                        'health_status': health,\n                        'type': constants.PortType.FC,\n                        'speed': speed * units.G,\n                        'max_speed': 8 * units.G,\n                        'wwn': value_map.get('Portname'),\n                    }\n                    fc_list.append(fc_model)\n            return fc_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get fc ports from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(e.msg))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get fc ports from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_filesystems(self, storage_id):\n        try:\n            fs_list = []\n            fs_info = self.ssh_do_exec([constant.FS_INFO_COMMAND])\n            fs_array = self.get_table_data(fs_info)\n            status_info = self.ssh_do_exec([constant.FS_STATUS_COMMAND])\n            status_array = self.get_table_data(status_info)\n            status_map = {}\n            for status in status_array:\n                status_info = status.split()\n                if len(status_info) > constant.FS_INDEX['status_len']:\n                    status_map[status_info[constant.FS_INDEX['id_index']]] = \\\n                        [status_info[constant.FS_INDEX['pool_index']],\n                         status_info[constant.FS_INDEX['status_index']]]\n            for fs in fs_array:\n                fs_info = list(filter(None, fs.split('  ')))\n                if len(fs_info) > constant.FS_INDEX['detail_len']:\n                    total_capacity = \\\n                        fs_info[constant.FS_INDEX['total_index']].replace(\n                            ' ', '')\n                    used_capacity = \\\n                        fs_info[constant.FS_INDEX['used_index']].replace(\n                            ' ', '').split('(')[0]\n                    free_capacity = \\\n                        
fs_info[constant.FS_INDEX['free_index']].replace(\n                            ' ', '').split('(')[0]\n                    total_capacity = Tools.get_capacity_size(total_capacity)\n                    used_capacity = Tools.get_capacity_size(used_capacity)\n                    free_capacity = Tools.get_capacity_size(free_capacity)\n                    volume_type = constants.VolumeType.THICK \\\n                        if fs_info[constant.FS_INDEX['type_index']] == 'No' \\\n                        else constants.VolumeType.THIN\n                    pool_id = status_map.get(fs_info[0])[0] \\\n                        if status_map.get(fs_info[0]) else None\n                    status = status_map.get(fs_info[0])[1] \\\n                        if status_map.get(fs_info[0]) else None\n                    fs_model = {\n                        'name': fs_info[1],\n                        'storage_id': storage_id,\n                        'native_filesystem_id': fs_info[1],\n                        'native_pool_id': pool_id,\n                        'status': constant.FS_STATUS_MAP[status],\n                        'type': volume_type,\n                        'total_capacity': total_capacity,\n                        'used_capacity': used_capacity,\n                        'free_capacity': free_capacity\n                    }\n                    fs_list.append(fs_model)\n            return fs_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get filesystem from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(e.msg))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get filesystem from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_fs_evs(self):\n        fs_info = self.ssh_do_exec([constant.FS_STATUS_COMMAND])\n        fs_array = self.get_table_data(fs_info)\n        evs_list = []\n        for fs in fs_array:\n            fs_info_array = fs.split()\n            if len(fs_info_array) > 6:\n                evs_list.append([fs_info_array[0], fs_info_array[4]])\n        return evs_list\n\n    def list_quotas(self, storage_id):\n        try:\n            evs_list = self.get_fs_evs()\n            quota_list = []\n            for evs in evs_list:\n                quota_info = self.ssh_do_exec([\n                    constant.CHECK_EVS % evs[1],\n                    constant.QUOTA_INFO_COMMAND % evs[0]])\n                quota_map_list = \\\n                    self.format_data_to_map(quota_info, 'Usage')\n                for quota_map in quota_map_list:\n                    quota_type = None\n                    user_group_name = None\n                    qtree_id = None\n                    if 'Target' in quota_map:\n                        if 'Group' in quota_map.get('Target'):\n                            quota_type = constants.QuotaType.GROUP\n                            user_group_name = \\\n                                quota_map.get('Target').replace('Group', '')\n                        elif 'User' in quota_map.get('Target'):\n                            quota_type = constants.QuotaType.USER\n                            user_group_name = \\\n                                quota_map.get('Target').replace('User', '')\n                        elif 'ViVol' in quota_map.get('Target'):\n                            quota_type = 
constants.QuotaType.TREE\n                            user_group_name = \\\n                                quota_map.get('Target').replace('ViVol', '')\n                            qtree_id = evs[0] + '-' + user_group_name\n                    quota_id = \\\n                        evs[0] + '-' + quota_type + '-' + user_group_name\n                    capacity_hard_limit, capacity_soft_limit = None, None\n                    file_soft_limit, file_hard_limit = None, None\n                    if 'Soft' in quota_map.get('Limit'):\n                        capacity_soft_limit = \\\n                            quota_map.get('Limit').replace('(Soft)', '')\n                    elif 'Hard' in quota_map.get('Limit'):\n                        capacity_hard_limit = capacity_soft_limit = \\\n                            quota_map.get('Limit').replace('(Hard)', '')\n                    if 'Soft' in quota_map.get('Limit1'):\n                        file_soft_limit = \\\n                            quota_map.get('Limit1').replace('(Soft)', '')\n                    elif 'Hard' in quota_map.get('Limit1'):\n                        file_soft_limit = file_hard_limit = \\\n                            quota_map.get('Limit1').replace('(Hard)', '')\n                    quota = {\n                        'native_quota_id': quota_id,\n                        'type': quota_type,\n                        'storage_id': storage_id,\n                        'native_filesystem_id': evs[0],\n                        'native_qtree_id': qtree_id,\n                        \"capacity_hard_limit\": capacity_hard_limit,\n                        'capacity_soft_limit':\n                            Tools.get_capacity_size(capacity_soft_limit),\n                        \"file_hard_limit\": file_hard_limit,\n                        'file_soft_limit': file_soft_limit,\n                        'file_count': quota_map.get('FileCount'),\n                        'used_capacity':\n                            Tools.get_capacity_size(quota_map.get('Usage')),\n                        'user_group_name': user_group_name\n                    }\n                    quota_list.append(quota)\n            return quota_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage quota from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage quota from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_qtrees(self, storage_id):\n        try:\n            evs_list = self.get_fs_evs()\n            return self.get_qtree(evs_list, storage_id)\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage qtree from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage qtree from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_qtree(self, evs_list, storage_id):\n        qtree_list = []\n        for evs in evs_list:\n            tree_info = self.ssh_do_exec([\n                constant.CHECK_EVS % evs[1],\n                constant.TREE_INFO_COMMAND % 
evs[0]])\n            tree_map_list = \\\n                self.format_data_to_map(\n                    tree_info, 'root', split_key='last modified')\n            for qt_map in tree_map_list:\n                qt_name = ''\n                for key in qt_map:\n                    if qt_map[key] == '' and key != 'email':\n                        qt_name = key\n                qt_id = evs[0] + '-' + qt_name\n                qt_model = {\n                    'name': qt_name,\n                    'storage_id': storage_id,\n                    'native_qtree_id': qt_id,\n                    'path': qt_map.get('root'),\n                    'native_filesystem_id': evs[0],\n                }\n                qtree_list.append(qt_model)\n        return qtree_list\n\n    def get_cifs_share(self, evs_list, storage_id):\n        share_list = []\n        evs_array = []\n        for evs in evs_list:\n            if evs[1] not in evs_array:\n                evs_array.append(evs[1])\n        for evs in evs_array:\n            cifs_share = self.ssh_do_exec([\n                constant.CHECK_EVS % evs,\n                constant.CIFS_SHARE_COMMAND])\n            cifs_map_list = \\\n                self.format_data_to_map(cifs_share, 'Sharename')\n            for cifs in cifs_map_list:\n                qtree_id = None\n                if 'VirtualVolume' in cifs.get('Sharecomment'):\n                    qtree = cifs.get('Sharecomment').split('Volume')\n                    if cifs.get('Filesystemlabel'):\n                        qtree_id = \\\n                            cifs.get('Filesystemlabel') + '-' + qtree[1]\n                if cifs.get('Filesystemlabel'):\n                    native_share_id = \\\n                        '%s-%s-%s' % (cifs.get('Filesystemlabel'),\n                                      cifs.get('Sharename'),\n                                      constants.ShareProtocol.CIFS)\n                else:\n                    native_share_id = \\\n                        cifs.get('Sharename') + '-' + \\\n                        constants.ShareProtocol.CIFS\n                share = {\n                    'name': cifs.get('Sharename'),\n                    'storage_id': storage_id,\n                    'native_share_id': native_share_id,\n                    'native_qtree_id': qtree_id,\n                    'native_filesystem_id': cifs.get('Filesystemlabel'),\n                    'path': cifs.get('Sharepath'),\n                    'protocol': constants.ShareProtocol.CIFS\n                }\n                share_list.append(share)\n        return share_list\n\n    def get_nfs_share(self, evs_list, storage_id):\n        share_list = []\n        evs_array = []\n        for evs in evs_list:\n            if evs[1] not in evs_array:\n                evs_array.append(evs[1])\n        for evs in evs_array:\n            nfs_share = self.ssh_do_exec([\n                constant.CHECK_EVS % evs,\n                constant.NFS_SHARE_COMMAND])\n            nfs_map_list = \\\n                self.format_data_to_map(nfs_share, 'Exportname')\n            qtree_list = self.get_qtree(evs_list, None)\n            for nfs in nfs_map_list:\n                qtree_id = None\n                for qtree in qtree_list:\n                    if nfs.get('Exportpath') == qtree['path'] \\\n                            and qtree['native_filesystem_id'] \\\n                            == nfs.get('Filesystemlabel'):\n                        qtree_id = qtree['native_qtree_id']\n                if nfs.get('Filesystemlabel'):\n                    native_share_id = \\\n                        nfs.get('Filesystemlabel') \\\n                        + '-' + nfs.get('Exportname') \\\n                        + '-' + constants.ShareProtocol.NFS\n                else:\n                    native_share_id = \\\n                        nfs.get('Exportname') + '-' + \\\n                        constants.ShareProtocol.NFS\n                share = {\n                    'name': nfs.get('Exportname'),\n                    'storage_id': storage_id,\n                    'native_share_id': native_share_id,\n                    'native_qtree_id': qtree_id,\n                    'native_filesystem_id': nfs.get('Filesystemlabel'),\n                    'path': nfs.get('Exportpath'),\n                    'protocol': constants.ShareProtocol.NFS\n                }\n                share_list.append(share)\n        return share_list\n\n    def list_shares(self, storage_id):\n        try:\n            evs_list = self.get_fs_evs()\n            share_list = []\n            share_list.extend(self.get_cifs_share(evs_list, storage_id))\n            share_list.extend(self.get_nfs_share(evs_list, storage_id))\n            return share_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage share from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage share from \" \\\n                      \"hitachi nas: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n"
  },
  {
    "path": "delfin/drivers/hitachi/vsp/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/hitachi/vsp/consts.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nSOCKET_TIMEOUT = 180\nERROR_SESSION_INVALID_CODE = 403\nERROR_SESSION_IS_BEING_USED_CODE = 409\nBLOCK_SIZE = 512\nLDEV_NUMBER_OF_PER_REQUEST = 300\nSUPPORTED_VSP_SERIES = ('VSP G350', 'VSP G370', 'VSP G700', 'VSP G900',\n                        'VSP F350', 'VSP F370', 'VSP F700', 'VSP F900')\n# the max number when get volumes in a request\nMAX_VOLUME_NUMBER = 16384\n"
  },
  {
    "path": "delfin/drivers/hitachi/vsp/rest_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport threading\nimport time\n\nimport requests\nimport six\nfrom oslo_log import log as logging\n\nfrom delfin import cryptor\nfrom delfin import exception\nfrom delfin.drivers.hitachi.vsp import consts\nfrom delfin.drivers.utils.rest_client import RestClient\n\nLOG = logging.getLogger(__name__)\n\n\nclass RestHandler(RestClient):\n    COMM_URL = '/ConfigurationManager/v1/objects/storages'\n    LOGOUT_URL = '/ConfigurationManager/v1/objects/sessions/'\n\n    AUTH_KEY = 'Authorization'\n\n    def __init__(self, **kwargs):\n        super(RestHandler, self).__init__(**kwargs)\n        self.session_lock = threading.Lock()\n        self.session_id = None\n        self.storage_device_id = None\n        self.device_model = None\n        self.serial_number = None\n\n    def call(self, url, data=None, method=None,\n             calltimeout=consts.SOCKET_TIMEOUT):\n        try:\n            res = self.call_with_token(url, data, method, calltimeout)\n            if (res.status_code == consts.ERROR_SESSION_INVALID_CODE\n                    or res.status_code ==\n                    consts.ERROR_SESSION_IS_BEING_USED_CODE):\n                LOG.error(\"Failed to get token=={0}=={1},get token again\"\n                          .format(res.status_code, res.text))\n                # if method is logout,return immediately\n                if method == 'DELETE' and RestHandler. \\\n                        LOGOUT_URL in url:\n                    return res\n                if self.get_token():\n                    res = self.call_with_token(url, data, method, calltimeout)\n                else:\n                    LOG.error('Login error,get access_session failed')\n            elif res.status_code == 503:\n                raise exception.InvalidResults(res.text)\n\n            return res\n\n        except Exception as e:\n            err_msg = \"Get RestHandler.call failed: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n    def call_with_token(self, url, data, method, calltimeout):\n        auth_key = None\n        if self.session:\n            auth_key = self.session.headers.get(RestHandler.AUTH_KEY, None)\n            if auth_key:\n                self.session.headers[RestHandler.AUTH_KEY] \\\n                    = cryptor.decode(auth_key)\n        res = self. 
\\\n            do_call(url, data, method, calltimeout)\n        if auth_key:\n            self.session.headers[RestHandler.AUTH_KEY] = auth_key\n        return res\n\n    def get_rest_info(self, url, timeout=consts.SOCKET_TIMEOUT, data=None):\n        result_json = None\n        if self.session and url != RestHandler.COMM_URL:\n            auth_key = self.session.headers.get(RestHandler.AUTH_KEY, None)\n            if auth_key is None:\n                self.get_token()\n        res = self.call(url, data, 'GET', timeout)\n        if res.status_code == 200:\n            result_json = res.json()\n        return result_json\n\n    def get_token(self):\n        try:\n            succeed = False\n            if self.san_address:\n                url = '%s/%s/sessions' % \\\n                      (RestHandler.COMM_URL,\n                       self.storage_device_id)\n                data = {}\n\n                with self.session_lock:\n                    if self.session is None:\n                        self.init_http_head()\n                    self.session.auth = \\\n                        requests.auth.HTTPBasicAuth(\n                            self.rest_username,\n                            cryptor.decode(self.rest_password))\n                    res = self.call_with_token(url, data, 'POST', 30)\n                    if res.status_code == 200:\n                        succeed = True\n                        result = res.json()\n                        self.session_id = cryptor.encode(\n                            result.get('sessionId'))\n                        access_session = 'Session %s' % result.get('token')\n                        self.session.headers[\n                            RestHandler.AUTH_KEY] = cryptor.encode(\n                            access_session)\n                    else:\n                        LOG.error(\"Login error. 
URL: %(url)s\\n\"\n                                  \"Reason: %(reason)s.\",\n                                  {\"url\": url, \"reason\": res.text})\n                        if 'authentication failed' in res.text:\n                            raise exception.InvalidUsernameOrPassword()\n                        elif 'KART30005-E' in res.text:\n                            raise exception.StorageBackendException(\n                                six.text_type(res.text))\n                        else:\n                            raise exception.BadResponse(res.text)\n            else:\n                LOG.error('Token Parameter error')\n\n            return succeed\n        except Exception as e:\n            LOG.error(\"Get token error: %s\", six.text_type(e))\n            raise e\n\n    def login(self):\n        try:\n            self.get_device_id()\n        except Exception as e:\n            LOG.error(\"Login error: %s\", six.text_type(e))\n            raise e\n\n    def logout(self):\n        try:\n            url = RestHandler.LOGOUT_URL\n            if self.session_id is not None:\n                url = '%s/%s/sessions/%s' % \\\n                      (RestHandler.COMM_URL,\n                       self.storage_device_id,\n                       cryptor.decode(self.session_id))\n                if self.san_address:\n                    self.call(url, method='DELETE')\n                    url = None\n                    self.session_id = None\n                    self.storage_device_id = None\n                    self.device_model = None\n                    self.serial_number = None\n                    self.session = None\n            else:\n                LOG.error('logout error:session id not found')\n        except Exception as err:\n            LOG.error('logout error:{}'.format(err))\n            raise exception.StorageBackendException(\n                reason='Failed to Logout from restful')\n\n    def get_device_id(self):\n        try:\n            if self.session is None:\n                self.init_http_head()\n            storage_systems = self.get_system_info()\n            system_info = storage_systems.get('data')\n            for system in system_info:\n                self.storage_device_id = system.get('storageDeviceId')\n                self.device_model = system.get('model')\n                self.serial_number = system.get('serialNumber')\n                if system.get('svpIp'):\n                    if system.get('svpIp') == self.rest_host:\n                        self.storage_device_id = system.get('storageDeviceId')\n                        self.device_model = system.get('model')\n                        self.serial_number = system.get('serialNumber')\n                        break\n                elif system.get('ctl1Ip') == self.rest_host or \\\n                        system.get('ctl2Ip') == self.rest_host:\n                    self.storage_device_id = system.get('storageDeviceId')\n                    self.device_model = system.get('model')\n                    self.serial_number = system.get('serialNumber')\n                    break\n            if self.storage_device_id is None:\n                error_msg = f'Get device id fail,' \\\n                    f'system info:{storage_systems}'\n                LOG.error(error_msg)\n                raise exception.StorageBackendException(error_msg)\n        except Exception as e:\n            LOG.error(\"Get device id error: %s\", six.text_type(e))\n            raise e\n\n    def get_firmware_version(self):\n       
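 # 'dkcMicroVersion' returned by the storage-detail endpoint is\n        # reported as the array's firmware version.\n       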
 url = '%s/%s' % \\\n              (RestHandler.COMM_URL, self.storage_device_id)\n        result_json = self.get_rest_info(url)\n        if result_json is None:\n            return None\n        firmware_version = result_json.get('dkcMicroVersion')\n\n        return firmware_version\n\n    def get_capacity(self):\n        url = '%s/%s/total-capacities/instance' % \\\n              (RestHandler.COMM_URL, self.storage_device_id)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_pools(self):\n        url = '%s/%s/pools' % \\\n              (RestHandler.COMM_URL, self.storage_device_id)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_volumes(self, head_id,\n                    max_number=consts.LDEV_NUMBER_OF_PER_REQUEST):\n        url = '%s/%s/ldevs?headLdevId=%s&count=%s&ldevOption=defined' % \\\n              (RestHandler.COMM_URL, self.storage_device_id, head_id,\n               max_number)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_system_info(self):\n        result_json = self.get_rest_info(RestHandler.COMM_URL, timeout=10)\n\n        return result_json\n\n    def get_controllers(self):\n        url = '%s/%s/components/instance' % \\\n              (RestHandler.COMM_URL, self.storage_device_id)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_disks(self):\n        url = '%s/%s/drives' % \\\n              (RestHandler.COMM_URL, self.storage_device_id)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_ports(self):\n        url = '%s/%s/ports' % \\\n              (RestHandler.COMM_URL, self.storage_device_id)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_detail_ports(self, port_id):\n        url = '%s/%s/ports/%s' % \\\n              (RestHandler.COMM_URL, self.storage_device_id, port_id)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_alerts(self, param, start, end):\n        url = '%s/%s/alerts?%s&start=%s&count=%s' % (RestHandler.COMM_URL,\n                                                     self.storage_device_id,\n                                                     param, start, end)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_all_host_groups(self):\n        url = '%s/%s/host-groups' % \\\n              (RestHandler.COMM_URL, self.storage_device_id)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_specific_host_group(self, port_id):\n        url = '%s/%s/host-groups?portId=%s' % \\\n              (RestHandler.COMM_URL, self.storage_device_id, port_id)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_host_wwn(self, port_id, group_number):\n        url = '%s/%s/host-wwns?portId=%s&hostGroupNumber=%s' % \\\n              (RestHandler.COMM_URL, self.storage_device_id, port_id,\n               group_number)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_iscsi_name(self, port_id, group_number):\n        url = '%s/%s/host-iscsis?portId=%s&hostGroupNumber=%s' % \\\n              (RestHandler.COMM_URL, self.storage_device_id, port_id,\n               group_number)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_lun_path(self, port_id, group_number):\n        url = 
'%s/%s/luns?portId=%s&hostGroupNumber=%s&' \\\n              'isBasicLunInformation=true' % \\\n              (RestHandler.COMM_URL, self.storage_device_id, port_id,\n               group_number)\n        result_json = self.get_rest_info(url)\n        return result_json\n\n    def get_volumes_with_defined(self):\n        url = '%s/%s/ldevs?ldevOption=defined&count=%s' % \\\n              (RestHandler.COMM_URL, self.storage_device_id,\n               consts.MAX_VOLUME_NUMBER)\n        LOG.info('get volume start time:%s' % time.time())\n        result_json = self.get_rest_info(url, timeout=None)\n        LOG.info('get volume end time:%s' % time.time())\n        return result_json\n"
  },
  {
    "path": "delfin/drivers/hitachi/vsp/vsp_stor.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport hashlib\nimport time\n\nimport six\nfrom oslo_log import log\nfrom oslo_utils import units\n\nfrom delfin import exception\nfrom delfin.common import alert_util\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom delfin.drivers.hitachi.vsp import consts\nfrom delfin.drivers.hitachi.vsp import rest_handler\n\nLOG = log.getLogger(__name__)\n\n\nclass HitachiVspDriver(driver.StorageDriver):\n    POOL_STATUS_MAP = {\"POLN\": constants.StoragePoolStatus.NORMAL,\n                       \"POLF\": constants.StoragePoolStatus.NORMAL,\n                       \"POLS\": constants.StoragePoolStatus.ABNORMAL,\n                       \"POLE\": constants.StoragePoolStatus.OFFLINE\n                       }\n    ALERT_LEVEL_MAP = {\"Acute\": constants.Severity.CRITICAL,\n                       \"Serious\": constants.Severity.MAJOR,\n                       \"Moderate\": constants.Severity.WARNING,\n                       \"Service\": constants.Severity.INFORMATIONAL\n                       }\n    TRAP_ALERT_LEVEL_MAP = {\n        \"1.3.6.1.4.1.116.3.11.4.1.1.0.1\": constants.Severity.CRITICAL,\n        \"1.3.6.1.4.1.116.3.11.4.1.1.0.2\": constants.Severity.MAJOR,\n        \"1.3.6.1.4.1.116.3.11.4.1.1.0.3\": constants.Severity.WARNING,\n        \"1.3.6.1.4.1.116.3.11.4.1.1.0.4\": constants.Severity.INFORMATIONAL\n    }\n    DISK_LOGIC_TYPE_MAP = {\"DATA\": constants.DiskLogicalType.MEMBER,\n                           \"SPARE\": constants.DiskLogicalType.SPARE,\n                           \"FREE\": constants.DiskLogicalType.FREE\n                           }\n    DISK_PHYSICAL_TYPE_MAP = {\"SAS\": constants.DiskPhysicalType.SAS,\n                              \"SATA\": constants.DiskPhysicalType.SATA,\n                              \"SSD\": constants.DiskPhysicalType.SSD,\n                              \"FC\": constants.DiskPhysicalType.FC\n                              }\n    PORT_TYPE_MAP = {\"FIBRE\": constants.PortType.FC,\n                     \"SCSI\": constants.PortType.OTHER,\n                     \"ISCSI\": constants.PortType.ETH,\n                     \"ENAS\": constants.PortType.OTHER,\n                     \"ESCON\": constants.PortType.OTHER,\n                     \"FICON\": constants.PortType.FICON,\n                     \"FCoE\": constants.PortType.FCOE,\n                     \"HNASS\": constants.PortType.OTHER,\n                     \"HNASU\": constants.PortType.OTHER\n                     }\n    OS_TYPE_MAP = {\"HP-UX\": constants.HostOSTypes.HP_UX,\n                   \"SOLARIS\": constants.HostOSTypes.SOLARIS,\n                   \"AIX\": constants.HostOSTypes.AIX,\n                   \"WIN\": constants.HostOSTypes.WINDOWS,\n                   \"LINUX/IRIX\": constants.HostOSTypes.LINUX,\n                   \"TRU64\": constants.HostOSTypes.UNKNOWN,\n                   \"OVMS\": constants.HostOSTypes.OPEN_VMS,\n                   \"NETWARE\": 
constants.HostOSTypes.UNKNOWN,\n                   \"VMWARE\": constants.HostOSTypes.VMWARE_ESX,\n                   \"VMWARE_EX\": constants.HostOSTypes.VMWARE_ESX,\n                   \"WIN_EX\": constants.HostOSTypes.WINDOWS\n                   }\n    DISK_STATUS_TYPE = {\"NML\": constants.DiskStatus.NORMAL,\n                        \"CPY\": constants.DiskStatus.NORMAL,\n                        \"CPI\": constants.DiskStatus.NORMAL,\n                        \"RSV\": constants.DiskStatus.NORMAL,\n                        \"FAI\": constants.DiskStatus.ABNORMAL,\n                        \"BLK\": constants.DiskStatus.ABNORMAL,\n                        \"WAR\": constants.DiskStatus.ABNORMAL,\n                        \"UNK\": constants.DiskStatus.NORMAL,\n                        \"Unknown\": constants.DiskStatus.NORMAL\n                        }\n    TIME_PATTERN = '%Y-%m-%dT%H:%M:%S'\n    AUTO_PORT_SPEED = 8 * units.Gi\n\n    REFCODE_OID = '1.3.6.1.4.1.116.5.11.4.2.3'\n    DESC_OID = '1.3.6.1.4.1.116.5.11.4.2.7'\n    TRAP_TIME_OID = '1.3.6.1.4.1.116.5.11.4.2.6'\n    TRAP_DATE_OID = '1.3.6.1.4.1.116.5.11.4.2.5'\n    TRAP_NICKNAME_OID = '1.3.6.1.4.1.116.5.11.4.2.2'\n    OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'\n    SECONDS_TO_MS = 1000\n    ALERT_START = 1\n    CTL_ALERT_COUNT = 255\n    DKC_ALERT_COUNT = 10239\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.rest_handler = rest_handler.RestHandler(**kwargs)\n        self.rest_handler.login()\n\n    def reset_connection(self, context, **kwargs):\n        self.rest_handler.logout()\n        self.rest_handler.verify = kwargs.get('verify', False)\n        self.rest_handler.login()\n\n    def close_connection(self):\n        self.rest_handler.logout()\n\n    def get_storage(self, context):\n        self.rest_handler.get_device_id()\n        if self.rest_handler.device_model in consts.SUPPORTED_VSP_SERIES:\n            capacity_json = self.rest_handler.get_capacity()\n            free_capacity = capacity_json.get(\"total\").get(\"freeSpace\") * \\\n                units.Ki\n            total_capacity = \\\n                capacity_json.get(\"total\").get(\"totalCapacity\") * units.Ki\n        else:\n            free_capacity = 0\n            total_capacity = 0\n            pools_info = self.rest_handler.get_all_pools()\n            if pools_info is not None:\n                pools = pools_info.get('data')\n                for pool in pools:\n                    total_cap = \\\n                        int(pool.get(\n                            'totalPoolCapacity')) * units.Mi\n                    free_cap = int(\n                        pool.get(\n                            'availableVolumeCapacity')) * units.Mi\n                    free_capacity = free_capacity + free_cap\n                    total_capacity = total_capacity + total_cap\n        firmware_version = self.rest_handler.get_firmware_version()\n        status = constants.StorageStatus.OFFLINE\n        if firmware_version is not None:\n            status = constants.StorageStatus.NORMAL\n        system_name = '%s_%s' % (self.rest_handler.device_model,\n                                 self.rest_handler.rest_host)\n\n        s = {\n            'name': system_name,\n            'vendor': 'Hitachi',\n            'description': 'Hitachi VSP Storage',\n            'model': str(self.rest_handler.device_model),\n            'status': status,\n            'serial_number': str(self.rest_handler.serial_number),\n            'firmware_version': 
str(firmware_version),\n            'location': '',\n            'raw_capacity': int(total_capacity),\n            'total_capacity': int(total_capacity),\n            'used_capacity': int(total_capacity - free_capacity),\n            'free_capacity': int(free_capacity)\n        }\n        return s\n\n    def list_storage_pools(self, context):\n        try:\n            pools_info = self.rest_handler.get_all_pools()\n            pool_list = []\n            pools = pools_info.get('data')\n            for pool in pools:\n                status = self.POOL_STATUS_MAP.get(\n                    pool.get('poolStatus'),\n                    constants.StoragePoolStatus.ABNORMAL\n                )\n                storage_type = constants.StorageType.BLOCK\n                total_cap = \\\n                    int(pool.get('totalPoolCapacity')) * units.Mi\n                free_cap = int(\n                    pool.get('availableVolumeCapacity')) * units.Mi\n                used_cap = total_cap - free_cap\n                p = {\n                    'name': pool.get('poolName'),\n                    'storage_id': self.storage_id,\n                    'native_storage_pool_id': str(pool.get('poolId')),\n                    'description': 'Hitachi VSP Pool',\n                    'status': status,\n                    'storage_type': storage_type,\n                    'total_capacity': int(total_cap),\n                    'used_capacity': int(used_cap),\n                    'free_capacity': int(free_cap),\n                }\n                pool_list.append(p)\n\n            return pool_list\n        except exception.DelfinException as err:\n            err_msg = \"Failed to get pool metrics from hitachi vsp: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise err\n        except Exception as e:\n            err_msg = \"Failed to get pool metrics from hitachi vsp: %s\" % \\\n                      (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    @staticmethod\n    def to_vsp_lun_id_format(lun_id):\n        hex_str = hex(lun_id)\n        result = ''\n        hex_lun_id = hex_str[2::].rjust(6, '0')\n        is_first = True\n        for i in range(0, len(hex_lun_id), 2):\n            if is_first is True:\n                result = '%s' % (hex_lun_id[i:i + 2])\n                is_first = False\n            else:\n                result = '%s:%s' % (result, hex_lun_id[i:i + 2])\n        return result\n\n    def list_volumes(self, context):\n        try:\n            volume_list = []\n            volumes = self.rest_handler.get_volumes_with_defined()\n            if not volumes:\n                return volume_list\n            volume_list = self.parse_volumes(volumes)\n            if len(volumes.get('data')) >= consts.MAX_VOLUME_NUMBER:\n                head_id = volumes.get('data')[-1].get('ldevId') + 1\n                while True:\n                    volumes_info = self.rest_handler.get_volumes(head_id)\n                    if not volumes_info or not volumes_info.get('data'):\n                        break\n                    volume_list.extend(self.parse_volumes(volumes_info))\n                    head_id = volumes_info.get('data')[-1].get('ldevId') + 1\n        except exception.DelfinException as err:\n            err_msg = \"Failed to get volume from hitachi vsp: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise err\n        except Exception 
as e:\n            err_msg = \"Failed to get volume from hitachi vsp: %s\" % \\\n                      (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return volume_list\n\n    def parse_volumes(self, volumes):\n        try:\n            volume_list = []\n            volumes = volumes.get('data')\n            for volume in volumes:\n                orig_pool_id = volume.get('poolId')\n                compressed = False\n                deduplicated = False\n                if volume.get('dataReductionMode') == \\\n                        'compression_deduplication':\n                    deduplicated = True\n                    compressed = True\n                if volume.get('dataReductionMode') == 'compression':\n                    compressed = True\n                if volume.get('status') == 'NML':\n                    status = 'normal'\n                else:\n                    status = 'abnormal'\n\n                vol_type = constants.VolumeType.THICK\n                for voltype in volume.get('attributes'):\n                    if voltype == 'HTI':\n                        vol_type = constants.VolumeType.THIN\n\n                total_cap = \\\n                    int(volume.get('blockCapacity')) * consts.BLOCK_SIZE\n                used_cap = \\\n                    int(volume.get('blockCapacity')) * consts.BLOCK_SIZE\n                # Because there is only subscribed capacity in device,so free\n                # capacity always 0\n                free_cap = 0\n                native_volume_id = HitachiVspDriver.to_vsp_lun_id_format(\n                    volume.get('ldevId'))\n                if volume.get('label'):\n                    name = volume.get('label')\n                else:\n                    name = native_volume_id\n\n                v = {\n                    'name': name,\n                    'storage_id': self.storage_id,\n                    'description': 'Hitachi VSP volume',\n                    'status': status,\n                    'native_volume_id': str(native_volume_id),\n                    'native_storage_pool_id': orig_pool_id,\n                    'type': vol_type,\n                    'total_capacity': total_cap,\n                    'used_capacity': used_cap,\n                    'free_capacity': free_cap,\n                    'compressed': compressed,\n                    'deduplicated': deduplicated,\n                }\n\n                volume_list.append(v)\n            return volume_list\n        except exception.DelfinException as err:\n            err_msg = \"Failed to get volumes metrics from hitachi vsp: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise err\n        except Exception as e:\n            err_msg = \"Failed to get volumes metrics from hitachi vsp: %s\" % \\\n                      (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_controllers(self, context):\n        try:\n            controller_list = []\n            controller_info = self.rest_handler.get_controllers()\n            if controller_info is not None:\n                con_entries = controller_info.get('ctls')\n                for control in con_entries:\n                    status = constants.ControllerStatus.OFFLINE\n                    if control.get('status') == 'Normal':\n                        status = constants.ControllerStatus.NORMAL\n                    
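# The component 'location' string doubles as the controller's\n                    # name and native id.\n                    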
controller_result = {\n                        'name': control.get('location'),\n                        'storage_id': self.storage_id,\n                        'native_controller_id': control.get('location'),\n                        'status': status,\n                        'location': control.get('location')\n                    }\n                    controller_list.append(controller_result)\n            return controller_list\n        except Exception as err:\n            err_msg = \"Failed to get controller attributes from vsp: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_ports(self, context):\n        try:\n            port_list = []\n            ports = self.rest_handler.get_all_ports()\n            if ports is None:\n                return port_list\n            port_entries = ports.get('data')\n            for port in port_entries:\n                ipv4 = None\n                ipv4_mask = None\n                ipv6 = None\n                wwn = None\n                status = constants.PortHealthStatus.NORMAL\n                conn_status = constants.PortConnectionStatus.CONNECTED\n                if port.get('portType') == 'ISCSI':\n                    iscsi_port = self.rest_handler.get_detail_ports(\n                        port.get('portId'))\n                    ipv4 = iscsi_port.get('ipv4Address')\n                    ipv4_mask = iscsi_port.get('ipv4Subnetmask')\n                    if iscsi_port.get(\n                            'ipv6LinkLocalAddress', {}).get(\"status\") == 'VAL':\n                        ipv6 = iscsi_port.get(\n                            'ipv6LinkLocalAddress', {}).get(\"address\")\n                speed = HitachiVspDriver.AUTO_PORT_SPEED if \\\n                    port.get('portSpeed') == 'AUT' else \\\n                    int(port.get('portSpeed')[:-1]) * units.Gi\n                if port.get('portType') == 'FIBRE':\n                    wwn = port.get('wwn')\n                    if wwn:\n                        wwn = wwn.upper()\n                port_type = HitachiVspDriver.PORT_TYPE_MAP.get(\n                    port.get('portType'),\n                    constants.PortType.OTHER)\n                port_result = {\n                    'name': port.get('portId'),\n                    'storage_id': self.storage_id,\n                    'native_port_id': port.get('portId'),\n                    'location': port.get('portId'),\n                    'connection_status': conn_status,\n                    'health_status': status,\n                    'type': port_type,\n                    'logical_type': '',\n                    'max_speed': speed,\n                    'mac_address': port.get('macAddress'),\n                    'wwn': wwn,\n                    'ipv4': ipv4,\n                    'ipv4_mask': ipv4_mask,\n                    'ipv6': ipv6\n                }\n                port_list.append(port_result)\n            return port_list\n        except Exception as err:\n            err_msg = \"Failed to get ports attributes from vsp: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_disks(self, context):\n        try:\n            disks = self.rest_handler.get_disks()\n            disk_list = []\n            if disks is not None:\n                disk_entries = disks.get('data')\n                for disk in disk_entries:\n                   
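# Normalize drive status and media/usage type via the class maps;\n                    # unmapped type strings fall back to the UNKNOWN constants.\n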
                    status = HitachiVspDriver.DISK_STATUS_TYPE.get(\n                        disk.get('status'), constants.DiskStatus.NORMAL)\n                    if disk.get('driveTypeName'):\n                        type_name = 'SSD' if 'SSD' in \\\n                                             disk.get('driveTypeName').upper()\\\n                            else disk.get('driveTypeName')\n                        physical_type = \\\n                            HitachiVspDriver.DISK_PHYSICAL_TYPE_MAP.get(\n                                type_name,\n                                constants.DiskPhysicalType.UNKNOWN)\n                    else:\n                        physical_type = constants.DiskPhysicalType.UNKNOWN\n                    logical_type = HitachiVspDriver.DISK_LOGIC_TYPE_MAP.get(\n                        disk.get('usageType'),\n                        constants.DiskLogicalType.UNKNOWN)\n                    disk_result = {\n                        'name': disk.get('driveLocationId'),\n                        'storage_id': self.storage_id,\n                        'native_disk_id': disk.get('driveLocationId'),\n                        'serial_number': disk.get('serialNumber'),\n                        'speed': int(disk.get('driveSpeed', 0)),\n                        'capacity':\n                            int(disk.get('totalCapacity', 0)) * units.Gi,\n                        'status': status,\n                        'physical_type': physical_type,\n                        'logical_type': logical_type,\n                        'native_disk_group_id': disk.get('parityGroupId'),\n                        'location': disk.get('driveLocationId')\n                    }\n                    disk_list.append(disk_result)\n            return disk_list\n\n        except Exception as err:\n            err_msg = \"Failed to get disk attributes from vsp: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    @staticmethod\n    def parse_queried_alerts(alerts, alert_list, query_para=None):\n        if not alerts:\n            return\n        for alert in alerts:\n            occur_time = int(time.mktime(time.strptime(\n                alert.get('occurenceTime'),\n                HitachiVspDriver.TIME_PATTERN))) * \\\n                HitachiVspDriver.SECONDS_TO_MS\n            if not alert_util.is_alert_in_time_range(query_para,\n                                                     occur_time):\n                continue\n            a = {\n                'location': alert.get('location'),\n                'alert_id': alert.get('alertId'),\n                'sequence_number': alert.get('alertIndex'),\n                'description': alert.get('errorDetail'),\n                'alert_name': alert.get('errorSection'),\n                'resource_type': constants.DEFAULT_RESOURCE_TYPE,\n                'occur_time': occur_time,\n                'category': constants.Category.FAULT,\n                'type': constants.EventType.EQUIPMENT_ALARM,\n                'severity': HitachiVspDriver.ALERT_LEVEL_MAP.get(\n                    alert.get('errorLevel'),\n                    constants.Severity.INFORMATIONAL\n                )\n            }\n            alert_list.append(a)\n\n    def list_alerts(self, context, query_para=None):\n        alert_list = []\n        if self.rest_handler.device_model in consts.SUPPORTED_VSP_SERIES:\n            # Alerts are kept in separate logs for each controller (CTL1,\n            # CTL2) and for the DKC; query all three and merge the results.\n            alerts_info_ctl1 = self.rest_handler.get_alerts(\n                'type=CTL1', HitachiVspDriver.ALERT_START,\n                
HitachiVspDriver.CTL_ALERT_COUNT)\n            alerts_info_ctl2 = self.rest_handler.get_alerts(\n                'type=CTL2', HitachiVspDriver.ALERT_START,\n                HitachiVspDriver.CTL_ALERT_COUNT)\n            alerts_info_dkc = self.rest_handler.get_alerts(\n                'type=DKC', HitachiVspDriver.ALERT_START,\n                HitachiVspDriver.DKC_ALERT_COUNT)\n            HitachiVspDriver.parse_queried_alerts(alerts_info_ctl1,\n                                                  alert_list, query_para)\n            HitachiVspDriver.parse_queried_alerts(alerts_info_ctl2,\n                                                  alert_list, query_para)\n            HitachiVspDriver.parse_queried_alerts(alerts_info_dkc,\n                                                  alert_list, query_para)\n        else:\n            err_msg = \"list_alerts is not supported in model %s\" % \\\n                      self.rest_handler.device_model\n            LOG.error(err_msg)\n            raise NotImplementedError(err_msg)\n\n        return alert_list\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        try:\n            alert_model = dict()\n            alert_model['alert_id'] = alert.get(HitachiVspDriver.REFCODE_OID)\n            alert_model['alert_name'] = alert.get(HitachiVspDriver.DESC_OID)\n            severity = HitachiVspDriver.TRAP_ALERT_LEVEL_MAP.get(\n                alert.get(HitachiVspDriver.OID_SEVERITY),\n                constants.Severity.INFORMATIONAL\n            )\n            alert_model['severity'] = severity\n            alert_model['category'] = constants.Category.FAULT\n            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n            aler_time = '%s%s' % (alert.get(HitachiVspDriver.TRAP_DATE_OID),\n                                  alert.get(HitachiVspDriver.TRAP_TIME_OID))\n            pattern = '%Y/%m/%d%H:%M:%S'\n            occur_time = time.strptime(aler_time, pattern)\n            alert_model['occur_time'] = int(time.mktime(occur_time) *\n                                            HitachiVspDriver.SECONDS_TO_MS)\n            alert_model['description'] = alert.get(HitachiVspDriver.DESC_OID)\n            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n            alert_model['location'] = alert.get(HitachiVspDriver.\n                                                TRAP_NICKNAME_OID)\n            alert_model['match_key'] = hashlib.md5(\n                alert.get(HitachiVspDriver.DESC_OID).encode()).hexdigest()\n\n            return alert_model\n        except Exception as e:\n            LOG.error(e)\n            msg = (\"Failed to build alert model as some attributes missing in\"\n                   \" alert message:%s\") % (six.text_type(e))\n            raise exception.InvalidResults(msg)\n\n    def clear_alert(self, context, alert):\n        pass\n\n    @staticmethod\n    def handle_group_with_port(group_info):\n        group_list = {}\n        if not group_info:\n            return group_list\n        group_entries = group_info.get('data')\n        for group in group_entries:\n            if group_list.get(group.get('portId')):\n                group_list[group.get('portId')].append(\n                    group.get('hostGroupNumber'))\n            else:\n                group_list[group.get('portId')] = []\n                group_list[group.get('portId')].append(\n               
     group.get('hostGroupNumber'))\n        return group_list\n\n    @staticmethod\n    def get_host_info(data, storage_id, host_list, type, os_type):\n        if data:\n            host_entries = data.get('data')\n            if not host_entries:\n                return True\n            for host in host_entries:\n                if type == 'iscsi':\n                    host_id = host.get('hostIscsiId')\n                    host_name = host.get('iscsiNickname') if \\\n                        host.get('iscsiNickname') != '-' \\\n                        else host.get('iscsiName')\n                else:\n                    host_id = host.get('hostWwnId')\n                    host_name = host.get('wwnNickname') if \\\n                        host.get('wwnNickname') != '-' \\\n                        else host.get('hostWwn')\n                host_result = {\n                    \"name\": host_name,\n                    \"storage_id\": storage_id,\n                    \"native_storage_host_id\": host_id.replace(\",\", \"_\"),\n                    \"os_type\": os_type,\n                    \"status\": constants.HostStatus.NORMAL\n                }\n                host_list.append(host_result)\n        return True\n\n    def list_storage_hosts(self, context):\n        try:\n            host_groups = self.rest_handler.get_all_host_groups()\n            host_list = []\n            if not host_groups:\n                return host_list\n            group_with_port = HitachiVspDriver.handle_group_with_port(\n                host_groups)\n            for port in group_with_port:\n                kwargs = {\n                    'method': 'host',\n                    'port': port,\n                    'result': host_list\n                }\n                self.handle_san_info(**kwargs)\n            return host_list\n        except Exception as e:\n            LOG.error(\"Failed to get host from vsp\")\n            raise e\n\n    @staticmethod\n    def get_initiator_from_host(data, storage_id, initiator_list, type):\n        if data:\n            host_entries = data.get('data')\n            if not host_entries:\n                return True\n            for host in host_entries:\n                if type == 'iscsi':\n                    initiator_id = host.get('hostIscsiId')\n                    init_type = constants.InitiatorType.ISCSI\n                    init_name = host.get('iscsiName')\n                else:\n                    initiator_id = host.get('hostWwnId')\n                    init_type = constants.InitiatorType.FC\n                    init_name = host.get('hostWwn')\n                # Skip initiators that are already in the list so the same\n                # WWN/IQN is not reported twice.\n                if any(initiator.get('wwn') == init_name\n                       for initiator in initiator_list):\n                    continue\n                init_result = {\n                    \"name\": init_name,\n                    \"storage_id\": storage_id,\n                    \"native_storage_host_initiator_id\": init_name,\n                    \"wwn\": init_name,\n                    \"status\": constants.InitiatorStatus.ONLINE,\n                    \"type\": init_type,\n                    \"alias\": host.get('portId'),\n                    \"native_storage_host_id\": initiator_id.replace(\",\", \"_\")\n                }\n                initiator_list.append(init_result)\n        return True\n\n    def list_storage_host_initiators(self, context):\n        try:\n            initiator_list = []\n            host_groups = self.rest_handler.get_all_host_groups()\n            if not host_groups:\n                return initiator_list\n            group_with_port = HitachiVspDriver.handle_group_with_port(\n                host_groups)\n            for port in group_with_port:\n                kwargs = {\n                    'method': 'initiator',\n                    'port': port,\n                    'result': initiator_list\n                }\n                self.handle_san_info(**kwargs)\n            return initiator_list\n        except Exception as e:\n            LOG.error(\"Failed to get initiators from vsp\")\n            raise e\n\n    @staticmethod\n    def get_host_ids(data, target, host_ids, host_grp_relation_list,\n                     storage_id, group_id):\n        if data:\n            host_entries = data.get('data')\n            if not host_entries:\n                return True\n            for host in host_entries:\n                if host.get(target):\n                    host_ids.append(host.get(target).replace(\",\", \"_\"))\n                    relation = {\n                        'storage_id': storage_id,\n                        'native_storage_host_group_id': group_id,\n                        'native_storage_host_id':\n                            host.get(target).replace(\",\", \"_\")\n                    }\n                    host_grp_relation_list.append(relation)\n\n    def list_storage_host_groups(self, context):\n        try:\n            host_groups = self.rest_handler.get_all_host_groups()\n            host_group_list = []\n            host_grp_relation_list = []\n            if not host_groups:\n                return host_group_list\n            group_with_port = HitachiVspDriver.handle_group_with_port(\n                host_groups)\n            for port in group_with_port:\n                kwargs = {\n                    'method': 'group',\n                    'port': port,\n                    'result': host_grp_relation_list,\n                    'group_list': host_group_list\n                }\n                self.handle_san_info(**kwargs)\n            result = {\n                'storage_host_groups': host_group_list,\n                'storage_host_grp_host_rels': host_grp_relation_list\n            }\n            return result\n        except Exception:\n            LOG.error(\"Failed to get host_groups from vsp\")\n            raise\n\n    def handle_lun_path(self, **kwargs):\n        view_list = []\n        views = self.rest_handler.get_lun_path(\n            kwargs.get('port'), kwargs.get('group'))\n        if not views:\n            return None\n        view_entries = views.get('data')\n        if not view_entries:\n            return None\n        for view in view_entries:\n            group_id = '%s_%s' % (view.get('portId'),\n                                  view.get('hostGroupNumber'))\n            view_result = {\n                \"name\": view.get('lunId'),\n                \"native_storage_host_group_id\": group_id,\n                \"storage_id\": self.storage_id,\n                \"native_volume_id\": HitachiVspDriver.to_vsp_lun_id_format(\n                    view.get('ldevId')),\n                \"native_masking_view_id\": view.get('lunId').replace(\",\", \"_\"),\n            }\n            kwargs.get('result').append(view_result)\n        return view_list\n\n    def list_masking_views(self, context):\n        try:\n            view_list = []\n            host_groups = self.rest_handler.get_all_host_groups()\n            if not host_groups:\n                return view_list\n            group_data = host_groups.get('data')\n            for group 
in group_data:\n                kwargs = {\n                    'group': group.get('hostGroupNumber'),\n                    'port': group.get('portId'),\n                    'result': view_list\n                }\n                self.handle_lun_path(**kwargs)\n            return view_list\n        except Exception as e:\n            LOG.error(\"Failed to get views from vsp\")\n            raise e\n\n    def handle_san_info(self, **kwargs):\n        groups = self.rest_handler.get_specific_host_group(\n            kwargs.get('port'))\n        group_data = groups.get('data')\n        for specific_group in group_data:\n            iscsis = None\n            wwns = None\n            if specific_group.get('iscsiName'):\n                iscsis = self.rest_handler.get_iscsi_name(\n                    specific_group.get('portId'),\n                    specific_group.get('hostGroupNumber'))\n            else:\n                wwns = self.rest_handler.get_host_wwn(\n                    specific_group.get('portId'),\n                    specific_group.get('hostGroupNumber'))\n            if kwargs.get('method') == 'host':\n                os_type = HitachiVspDriver.OS_TYPE_MAP.get(\n                    specific_group.get('hostMode'),\n                    constants.HostOSTypes.UNKNOWN)\n                if specific_group.get('iscsiName'):\n                    HitachiVspDriver.get_host_info(\n                        iscsis, self.storage_id, kwargs.get('result'),\n                        'iscsi', os_type)\n                else:\n                    HitachiVspDriver.get_host_info(\n                        wwns, self.storage_id,\n                        kwargs.get('result'), 'fc', os_type)\n            elif kwargs.get('method') == 'group':\n                host_ids = []\n                group_id = specific_group.get('hostGroupId').replace(\",\", \"_\")\n                if specific_group.get('iscsiName'):\n                    HitachiVspDriver.get_host_ids(\n                        iscsis, 'hostIscsiId', host_ids,\n                        kwargs.get('result'), self.storage_id,\n                        group_id)\n                else:\n                    HitachiVspDriver.get_host_ids(\n                        wwns, 'hostWwnId', host_ids,\n                        kwargs.get('result'), self.storage_id,\n                        group_id)\n                group_result = {\n                    \"name\": specific_group.get('hostGroupName'),\n                    \"storage_id\": self.storage_id,\n                    \"native_storage_host_group_id\": group_id,\n                    \"storage_hosts\": ','.join(host_ids)\n                }\n                kwargs.get('group_list').append(group_result)\n            else:\n                if specific_group.get('iscsiName'):\n                    HitachiVspDriver.get_initiator_from_host(\n                        iscsis, self.storage_id, kwargs.get('result'), 'iscsi')\n                else:\n                    HitachiVspDriver.get_initiator_from_host(\n                        wwns, self.storage_id, kwargs.get('result'), 'fc')\n"
  },
  {
    "path": "delfin/drivers/hpe/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/hpe/hpe_3par/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/hpe/hpe_3par/alert_handler.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\n\nimport six\nfrom oslo_log import log as logging\n\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.drivers.hpe.hpe_3par import consts\nfrom delfin.i18n import _\n\nLOG = logging.getLogger(__name__)\n\n\nclass AlertHandler(object):\n    \"\"\"Alert handling functions for Hpe3 parstor driver\"\"\"\n\n    OID_MESSAGECODE = '1.3.6.1.4.1.12925.1.7.1.8.1'\n    OID_SEVERITY = '1.3.6.1.4.1.12925.1.7.1.2.1'\n    OID_STATE = '1.3.6.1.4.1.12925.1.7.1.9.1'\n    OID_ID = '1.3.6.1.4.1.12925.1.7.1.7.1'\n    OID_TIMEOCCURRED = '1.3.6.1.4.1.12925.1.7.1.3.1'\n    OID_DETAILS = '1.3.6.1.4.1.12925.1.7.1.6.1'\n    OID_COMPONENT = '1.3.6.1.4.1.12925.1.7.1.5.1'\n\n    # Translation of trap severity to alert model severity\n    SEVERITY_MAP = {\"1\": constants.Severity.CRITICAL,\n                    \"2\": constants.Severity.MAJOR,\n                    \"3\": constants.Severity.MINOR,\n                    \"4\": constants.Severity.WARNING,\n                    \"0\": constants.Severity.FATAL,\n                    \"5\": constants.Severity.INFORMATIONAL,\n                    \"6\": constants.Severity.NOT_SPECIFIED}\n\n    # Translation of trap alert category to alert model category\n    CATEGORY_MAP = {\"0\": constants.Category.NOT_SPECIFIED,\n                    \"1\": constants.Category.FAULT,\n                    \"2\": constants.Category.RECOVERY,\n                    \"3\": constants.Category.RECOVERY,\n                    \"4\": constants.Category.RECOVERY,\n                    \"5\": constants.Category.RECOVERY}\n\n    ALERT_KEY_MAP = {\"Id\": \"sequence_number\",\n                     \"State\": \"category\",\n                     \"MessageCode\": \"message_code\",\n                     \"Time\": \"occur_time\",\n                     \"Severity\": \"severity\",\n                     \"Type\": \"alert_name\",\n                     \"Message\": \"description\",\n                     \"Component\": \"location\"\n                     }\n\n    ALERT_LEVEL_MAP = {\"Critical\": constants.Severity.CRITICAL,\n                       \"Major\": constants.Severity.MAJOR,\n                       \"Minor\": constants.Severity.MINOR,\n                       \"Degraded\": constants.Severity.WARNING,\n                       \"Fatal\": constants.Severity.FATAL,\n                       \"Informational\": constants.Severity.INFORMATIONAL,\n                       \"Debug\": constants.Severity.NOT_SPECIFIED\n                       }\n\n    # Attributes expected in alert info to proceed with model filling\n    _mandatory_alert_attributes = (\n        OID_MESSAGECODE,\n        OID_SEVERITY,\n        OID_STATE,\n        OID_ID,\n        OID_TIMEOCCURRED,\n        OID_DETAILS,\n        OID_COMPONENT\n    )\n\n    # Convert received time to epoch format\n    TIME_PATTERN = '%Y-%m-%d %H:%M:%S'\n\n    def __init__(self, rest_handler=None, ssh_handler=None):\n        self.rest_handler = 
rest_handler\n        self.ssh_handler = ssh_handler\n\n    @staticmethod\n    def parse_alert(context, alert):\n        \"\"\"Parse alert data got from alert manager and fill the alert model.\"\"\"\n        # Check for mandatory alert attributes\n        for attr in AlertHandler._mandatory_alert_attributes:\n            if not alert.get(attr):\n                msg = \"Mandatory information %s missing in alert message. \" \\\n                      % attr\n                raise exception.InvalidInput(msg)\n\n        try:\n            alert_model = dict()\n            # These information are sourced from device registration info\n            alert_model['alert_id'] = (\"0x%07x\" % int(\n                alert.get(AlertHandler.OID_MESSAGECODE)))\n            alert_model['alert_name'] = AlertHandler.get_alert_type(alert.get(\n                AlertHandler.OID_MESSAGECODE))\n            alert_model['severity'] = AlertHandler.SEVERITY_MAP.get(\n                alert.get(AlertHandler.OID_SEVERITY),\n                constants.Severity.NOT_SPECIFIED)\n            alert_model['category'] = AlertHandler.CATEGORY_MAP.get(\n                alert.get(AlertHandler.OID_STATE),\n                constants.Category.NOT_SPECIFIED)\n            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n            alert_model['sequence_number'] = alert.get(AlertHandler.OID_ID)\n            alert_model['occur_time'] = AlertHandler.get_time_stamp(\n                alert.get(AlertHandler.OID_TIMEOCCURRED))\n            alert_model['description'] = alert.get(AlertHandler.OID_DETAILS)\n            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n            alert_model['location'] = alert.get(AlertHandler.OID_COMPONENT)\n\n            if alert.get(AlertHandler.OID_STATE) == '5':\n                alert_model['clear_category'] = constants.ClearType.AUTOMATIC\n            return alert_model\n        except Exception as e:\n            LOG.error(e)\n            msg = (_(\"Failed to build alert model as some attributes missing \"\n                     \"in alert message.\"))\n            raise exception.InvalidResults(msg)\n\n    def add_trap_config(self, context, storage_id, trap_config):\n        \"\"\"Config the trap receiver in storage system.\"\"\"\n        # Currently not implemented\n        pass\n\n    def remove_trap_config(self, context, storage_id, trap_config):\n        \"\"\"Remove trap receiver configuration from storage system.\"\"\"\n        # Currently not implemented\n        pass\n\n    def clear_alert(self, context, alert):\n        \"\"\"Clear alert from storage system.\n           Remove command: removealert\n        \"\"\"\n        try:\n            if alert:\n                self.ssh_handler.remove_alerts(alert)\n                LOG.info(\"Clear alert %s successfully.\" % alert)\n        except exception.DelfinException as e:\n            err_msg = \"Remove alert %s failed: %s\" % (alert, e.msg)\n            LOG.error(err_msg)\n            raise e\n        except Exception as e:\n            err_msg = \"Remove alert %s failed: %s\" % (alert, six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    @staticmethod\n    def judge_alert_time(map, query_para):\n        if len(map) <= 1:\n            return False\n        if query_para is None and len(map) > 1:\n            return True\n        occur_time = AlertHandler.get_time_stamp(map.get('occur_time'))\n        if query_para.get('begin_time') and query_para.get('end_time'):\n         
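   # All comparisons are on epoch-millisecond integer timestamps.\n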
            if occur_time >= int(query_para.get('begin_time')) and \\\n                    occur_time <= int(query_para.get('end_time')):\n                return True\n        elif query_para.get('begin_time'):\n            if occur_time >= int(query_para.get('begin_time')):\n                return True\n        elif query_para.get('end_time'):\n            if occur_time <= int(query_para.get('end_time')):\n                return True\n        return False\n\n    def handle_alters(self, alertlist, query_para):\n        \"\"\"Assemble the 'Key: value' alert text lines into alert models.\"\"\"\n        alert_map = {}\n        alert_list = []\n        for alertinfo in alertlist:\n            strline = alertinfo\n            if strline is not None and strline != '':\n                strinfo = strline.split(': ', 1)\n                strinfo[0] = strinfo[0].replace(\" \", \"\")\n                key = self.ALERT_KEY_MAP.get(strinfo[0]) or ''\n                value = strinfo[1] if key else ''\n                alert_map[key] = value\n            elif AlertHandler.judge_alert_time(alert_map, query_para):\n                severity = self.ALERT_LEVEL_MAP.get(alert_map.get('severity'))\n                category = 'Fault' if alert_map.get('category') == 'New' \\\n                    else ''\n                occur_time = AlertHandler.get_time_stamp(\n                    alert_map.get('occur_time'))\n                alert_model = {\n                    'alert_id': alert_map.get('message_code'),\n                    'alert_name': alert_map.get('alert_name'),\n                    'severity': severity,\n                    'category': category,\n                    'type': constants.EventType.EQUIPMENT_ALARM,\n                    'sequence_number': alert_map.get('sequence_number'),\n                    'occur_time': occur_time,\n                    'description': alert_map.get('description'),\n                    'resource_type': constants.DEFAULT_RESOURCE_TYPE,\n                    'location': alert_map.get('location')\n                }\n                alert_list.append(alert_model)\n                alert_map = {}\n        return alert_list\n\n    def list_alerts(self, context, query_para):\n        try:\n            # Get list of Hpe3parStor alerts\n            try:\n                reslist = self.ssh_handler.get_all_alerts()\n            except Exception as e:\n                err_msg = \"Failed to ssh Hpe3parStor: %s\" % \\\n                          (six.text_type(e))\n                LOG.error(err_msg)\n                raise exception.SSHException(err_msg)\n\n            alertlist = reslist.split('\\n')\n\n            return self.handle_alters(alertlist, query_para)\n        except exception.DelfinException as e:\n            err_msg = \"Get alerts failed: %s\" % (e.msg)\n            LOG.error(err_msg)\n            raise e\n        except Exception as e:\n            err_msg = \"Get alert failed: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    @staticmethod\n    def get_time_stamp(time_str):\n        \"\"\"Convert a time string to an epoch timestamp in milliseconds.\"\"\"\n        time_stamp = ''\n        try:\n            if time_str:\n                if len(time_str.split()) == 3:\n                    time_str = time_str.rsplit(' ', 1)[0]\n                # Convert to time array first\n                time_array = time.strptime(time_str, AlertHandler.TIME_PATTERN)\n                # Convert to a timestamp in milliseconds\n                time_stamp = int(time.mktime(time_array) * 1000)\n        except Exception as e:\n            LOG.error(e)\n\n        return time_stamp\n\n    @staticmethod\n    def get_alert_type(message_code):\n        \"\"\"\n        Get alert type\n\n        :param str message_code: alert's message_code.\n        :return: returns alert's type\n        \"\"\"\n        alert_type = ''\n        try:\n            if message_code is not None:\n                message_key = (\"0x%07x\" % int(message_code))\n                alert_type = consts.HPE3PAR_ALERT_CODE.get(message_key)\n        except Exception as e:\n            LOG.error(e)\n\n        return alert_type\n"
  },
  {
    "path": "delfin/drivers/hpe/hpe_3par/component_handler.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport datetime\nimport re\nimport time\n\nimport six\nfrom oslo_log import log\nfrom oslo_utils import units\n\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.drivers.hpe.hpe_3par import consts\n\nLOG = log.getLogger(__name__)\n\n\nclass ComponentHandler():\n    COMPONENT_HEALTH = 'The following components are healthy'\n    SYSTEM_HEALTH = 'System is healthy'\n    HPE3PAR_VERSION = 'Superclass'\n\n    HPE3PAR_VENDOR = 'HPE'\n\n    STATUS_MAP = {1: constants.StoragePoolStatus.NORMAL,\n                  2: constants.StoragePoolStatus.ABNORMAL,\n                  3: constants.StoragePoolStatus.ABNORMAL,\n                  99: constants.StoragePoolStatus.OFFLINE}\n\n    VOL_TYPE_MAP = {1: constants.VolumeType.THICK,\n                    2: constants.VolumeType.THIN,\n                    3: constants.VolumeType.THIN,\n                    4: constants.VolumeType.THICK,\n                    5: constants.VolumeType.THICK,\n                    6: constants.VolumeType.THIN,\n                    7: constants.VolumeType.THICK}\n\n    def __init__(self, rest_handler=None, ssh_handler=None):\n        self.rest_handler = rest_handler\n        self.ssh_handler = ssh_handler\n\n    def set_storage_id(self, storage_id):\n        self.storage_id = storage_id\n\n    def get_storage(self, context):\n        storage = self.rest_handler.get_storage()\n        status = constants.StorageStatus.NORMAL\n\n        if storage:\n            try:\n                # Check the hardware and software health\n                # status of the storage system\n                re_str = self.ssh_handler.get_health_state()\n                if 'degraded' in re_str.lower() or 'failed' in re_str.lower():\n                    status = constants.StorageStatus.ABNORMAL\n            except Exception:\n                status = constants.StorageStatus.ABNORMAL\n                LOG.error('SSH check health Failed!')\n\n            free_cap = int(storage.get('freeCapacityMiB')) * units.Mi\n            used_cap = int(storage.get('allocatedCapacityMiB')) * units.Mi\n            total_cap = free_cap + used_cap\n            raw_cap = int(storage.get('totalCapacityMiB')) * units.Mi\n            result = {\n                'name': storage.get('name'),\n                'vendor': ComponentHandler.HPE3PAR_VENDOR,\n                'model': storage.get('model'),\n                'status': status,\n                'serial_number': storage.get('serialNumber'),\n                'firmware_version': storage.get('systemVersion'),\n                'location': storage.get('location'),\n                'total_capacity': total_cap,\n                'raw_capacity': raw_cap,\n                'used_capacity': used_cap,\n                'free_capacity': free_cap\n            }\n        else:\n            # If no data is returned, it indicates that there\n            # may be a problem with the network or the device.\n    
            # Default to reporting the storage as OFFLINE.\n            result = {\n                'status': constants.StorageStatus.OFFLINE\n            }\n        return result\n\n    def list_storage_pools(self, context):\n        try:\n            # Get list of Hpe3parStor pool details\n            pools = self.rest_handler.get_all_pools()\n            pool_list = []\n\n            if pools is not None:\n                members = pools.get('members')\n                for pool in (members or []):\n                    # Pool status: 1=normal, 2/3=abnormal, 99=offline\n                    status = self.STATUS_MAP.get(pool.get('state'))\n\n                    # Pool storage_type defaults to block\n                    pool_type = constants.StorageType.BLOCK\n                    usr_used = int(pool['UsrUsage']['usedMiB']) * units.Mi\n                    sa_used = int(pool['SAUsage']['usedMiB']) * units.Mi\n                    sd_used = int(pool['SDUsage']['usedMiB']) * units.Mi\n                    usr_total = int(pool['UsrUsage']['totalMiB']) * units.Mi\n                    sa_total = int(pool['SAUsage']['totalMiB']) * units.Mi\n                    sd_total = int(pool['SDUsage']['totalMiB']) * units.Mi\n                    total_cap = usr_total + sa_total + sd_total\n                    used_cap = usr_used + sa_used + sd_used\n                    free_cap = total_cap - used_cap\n                    usr_subcap = int(\n                        pool['UsrUsage']['rawTotalMiB']) * units.Mi\n                    sa_subcap = int(pool['SAUsage']['rawTotalMiB']) * units.Mi\n                    sd_subcap = int(pool['SDUsage']['rawTotalMiB']) * units.Mi\n                    subscribed_cap = usr_subcap + sa_subcap + sd_subcap\n\n                    p = {\n                        'name': pool.get('name'),\n                        'storage_id': self.storage_id,\n                        'native_storage_pool_id': str(pool.get('id')),\n                        'description': 'Hpe 3par CPG:%s' % pool.get('name'),\n                        'status': status,\n                        'storage_type': pool_type,\n                        'total_capacity': total_cap,\n                        'subscribed_capacity': subscribed_cap,\n                        'used_capacity': used_cap,\n                        'free_capacity': free_cap\n                    }\n                    pool_list.append(p)\n            return pool_list\n\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get pool metrics from Hpe3parStor: %s\" % \\\n                      (e.msg)\n            LOG.error(err_msg)\n            raise e\n        except Exception as e:\n            err_msg = \"Failed to get pool metrics from Hpe3parStor: %s\" % \\\n                      (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def handler_volume(self, volumes, pool_ids):\n        volume_list = []\n        if volumes is None:\n            return volume_list\n        else:\n            members = volumes.get('members')\n            for volume in (members or []):\n                status = self.STATUS_MAP.get(volume.get('state'))\n                orig_pool_name = volume.get('userCPG', '')\n\n                compressed = True\n                deduplicated = True\n                if volume.get('compressionState') and volume.get(\n                        'compressionState') != 1:\n                    compressed = False\n                if volume.get('deduplicationState') and volume.get(\n                        'deduplicationState') != 1:\n                    deduplicated = False\n                vol_type = self.VOL_TYPE_MAP.get(\n                    volume.get('provisioningType'))\n\n                # Virtual size of volume in MiB (1024^2 bytes).\n                usr_used = int(\n                    volume['userSpace']['usedMiB']) * units.Mi\n                total_cap = int(volume['sizeMiB']) * units.Mi\n                used_cap = usr_used\n                free_cap = total_cap - used_cap\n\n                v = {\n                    'name': volume.get('name'),\n                    'storage_id': self.storage_id,\n                    'description': volume.get('comment'),\n                    'status': status,\n                    'native_volume_id': str(volume.get('id')),\n                    'native_storage_pool_id': pool_ids.get(orig_pool_name,\n                                                           ''),\n                    'wwn': volume.get('wwn'),\n                    'type': vol_type,\n                    'total_capacity': total_cap,\n                    'used_capacity': used_cap,\n                    'free_capacity': free_cap,\n                    'compressed': compressed,\n                    'deduplicated': deduplicated\n                }\n                volume_list.append(v)\n        return volume_list\n\n    def list_volumes(self, context):\n        try:\n            volumes = self.rest_handler.get_all_volumes()\n\n            pools = self.rest_handler.get_all_pools()\n            pool_ids = {}\n            if pools is not None:\n                members = pools.get('members')\n                for pool in (members or []):\n                    pool_ids[pool.get('name')] = pool.get('id')\n\n            return self.handler_volume(volumes, pool_ids)\n\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get list volumes from Hpe3parStor: %s\" % \\\n                      (e.msg)\n            LOG.error(err_msg)\n            raise e\n        except Exception as e:\n            err_msg = \"Failed to get list volumes from Hpe3parStor: %s\" % \\\n                      (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_controllers(self, storage_id):\n        controllers = self.ssh_handler.get_controllers()\n        controller_list = []\n        if controllers:\n            node_cpu_map = self.ssh_handler.get_controllers_cpu()\n            node_version_map = self.ssh_handler.get_controllers_version()\n            for controller in controllers:\n                node_id = controller.get('node')\n                memory_size = int(controller.get('controlmem(mb)',\n                                                 '0')) * units.Mi + int(\n                    controller.get('datamem(mb)', '0')) * units.Mi\n                cpu_info = ''\n                cpu_count = None\n                if node_cpu_map and node_cpu_map.get(node_id):\n                    cpu_count = 0\n                    cpu_info_map = node_cpu_map.get(node_id)\n                    cpu_info_keys = list(cpu_info_map.keys())\n                    for cpu_key in cpu_info_keys:\n                        if cpu_info:\n                            cpu_info = '%s%s' % (cpu_info, ',')\n                        cpu_info = '%s%s * %s MHz' % (\n                            cpu_info, cpu_info_map.get(cpu_key), cpu_key)\n                        cpu_count += cpu_info_map.get(cpu_key)\n                soft_version = None\n                if node_version_map:\n 
                   soft_version = node_version_map.get(node_id, '')\n                controller_model = {\n                    'name': controller.get('name'),\n                    'storage_id': storage_id,\n                    'native_controller_id': node_id,\n                    'status': consts.CONTROLLER_STATUS_MAP.get(\n                        controller.get('state', '').upper(),\n                        constants.ControllerStatus.OFFLINE),\n                    'location': None,\n                    'soft_version': soft_version,\n                    'cpu_info': cpu_info,\n                    'cpu_count': cpu_count,\n                    'memory_size': str(memory_size)\n                }\n                controller_list.append(controller_model)\n        return controller_list\n\n    def list_disks(self, storage_id):\n        disks = self.ssh_handler.get_disks()\n        disk_list = []\n        if disks:\n            disks_inventory_map = self.ssh_handler.get_disks_inventory()\n            for disk in disks:\n                disk_id = disk.get('id')\n                status = consts.DISK_STATUS_MAP.get(\n                    disk.get('state', '').upper(),\n                    constants.DiskStatus.ABNORMAL)\n                total = 0\n                if disk.get('total'):\n                    total = float(disk.get(\"total\"))\n                elif disk.get('size_mb'):\n                    total = float(disk.get(\"size_mb\"))\n                capacity = int(total * units.Mi)\n                serial_number = None\n                manufacturer = None\n                model = None\n                firmware = None\n                if disks_inventory_map:\n                    inventory_map = disks_inventory_map.get(disk_id)\n                    if inventory_map:\n                        serial_number = inventory_map.get('disk_serial')\n                        manufacturer = inventory_map.get('disk_mfr')\n                        model = inventory_map.get('disk_model')\n                        firmware = inventory_map.get('disk_fw_rev')\n                speed = None\n                if str(disk.get('rpm')).isdigit():\n                    speed = int(disk.get('rpm')) * units.k\n                disk_model = {\n                    'name': disk.get('cagepos'),\n                    'storage_id': storage_id,\n                    'native_disk_id': disk_id,\n                    'serial_number': serial_number,\n                    'manufacturer': manufacturer,\n                    'model': model,\n                    'firmware': firmware,\n                    'speed': speed,\n                    'capacity': capacity,\n                    'status': status,\n                    'physical_type': consts.DISK_PHYSICAL_TYPE_MAP.get(\n                        disk.get('type').upper(),\n                        constants.DiskPhysicalType.UNKNOWN),\n                    'logical_type': None,\n                    'health_score': None,\n                    'native_disk_group_id': None,\n                    'location': disk.get('cagepos')\n                }\n                disk_list.append(disk_model)\n        return disk_list\n\n    def list_ports(self, storage_id):\n        ports = self.ssh_handler.get_ports()\n        port_list = []\n        if ports:\n            ports_inventory_map = self.ssh_handler.get_ports_inventory()\n            ports_config_map = self.ssh_handler.get_ports_config()\n            ports_iscsi_map = self.ssh_handler.get_ports_iscsi()\n            ports_rcip_map = 
self.ssh_handler.get_ports_rcip()\n            ports_connected_map = self.ssh_handler.get_ports_connected()\n            ports_fcoe_map = self.ssh_handler.get_ports_fcoe()\n            port_fs_map = self.ssh_handler.get_ports_fs()\n            for port in ports:\n                port_id = port.get('n:s:p')\n                port_type = ''\n                if ports_inventory_map:\n                    port_type = ports_inventory_map.get(port_id, '')\n                max_speed = ''\n                if ports_config_map:\n                    max_speed = ports_config_map.get(port_id, '')\n                ip_addr = None\n                ip_mask = None\n                ipv4 = None\n                ipv4_mask = None\n                ipv6 = None\n                ipv6_mask = None\n                rate = ''\n                if ports_connected_map:\n                    rate = ports_connected_map.get(port_id, '')\n                if not ip_addr and ports_iscsi_map:\n                    iscsi_map = ports_iscsi_map.get(port_id)\n                    if iscsi_map:\n                        ip_addr = iscsi_map.get('ipaddr')\n                        ip_mask = iscsi_map.get('netmask/prefixlen')\n                        rate = iscsi_map.get('rate')\n                if not ip_addr and ports_rcip_map:\n                    rcip_map = ports_rcip_map.get(port_id)\n                    if rcip_map:\n                        ip_addr = rcip_map.get('ipaddr')\n                        ip_mask = rcip_map.get('netmask')\n                        rate = rcip_map.get('rate')\n                if not ip_addr and port_fs_map:\n                    fs_map = port_fs_map.get(port_id)\n                    if fs_map:\n                        ip_addr = fs_map.get('ipaddr')\n                        ip_mask = fs_map.get('netmask')\n                        rate = fs_map.get('rate')\n                if not rate and ports_fcoe_map:\n                    fcoe_map = ports_fcoe_map.get(port_id)\n                    if fcoe_map:\n                        rate = fcoe_map.get('rate')\n                if ip_addr and ip_addr != '-':\n                    pattern = re.compile(consts.IPV4_PATTERN)\n                    search_obj = pattern.search(ip_addr)\n                    if search_obj:\n                        ipv4 = ip_addr\n                        ipv4_mask = ip_mask\n                    else:\n                        ipv6 = ip_addr\n                        ipv6_mask = ip_mask\n                wwn = None\n                mac = None\n                if port_type.upper() == 'ETH':\n                    mac = port.get('port_wwn/hw_addr')\n                else:\n                    wwn = port.get('port_wwn/hw_addr')\n                port_model = {\n                    'name': port_id,\n                    'storage_id': storage_id,\n                    'native_port_id': port_id,\n                    'location': port_id,\n                    'connection_status':\n                        consts.PORT_CONNECTION_STATUS_MAP.get(\n                            port.get('state', '').upper(),\n                            constants.PortConnectionStatus.UNKNOWN),\n                    'health_status': constants.PortHealthStatus.NORMAL,\n                    'type': consts.PORT_TYPE_MAP.get(port_type.upper(),\n                                                     constants.PortType.OTHER),\n                    'logical_type': None,\n                    'speed': self.parse_speed(rate),\n                    'max_speed': self.parse_speed(max_speed),\n                    
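# parse_speed strips the unit suffix and scales Gbps/Mbps/Kbps\n                    # rates by units.G, units.M and units.k respectively.\n                    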
'native_parent_id': None,\n                    'wwn': wwn,\n                    'mac_address': mac,\n                    'ipv4': ipv4,\n                    'ipv4_mask': ipv4_mask,\n                    'ipv6': ipv6,\n                    'ipv6_mask': ipv6_mask,\n                }\n                port_list.append(port_model)\n        return port_list\n\n    def parse_speed(self, speed_value):\n        speed = 0\n        try:\n            if speed_value == '' or speed_value == 'n/a':\n                return None\n            speeds = re.findall(\"\\\\d+\", speed_value)\n            if speeds:\n                speed = int(speeds[0])\n            if 'Gbps' in speed_value:\n                speed = speed * units.G\n            elif 'Mbps' in speed_value:\n                speed = speed * units.M\n            elif 'Kbps' in speed_value:\n                speed = speed * units.k\n        except Exception as err:\n            err_msg = \"analyse speed error: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n        return speed\n\n    def collect_perf_metrics(self, storage_id, resource_metrics,\n                             start_time, end_time):\n        metrics = []\n        try:\n            # storage-pool metrics\n            if resource_metrics.get(constants.ResourceType.STORAGE_POOL):\n                pool_metrics = self.get_pool_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.STORAGE_POOL),\n                    start_time, end_time)\n                metrics.extend(pool_metrics)\n\n            # volume metrics\n            if resource_metrics.get(constants.ResourceType.VOLUME):\n                volume_metrics = self.get_volume_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.VOLUME),\n                    start_time, end_time)\n                metrics.extend(volume_metrics)\n\n            # port metrics\n            if resource_metrics.get(constants.ResourceType.PORT):\n                port_metrics = self.get_port_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.PORT),\n                    start_time, end_time)\n                metrics.extend(port_metrics)\n\n            # disk metrics\n            if resource_metrics.get(constants.ResourceType.DISK):\n                disk_metrics = self.get_disk_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.DISK),\n                    start_time, end_time)\n                metrics.extend(disk_metrics)\n        except exception.DelfinException as err:\n            err_msg = \"Failed to collect metrics from Hpe3parStor: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise err\n        except Exception as err:\n            err_msg = \"Failed to collect metrics from Hpe3parStor: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n        return metrics\n\n    def get_pool_metrics(self, storage_id, metric_list,\n                         start_time, end_time):\n        metrics = []\n        obj_metrics = {}\n        pool_maps = {}\n        pools = self.rest_handler.get_all_pools()\n        if pools:\n            pool_members = pools.get('members')\n            for pool in pool_members:\n                pool_maps[pool.get('name')] = str(pool.get('id'))\n            obj_metrics = 
self.rest_format_metrics_data(\n                start_time, end_time, self.rest_handler.get_pool_metrics,\n                constants.ResourceType.STORAGE_POOL)\n\n        if obj_metrics:\n            for obj_name in obj_metrics.keys():\n                if pool_maps.get(obj_name):\n                    labels = {\n                        'storage_id': storage_id,\n                        'resource_type': constants.ResourceType.STORAGE_POOL,\n                        'resource_id': pool_maps.get(obj_name),\n                        'type': 'RAW',\n                        'unit': ''\n                    }\n                    metric_model_list = self._get_metric_model(metric_list,\n                                                               labels,\n                                                               obj_metrics.get(\n                                                                   obj_name),\n                                                               consts.POOL_CAP)\n                    if metric_model_list:\n                        metrics.extend(metric_model_list)\n        return metrics\n\n    def _get_metric_model(self, metric_list, labels, metric_values, obj_cap):\n        metric_model_list = []\n        for metric_name in (metric_list or []):\n            values = {}\n            obj_labels = copy.deepcopy(labels)\n            obj_labels['unit'] = obj_cap.get(metric_name).get('unit')\n            for metric_value in metric_values:\n                if metric_value.get(metric_name) is not None:\n                    collect_timestamp = self.convert_to_system_time(\n                        metric_value.get('collect_timestamp'))\n                    values[collect_timestamp] = metric_value.get(\n                        metric_name)\n            if values:\n                metric_model = constants.metric_struct(name=metric_name,\n                                                       labels=obj_labels,\n                                                       values=values)\n                metric_model_list.append(metric_model)\n        return metric_model_list\n\n    def get_port_metrics(self, storage_id, metric_list,\n                         start_time, end_time):\n        metrics = []\n        obj_metrics = self.ssh_format_metrics_data(\n            start_time, end_time, self.ssh_handler.get_port_metrics,\n            constants.ResourceType.PORT)\n        if obj_metrics:\n            for obj_id in obj_metrics.keys():\n                labels = {\n                    'storage_id': storage_id,\n                    'resource_type': constants.ResourceType.PORT,\n                    'resource_id': obj_id,\n                    'type': 'RAW',\n                    'unit': ''\n                }\n                metric_model_list = self._get_metric_model(metric_list,\n                                                           labels,\n                                                           obj_metrics.get(\n                                                               obj_id),\n                                                           consts.PORT_CAP)\n                if metric_model_list:\n                    metrics.extend(metric_model_list)\n        return metrics\n\n    def get_disk_metrics(self, storage_id, metric_list,\n                         start_time, end_time):\n        metrics = []\n        obj_metrics = self.ssh_format_metrics_data(\n            start_time, end_time, self.ssh_handler.get_disk_metrics,\n            constants.ResourceType.DISK)\n        if 
obj_metrics:\n            for obj_id in obj_metrics.keys():\n                labels = {\n                    'storage_id': storage_id,\n                    'resource_type': constants.ResourceType.DISK,\n                    'resource_id': obj_id,\n                    'type': 'RAW',\n                    'unit': ''\n                }\n                metric_model_list = self._get_metric_model(metric_list,\n                                                           labels,\n                                                           obj_metrics.get(\n                                                               obj_id),\n                                                           consts.DISK_CAP)\n                if metric_model_list:\n                    metrics.extend(metric_model_list)\n        return metrics\n\n    def get_volume_metrics(self, storage_id, metric_list,\n                           start_time, end_time):\n        metrics = []\n        obj_metrics = {}\n        try:\n            obj_metrics = self.ssh_format_metrics_data(\n                start_time, end_time, self.ssh_handler.get_volume_metrics,\n                constants.ResourceType.VOLUME)\n        except Exception as err:\n            err_msg = \"Failed to collect volume metrics: %s\" \\\n                      % (six.text_type(err))\n            LOG.warning(err_msg)\n        if obj_metrics:\n            for obj_id in obj_metrics.keys():\n                labels = {\n                    'storage_id': storage_id,\n                    'resource_type': constants.ResourceType.VOLUME,\n                    'resource_id': obj_id,\n                    'type': 'RAW',\n                    'unit': ''\n                }\n                metric_model_list = self._get_metric_model(metric_list,\n                                                           labels,\n                                                           obj_metrics.get(\n                                                               obj_id),\n                                                           consts.VOLUME_CAP)\n                if metric_model_list:\n                    metrics.extend(metric_model_list)\n        return metrics\n\n    def ssh_format_metrics_data(self, start_time, end_time, get_obj_metrics,\n                                obj_type):\n        collect_resource_map = {}\n        obj_metrics = get_obj_metrics(start_time, end_time)\n        if obj_metrics:\n            metric_value = obj_metrics[0]\n            last_time = metric_value.get('collect_time', 0)\n            first_time = last_time\n            time_interval = consts.COLLECT_INTERVAL_HIRES\n            # each query returns only the newest sample window, so page\n            # backwards by shrinking the end time until start_time is covered\n            while (last_time - time_interval) > start_time:\n                next_obj_metrics = get_obj_metrics(\n                    start_time, (last_time - time_interval))\n                if next_obj_metrics:\n                    metric_value = next_obj_metrics[0]\n                    last_time = metric_value.get('collect_time', 0)\n                    if last_time > start_time:\n                        time_interval = first_time - last_time\n                        first_time = last_time\n                        obj_metrics.extend(next_obj_metrics)\n                    else:\n                        break\n                else:\n                    break\n\n        for obj_metric in (obj_metrics or []):\n            obj_id = ''\n            if obj_type == constants.ResourceType.DISK:\n                obj_id = obj_metric.get('pdid')\n            elif obj_type == constants.ResourceType.PORT:\n            
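    # a port is identified by its N:S:P (node:slot:port) position\n            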
    obj_id = '%s:%s:%s' % (\n                    obj_metric.get('port_n'), obj_metric.get('port_s'),\n                    obj_metric.get('port_p'))\n            elif obj_type == constants.ResourceType.VOLUME:\n                obj_id = obj_metric.get('vvid')\n            if obj_id:\n                metric_list = []\n                if collect_resource_map.get(obj_id):\n                    metric_list = collect_resource_map.get(obj_id)\n                else:\n                    collect_resource_map[obj_id] = metric_list\n                metric_map = {}\n                metric_map['iops'] = float(obj_metric.get('iotot'))\n                metric_map['readIops'] = float(obj_metric.get('iord'))\n                metric_map['writeIops'] = float(obj_metric.get('iowr'))\n                # srstat output is in KB/s; convert to MB/s\n                metric_map['throughput'] = round(\n                    float(obj_metric.get('kbytestot')) / units.k, 5)\n                metric_map['readThroughput'] = round(\n                    float(obj_metric.get('kbytesrd')) / units.k, 5)\n                metric_map['writeThroughput'] = round(\n                    float(obj_metric.get('kbyteswr')) / units.k, 5)\n                metric_map['responseTime'] = float(\n                    obj_metric.get('svcttot'))\n                metric_map['ioSize'] = float(obj_metric.get('iosztot'))\n                metric_map['readIoSize'] = float(obj_metric.get('ioszrd'))\n                metric_map['writeIoSize'] = float(obj_metric.get('ioszwr'))\n                metric_map['collect_timestamp'] = obj_metric.get(\n                    'collect_time')\n                metric_list.append(metric_map)\n        return collect_resource_map\n\n    def rest_format_metrics_data(self, start_time, end_time, get_obj_metrics,\n                                 obj_type):\n        collect_resource_map = {}\n        obj_metrics_list = []\n        obj_metrics = get_obj_metrics(start_time, end_time)\n        if obj_metrics:\n            last_time = obj_metrics.get('sampleTimeSec', 0) * units.k\n            first_time = last_time\n            time_interval = consts.COLLECT_INTERVAL_HIRES\n            metric_members = obj_metrics.get('members')\n            if metric_members:\n                for member in metric_members:\n                    member['collect_timestamp'] = last_time\n                obj_metrics_list.extend(metric_members)\n                while (last_time - time_interval) > start_time:\n                    next_obj_metrics = get_obj_metrics(\n                        start_time,\n                        (last_time - time_interval))\n                    metric_members = next_obj_metrics.get('members')\n                    if metric_members:\n                        last_time = next_obj_metrics.get(\n                            'sampleTimeSec', 0) * units.k\n                        if last_time > start_time:\n                            time_interval = first_time - last_time\n                            first_time = last_time\n                            for member in metric_members:\n                                member['collect_timestamp'] = last_time\n                            obj_metrics_list.extend(metric_members)\n                        else:\n                            break\n                    else:\n                        break\n        for obj_metric in (obj_metrics_list or []):\n            obj_id = ''\n            if obj_type == constants.ResourceType.STORAGE_POOL:\n                obj_id = obj_metric.get('name')\n            if obj_id:\n                metric_list = []\n               
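 # reuse the series if this pool was seen in an earlier page\n               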
 if collect_resource_map.get(obj_id):\n                    metric_list = collect_resource_map.get(obj_id)\n                else:\n                    collect_resource_map[obj_id] = metric_list\n                metric_map = {}\n                metric_map['iops'] = obj_metric.get('IO').get('total')\n                metric_map['readIops'] = obj_metric.get('IO').get('read')\n                metric_map['writeIops'] = obj_metric.get('IO').get('write')\n                # the REST API reports KBytes/s; convert to MB/s\n                metric_map['throughput'] = round(\n                    obj_metric.get('KBytes').get('total') / units.k, 5)\n                metric_map['readThroughput'] = round(\n                    obj_metric.get('KBytes').get('read') / units.k, 5)\n                metric_map['writeThroughput'] = round(\n                    obj_metric.get('KBytes').get('write') / units.k, 5)\n                metric_map['responseTime'] = obj_metric.get(\n                    'serviceTimeMS').get('total')\n                metric_map['ioSize'] = obj_metric.get('IOSizeKB').get('total')\n                metric_map['readIoSize'] = obj_metric.get('IOSizeKB').get(\n                    'read')\n                metric_map['writeIoSize'] = obj_metric.get('IOSizeKB').get(\n                    'write')\n                metric_map['collect_timestamp'] = obj_metric.get(\n                    'collect_timestamp')\n                metric_list.append(metric_map)\n        return collect_resource_map\n\n    def get_latest_perf_timestamp(self):\n        latest_time = 0\n        disks_metrics_datas = self.ssh_handler.get_disk_metrics(None, None)\n        for metrics_data in (disks_metrics_datas or []):\n            if metrics_data and metrics_data.get('collect_time'):\n                latest_time = metrics_data.get('collect_time')\n                break\n        return latest_time\n\n    def convert_to_system_time(self, occur_time):\n        # shift a UTC millisecond timestamp into the local system timezone\n        dateArray = datetime.datetime.utcfromtimestamp(occur_time / units.k)\n        otherStyleTime = dateArray.strftime(\"%Y-%m-%d %H:%M:%SZ\")\n        timeArray = time.strptime(otherStyleTime, \"%Y-%m-%d %H:%M:%SZ\")\n        timeStamp = int(time.mktime(timeArray))\n        hour_offset = (time.mktime(time.localtime()) - time.mktime(\n            time.gmtime())) / consts.SECONDS_PER_HOUR\n        occur_time = timeStamp * units.k + (int(hour_offset) *\n                                            consts.SECONDS_PER_HOUR) * units.k\n        return occur_time\n\n    def list_storage_host_initiators(self, storage_id):\n        initiators = self.ssh_handler.list_storage_host_initiators()\n        initiators_list = []\n        wwn_set = set()\n        for initiator in (initiators or []):\n            if initiator:\n                wwn = initiator.get('wwn/iscsi_name', '').replace('-', '')\n                if wwn:\n                    if wwn in wwn_set:\n                        continue\n                    wwn_set.add(wwn)\n                    ip_addr = initiator.get('ip_addr')\n                    # an initiator with a usable IP address is iSCSI, else FC\n                    initiator_type = constants.InitiatorType.FC\n                    if ip_addr and ip_addr != 'n/a':\n                        initiator_type = constants.InitiatorType.ISCSI\n                    initiator_model = {\n                        \"name\": wwn,\n                        \"storage_id\": storage_id,\n                        \"native_storage_host_initiator_id\": wwn,\n                        \"wwn\": wwn,\n                        \"type\": initiator_type,\n                        \"status\": constants.InitiatorStatus.ONLINE,\n                        \"native_storage_host_id\": initiator.get('id',\n 
                                                               '').replace(\n                            '-', ''),\n                    }\n                    initiators_list.append(initiator_model)\n        return initiators_list\n\n    def list_storage_hosts(self, storage_id):\n        host_datas = self.rest_handler.list_storage_host()\n        host_list = []\n        if host_datas:\n            hosts = host_datas.get('members')\n            for host in (hosts or []):\n                if host and host.get('name'):\n                    descriptors = host.get('descriptors')\n                    comment = None\n                    os = ''\n                    ip_addr = None\n                    if descriptors:\n                        comment = descriptors.get('comment')\n                        os = descriptors.get('os', '')\n                        ip_addr = descriptors.get('IPAddr')\n                    host_model = {\n                        \"name\": host.get('name'),\n                        \"description\": comment,\n                        \"storage_id\": storage_id,\n                        \"native_storage_host_id\": host.get('id'),\n                        \"os_type\": consts.HOST_OS_MAP.get(\n                            os, constants.HostOSTypes.UNKNOWN),\n                        \"status\": constants.HostStatus.NORMAL,\n                        \"ip_address\": ip_addr\n                    }\n                    host_list.append(host_model)\n        return host_list\n\n    def list_storage_host_groups(self, storage_id):\n        host_groups = self.ssh_handler.list_storage_host_groups()\n        host_group_list = []\n        result = {}\n        if host_groups:\n            hosts_map = self.ssh_handler.get_resources_ids(\n                self.ssh_handler.HPE3PAR_COMMAND_SHOWHOST_D,\n                consts.HOST_OR_VV_PATTERN)\n            for host_group in host_groups:\n                host_members = host_group.get('members')\n                host_ids = []\n                if hosts_map:\n                    for host_name in (host_members or []):\n                        host_id = hosts_map.get(host_name)\n                        if host_id:\n                            host_ids.append(host_id)\n                host_group_model = {\n                    \"name\": host_group.get('name'),\n                    \"description\": host_group.get('comment'),\n                    \"storage_id\": storage_id,\n                    \"native_storage_host_group_id\": host_group.get('id'),\n                    \"storage_hosts\": ','.join(host_ids)\n                }\n                host_group_list.append(host_group_model)\n            storage_host_grp_relation_list = []\n            for storage_host_group in host_group_list:\n                storage_hosts = storage_host_group.pop('storage_hosts', None)\n                if not storage_hosts:\n                    continue\n                storage_hosts = storage_hosts.split(',')\n\n                for storage_host in storage_hosts:\n                    storage_host_group_relation = {\n                        'storage_id': storage_id,\n                        'native_storage_host_group_id': storage_host_group.get(\n                            'native_storage_host_group_id'),\n                        'native_storage_host_id': storage_host\n                    }\n                    storage_host_grp_relation_list \\\n                        .append(storage_host_group_relation)\n\n            result = {\n                'storage_host_groups': 
host_group_list,\n                'storage_host_grp_host_rels': storage_host_grp_relation_list\n            }\n        return result\n\n    def list_port_groups(self, storage_id):\n        views = self.ssh_handler.list_masking_views()\n        port_groups_list = []\n        port_list = []\n        for view in (views or []):\n            port = view.get('port', '').replace('-', '')\n            if port:\n                if port in port_list:\n                    continue\n                port_list.append(port)\n                port_group_model = {\n                    \"name\": \"port_group_\" + port,\n                    \"description\": \"port_group_\" + port,\n                    \"storage_id\": storage_id,\n                    \"native_port_group_id\": \"port_group_\" + port,\n                    \"ports\": port\n                }\n                port_groups_list.append(port_group_model)\n        port_group_relation_list = []\n        for port_group in port_groups_list:\n            ports = port_group.pop('ports', None)\n            if not ports:\n                continue\n            ports = ports.split(',')\n\n            for port in ports:\n                port_group_relation = {\n                    'storage_id': storage_id,\n                    'native_port_group_id':\n                        port_group.get('native_port_group_id'),\n                    'native_port_id': port\n                }\n                port_group_relation_list.append(port_group_relation)\n        result = {\n            'port_groups': port_groups_list,\n            'port_grp_port_rels': port_group_relation_list\n        }\n        return result\n\n    def list_volume_groups(self, storage_id):\n        volume_groups = self.ssh_handler.list_volume_groups()\n        volume_group_list = []\n        result = {}\n        if volume_groups:\n            volumes_map = self.ssh_handler.get_resources_ids(\n                self.ssh_handler.HPE3PAR_COMMAND_SHOWVV,\n                consts.HOST_OR_VV_PATTERN)\n            for volume_group in volume_groups:\n                volume_members = volume_group.get('members')\n                volume_ids = []\n                if volumes_map:\n                    for volume_name in (volume_members or []):\n                        volume_id = volumes_map.get(volume_name)\n                        if volume_id:\n                            volume_ids.append(volume_id)\n                volume_group_model = {\n                    \"name\": volume_group.get('name'),\n                    \"description\": volume_group.get('comment'),\n                    \"storage_id\": storage_id,\n                    \"native_volume_group_id\": volume_group.get('id'),\n                    \"volumes\": ','.join(volume_ids)\n                }\n                volume_group_list.append(volume_group_model)\n            volume_group_relation_list = []\n            for volume_group in volume_group_list:\n                volumes = volume_group.pop('volumes', None)\n                if not volumes:\n                    continue\n                volumes = volumes.split(',')\n\n                for volume in volumes:\n                    volume_group_relation = {\n                        'storage_id': storage_id,\n                        'native_volume_group_id':\n                            volume_group.get('native_volume_group_id'),\n                        'native_volume_id': volume}\n                    volume_group_relation_list.append(volume_group_relation)\n\n            result = {\n                
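# group models plus the flattened group-to-volume relations\n                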
'volume_groups': volume_group_list,\n                'vol_grp_vol_rels': volume_group_relation_list\n            }\n        return result\n\n    def list_masking_views(self, storage_id):\n        views = self.ssh_handler.list_masking_views()\n        views_list = []\n        if views:\n            hosts_map = self.ssh_handler.get_resources_ids(\n                self.ssh_handler.HPE3PAR_COMMAND_SHOWHOST_D,\n                consts.HOST_OR_VV_PATTERN)\n            hosts_group_map = self.ssh_handler.get_resources_ids(\n                self.ssh_handler.HPE3PAR_COMMAND_SHOWHOSTSET_D,\n                consts.HOST_OR_VV_PATTERN)\n            volumes_map = self.ssh_handler.get_resources_ids(\n                self.ssh_handler.HPE3PAR_COMMAND_SHOWVV,\n                consts.HOST_OR_VV_PATTERN)\n            volumes_group_map = self.ssh_handler.get_resources_ids(\n                self.ssh_handler.HPE3PAR_COMMAND_SHOWVVSET_D,\n                consts.HOST_OR_VV_PATTERN)\n            host_vv_set = set()\n            for view in views:\n                vv_name = view.get('vvname')\n                host_name = view.get('hostname')\n                if vv_name and host_name:\n                    host_vv_key = '%s_%s' % (host_name, vv_name)\n                    host_vv_key = host_vv_key.replace(' ', '')\n                    if host_vv_key in host_vv_set:\n                        continue\n                    host_vv_set.add(host_vv_key)\n                    port = view.get('port', '').replace('-', '')\n                    lun_id = view.get('lun')\n                    wwn = view.get('host_wwn/iscsi_name', '').replace('-', '')\n                    native_port_group_id = None\n                    if port:\n                        lun_id = '%s_%s' % (lun_id, port)\n                        native_port_group_id = 'port_group_%s' % port\n                    if wwn:\n                        lun_id = '%s_%s' % (lun_id, wwn)\n                    lun_id = '%s_%s' % (lun_id, host_vv_key)\n                    view_model = {\n                        'native_masking_view_id': lun_id,\n                        \"name\": view.get('lun'),\n                        'native_port_group_id': native_port_group_id,\n                        \"storage_id\": storage_id\n                    }\n                    if 'set:' in vv_name:\n                        vv_set_id = volumes_group_map.get(\n                            vv_name.replace('set:', ''))\n                        view_model['native_volume_group_id'] = vv_set_id\n                    else:\n                        vv_id = volumes_map.get(vv_name)\n                        view_model['native_volume_id'] = vv_id\n                    if 'set:' in host_name:\n                        host_set_id = hosts_group_map.get(\n                            host_name.replace('set:', ''))\n                        view_model[\n                            'native_storage_host_group_id'] = host_set_id\n                    else:\n                        host_id = hosts_map.get(host_name)\n                        view_model['native_storage_host_id'] = host_id\n                    if (view_model.get('native_storage_host_id')\n                        or view_model.get('native_storage_host_group_id')) \\\n                            and (view_model.get('native_volume_id')\n                                 or view_model.get('native_volume_group_id')):\n                        views_list.append(view_model)\n        return views_list\n"
  },
  {
    "path": "delfin/drivers/hpe/hpe_3par/consts.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2016 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n# CPG's status\nfrom delfin.common import constants\n\nSTATUS_POOL_NORMAL = 1  # CPG STATUS Normal operation\nSTATUS_POOL_DEGRADED = 2  # CPG STATUS Degraded state\nSTATUS_POOL_FAILED = 3  # CPG STATUS Abnormal operation\nSTATUS_POOL_UNKNOWN = 99  # CPG STATUS Unknown state\n# VOLUME's status\nSTATUS_VOLUME_NORMAL = 1  # VOLUME STATUS Normal operation\nSTATUS_VOLUME_DEGRADED = 2  # VOLUME STATUS Degraded state\nSTATUS_VOLUME_FAILED = 3  # VOLUME STATUS Abnormal operation\nSTATUS_VOLUME_UNKNOWN = 99  # VOLUME STATUS Unknown state\n# VOLUME's type\nTHIN_LUNTYPE = 2  # TPVV 2\t• TPVV,\n# VOLUME's Compression status\nSTATUS_COMPRESSION_YES = 1  # Compression is enabled on the volume\n# VOLUME's deduplication status\nSTATUS_DEDUPLICATIONSTATE_YES = 1  # Enables deduplication on the volume\n# Page size per page at default paging\nQUERY_PAGE_SIZE = 150\n# Connection timeout\nLOGIN_SOCKET_TIMEOUT = 10\nSOCKET_TIMEOUT = 10\n# 403  The client request has an invalid session key.\n# The request came from a different IP address\nERROR_SESSION_INVALID_CODE = 403\n# 409  Session key is being used.\nERROR_SESSION_IS_BEING_USED_CODE = 409\n# http SUCCESS's status\nSUCCESS_STATUS_CODES = 200\n# session SUCCESS's status\nLOGIN_SUCCESS_STATUS_CODES = 201\n\nSERVICE_UNAVAILABLE_CODES = 503\nBAD_REQUEST_CODES = 400\nNOT_IMPLEMENTED_CODES = 501\n\n# alert state enumeration\nALERT_STATE_NEW = 1  # New.\nALERT_STATE_ACKED = 2  # Acknowledged state.\nALERT_STATE_FIXED = 3  # Alert issue fixed.\nALERT_STATE_UNKNOWN = 99  # Unknown state\n\n# alert severity enumeration\nALERT_SEVERITY_CRITICAL = 2\nALERT_SEVERITY_MAJOR = 3\nALERT_SEVERITY_MINOR = 4\nALERT_SEVERITY_DEGRADED = 5\n\n# alert code\nHPE3PAR_ALERT_CODE = {\n    '0x0000000': 'Node CPU Thermal Status',\n    '0x0010001': 'Serial link event',\n    '0x0010002': 'Serial link fail FIFO full',\n    '0x0010003': 'Serial link fail full loss',\n    '0x0010004': 'Serial link fail rate loss',\n    '0x0020001': 'Active VLUN Limit Exceeded',\n    '0x0020002': 'System Reporter VLUN performance (major alert)',\n    '0x0020003': 'System Reporter VLUN performance (critical alert)',\n    '0x0020004': 'System Reporter VLUN performance (minor alert)',\n    '0x0020005': 'System Reporter VLUN performance (info alert)',\n    '0x0030001': 'Firmware coredump event',\n    '0x0030002': 'Too many WWNs on an RCFC port',\n    '0x0030003': 'Host [[sw_port]] experienced over 50 CRC '\n                 'errors (<count>) in 24 hours',\n    '0x0030005': 'FC Port Error',\n    '0x0030006': 'FC Port Loop Connection Type Not Supported',\n    '0x0030007': 'RCFC port sees non-3PAR WWNs',\n    '0x0030009': 'Excessive retransmits on RCFC port',\n    '0x0030010': 'Port Device Count Exceeded',\n    '0x0030011': 'CRC error on RCIP port',\n    '0x0030012': 'Unsupported SATA Drive',\n    '0x0030013': 
'Unsupported SAS Device',\n    '0x0030014': 'Multiple SAS Initiators',\n    '0x0030015': 'System Reporter port performance (major alert)',\n    '0x0030016': 'Disk Port has exceeded IO error threshold',\n    '0x0030017': 'System Reporter port performance (critical alert)',\n    '0x0030018': 'System Reporter port performance (minor alert)',\n    '0x0030019': 'System Reporter port performance (info alert)',\n    '0x00300de': 'Component state change',\n    '0x00300fa': 'Component state change',\n    '0x0040001': 'Metadata inconsistency in a VV',\n    '0x0040003': 'Admin Volume I/O timeout',\n    '0x0040004': 'VV availability',\n    '0x0040005': 'Pinned DCOWs',\n    '0x0040006': 'Aborted DCOWs',\n    '0x0040007': 'Recovery scan found corrupt log',\n    '0x0040008': 'vlmap count exceeds threshold',\n    '0x0040009': 'FlashCache performance degradation',\n    '0x004000a': 'VV unrecovered DIF error',\n    '0x004000b': 'Metadata inconsistency in a Deduplication Group',\n    '0x004000c': 'VV unrecovered DIF error',\n    '0x004000d': 'System Reporter VV space major alert',\n    '0x004000e': 'System Reporter VV space critical alert',\n    '0x004000f': 'System Reporter VV space minor alert',\n    '0x0040010': 'System Reporter VV space info alert',\n    '0x0040011': 'Flash Cache Creation Failure',\n    '0x0040012': 'SD Metadata inconsistency in a VV',\n    '0x0040013': 'Compression is not enabled for Volumes less than 16GB',\n    '0x0040014': 'System VV detected',\n    '0x00400de': 'Component state change',\n    '0x00400fa': 'Component state change',\n    '0x0050002': 'Ldsk has failed set',\n    '0x0050003': 'LD check summary message',\n    '0x0050004': 'LD availability has reduced',\n    '0x0050005': 'Log LD raid set failure.',\n    '0x0050006': 'System Reporter LD performance (major alert)',\n    '0x0050007': 'LD check inconsistent',\n    '0x0050008': 'LD check failed LD not consistent',\n    '0x0050009': 'LD check consistent',\n    '0x005000a': 'LD check changed logical disk',\n    '0x005000b': 'System Reporter LD performance (critical alert)',\n    '0x005000c': 'System Reporter LD performance (minor alert)',\n    '0x005000d': 'System Reporter LD performance (info alert)',\n    '0x005000f': 'System Reporter LD space critical alert',\n    '0x0050010': 'System Reporter LD space minor alert',\n    '0x0050011': 'System Reporter LD space info alert',\n    '0x0050012': 'System Reporter LD space major alert',\n    '0x0060001': 'Disk fail alert',\n    '0x0060002': 'Disk monitor stopped',\n    '0x0060003': 'Invalid PD configuration',\n    '0x0060007': '42 Alerts',\n    '0x0060008': 'Disk overtemp warning',\n    '0x0060009': 'Disk overtemp alert',\n    '0x006000a': 'Chunklet relocation failure',\n    '0x006000b': 'System Reporter PD performance (major alert)',\n    '0x006000c': 'System overtemp',\n    '0x006000d': 'Disk overtemp warning',\n    '0x006000e': 'Disk overtemp alert',\n    '0x0060011': 'Disk overtemp but not spundown',\n    '0x0060012': 'Disk overtemp and spundown',\n    '0x0060013': 'Disk overtemp but not spundown no DSK',\n    '0x0060014': 'Disk overtemp and spundown no DSK',\n    '0x0060015': 'System Reporter PD space major alert',\n    '0x0060016': 'System Reporter PD space critical alert',\n    '0x0060017': 'System Reporter PD space minor alert',\n    '0x0060018': 'System Reporter PD space info alert',\n    '0x0060019': 'System Reporter PD performance critical alert',\n    '0x006001a': 'System Reporter PD performance minor alert',\n    '0x006001b': 'System Reporter PD performance info alert',\n 
   '0x00600de': 'Component state change',\n    '0x00600fa': 'Component state change',\n    '0x0070001': 'No free chunklet found for relocation',\n    '0x0070002': 'No spare chunklet found for relocation',\n    '0x0080001': 'Could not process SCSI DB',\n    '0x0090001': 'Host Path Status Change',\n    '0x00900de': 'Component state change',\n    '0x00a0005': 'Snap Admin Volume low on space, degraded',\n    '0x00a0006': 'Snap Data Volume low on space, degraded',\n    '0x00a0007': 'Second snap Data Volume low on space, degraded',\n    '0x00b0001': 'Kernel crashdump event',\n    '0x00b0002': 'Kernel crashdump with error',\n    '0x00c0001': 'Process has exited',\n    '0x00c0002': 'Process cannot be started',\n    '0x00c0003': 'Process coredump event',\n    '0x00c0004': 'Attempt to run grub failed',\n    '0x00c0005': 'Attempt to run grub failed, PM not starting',\n    '0x00c0006': 'Attempt to run grub failed, retval',\n    '0x00c0007': 'Process coredump with error',\n    '0x00d0001': 'Corrupt PR table found',\n    '0x00d0002': 'PR transition',\n    '0x00d0003': 'PR transition, degraded.',\n    '0x00e0001': 'Double node failure',\n    '0x00e0002': 'System manager cannot startup',\n    '0x00e0003': 'Node recovery powerfail event',\n    '0x00e0004': '<success> use of golden license',\n    '0x00e0005': 'License key usage, license expired',\n    '0x00e0006': 'System recovery notification about bad volume',\n    '0x00e0007': 'Pfail partition needs to be wiped',\n    '0x00e0008': 'Power fail saved version mismatch',\n    '0x00e0009': 'Failed to save task data',\n    '0x00e000a': 'Task failed',\n    '0x00e000b': 'Pfail recovery continued with failed previous NM1 recovery',\n    '0x00e000d': 'System recovery stalled due to unknown replicant state',\n    '0x00e000e': 'System recovery stalled due to sole owner of ld missing',\n    '0x00e0011': '\"servicemag start\" operation has completed',\n    '0x00e0012': '\"servicemag resume\" operation has completed',\n    '0x00e0014': 'Battery States',\n    '0x00e0015': 'Node not integrated',\n    '0x00e0016': 'System recovery stalled due to unstarted vvs',\n    '0x00e0017': 'TOC corruption detected',\n    '0x00e0018': 'Pfail Recovery with a missing VV',\n    '0x00e0019': 'Pfail Recovery with VV in bad state',\n    '0x00e001a': 'Pfail Recovery skipped due to multiple NM1 nodes',\n    '0x00e001b': 'NM1 pfail recovery proceeding with missing replicant',\n    '0x00e001c': 'Configuration lock hold time',\n    '0x00e001d': 'Inconsistent TOC object removed',\n    '0x00e001e': 'Invalid VVMEMB(s) resolved',\n    '0x00e001f': '\"servicemag resume\" operation has passed '\n                 'with dismissed disks',\n    '0x00e0020': '\"servicemag resume\" operation has passed '\n                 'without dismissing any disks',\n    '0x00e0021': '\"servicemag resume\" operation has failed '\n                 'with no error message',\n    '0x00e0022': '\"servicemag resume\" operation has failed to admit disk',\n    '0x00e0023': '\"servicemag resume\" operation has failed '\n                 'unrecoverable disk',\n    '0x00e0024': '\"servicemag resume\" operation has failed to '\n                 'relocate_chunklets',\n    '0x00e0025': 'System manager cannot start up, TOC not found',\n    '0x00e0026': 'System manager cannot start up, waiting on nodes',\n    '0x00e0027': 'System manager cannot start up, manual start up set',\n    '0x00e0028': 'System manager cannot start up, TOC quorum not met',\n    '0x00e0029': 'System manager cannot start up, waiting for '\n                 'nodes 
to recover',\n    '0x00e002a': 'Pfail partition needs to be wiped',\n    '0x00e002b': 'Pfail partition needs to be wiped',\n    '0x00e002c': 'System manager cannot start up, incomplete powerfail',\n    '0x00e002d': 'System manager cannot start up, TOC quorum found, '\n                 'incomplete powerfail',\n    '0x00e002e': 'System manager cannot start up, TOC quorum found, '\n                 'waiting for nodes to recover',\n    '0x00e002f': 'System manager cannot start up, waiting for nodes '\n                 'to recover',\n    '0x00e0030': 'Unexpected encryption state on node drive',\n    '0x00e0031': '\"servicemag start\" failed',\n    '0x00e0032': 'Single node WBC is active',\n    '0x00e0033': 'Single node WBC is expired',\n    '0x0100001': 'Online upgrade',\n    '0x0100002': 'Unresponsive IOCTL',\n    '0x0100003': 'Update available',\n    '0x0100004': 'Update status',\n    '0x0100005': 'Update install status',\n    '0x0100006': 'Unresponsive IOCTL Verbose',\n    '0x0110001': 'Errors accessing the IDE disk',\n    '0x0110002': 'IDE disk error handling',\n    '0x0110004': 'Version mismatch event',\n    '0x0110005': 'Serial comm init failed',\n    '0x0110006': 'IDE disk error node shutdown',\n    '0x0110007': 'IDE disk error node not shutdown',\n    '0x0110008': 'IDE disk error node not shutdown LDs cannot be served',\n    '0x0110009': 'IDE disk error node reboot',\n    '0x011000a': 'Version mismatch event for svcalert',\n    '0x011000b': 'Version mismatch event',\n    '0x011000c': 'Version mismatch event',\n    '0x0130001': 'Too many alerts in the system',\n    '0x0140001': 'Notification',\n    '0x0140003': 'fork(2) call failed',\n    '0x0140004': 'System Reporter QoS performance (major alert)',\n    '0x0140005': 'SFP Unqualified Notification',\n    '0x0140007': 'System upgrade cancelled',\n    '0x0140008': 'System upgrade Cancellation Failed',\n    '0x0140009': 'System serial number could not be determined',\n    '0x014000a': 'DC3 I2C Lockup Reset Succeeded',\n    '0x014000b': 'DC3 I2C Lockup Reset Failed',\n    '0x014000c': 'admitpd not allowed on Emulex generated wwn',\n    '0x014000d': 'admitpd not allowed on toto-sata generated wwn',\n    '0x014000e': 'RAID 0 LD failed due to stale chunklet',\n    '0x014000f': 'Mismatch of failed chunklet information',\n    '0x0140010': 'System Reporter QoS performance (critical alert)',\n    '0x0140011': 'System Reporter QoS performance (minor alert)',\n    '0x0140012': 'System Reporter QoS performance (info alert)',\n    '0x0150004': 'CLI server cannot communicate with system manager',\n    '0x0150005': 'CLI internal error using authentication library',\n    '0x0150006': 'Authentication failure',\n    '0x0150007': 'CLI internal error',\n    '0x015000c': 'CPG free space limit',\n    '0x015000d': 'CLI client process event',\n    '0x015000f': 'Relocatepd request',\n    '0x0150010': 'Control Recovery Auth Ciphertext Export',\n    '0x0150011': 'CLI server process event, max tpdtcl exceeded',\n    '0x0150012': 'CLI server process event, twice max tpdtcl exceeded',\n    '0x0150013': 'CLI server process event, max CLI server exceeded',\n    '0x0150014': 'CLI server process event, max local exceeded',\n    '0x0150015': 'CLI server process event, max server exceeded brief',\n    '0x0150016': 'CLI server process event, max server exceeded local',\n    '0x0150017': 'CLI server process event, error in track',\n    '0x0150018': 'CLI server process event, error in store user name',\n    '0x0150019': 'CLI server process event, svcalert brief',\n    
'0x015001a': 'CLI server process event, svcalert',\n    '0x015001b': 'CLI internal error Failed sanity check',\n    '0x015001c': 'CLI internal error sqlite database',\n    '0x015001d': 'CLI internal error SQLite DB',\n    '0x015001f': 'CLI client process event disk high temp',\n    '0x0150020': 'Unable to send an event to the security syslog server.',\n    '0x0150021': 'Connection has been reestablished to the '\n                 'security syslog server.',\n    '0x0150022': 'Slow Disk temperature unavailable',\n    '0x0170001': 'TOC update',\n    '0x0170004': 'TOC update, not above error threshold and decreased.',\n    '0x0170005': 'TOC update, not above warn threshold and decreased.',\n    '0x0190001': 'ea msg timeout',\n    '0x0190002': 'Pre Integration Link Test Error',\n    '0x01a0001': 'CPU Memory Correctable ECC',\n    '0x01a0002': 'Node is offline',\n    '0x01a0003': 'Node Time of Day Battery',\n    '0x01a0005': 'HW: CPU Memory Correctable ECC',\n    '0x01a0006': 'CPU Configuration',\n    '0x01a0007': 'BIOS IDE log entry',\n    '0x01a0008': 'Node Environmental Check Pass',\n    '0x01a0009': 'IDE file integrity check results',\n    '0x01a000b': 'Eagle memory uerr',\n    '0x01a000c': 'Eagle memory muerr',\n    '0x01a000d': 'Eagle memory cerr',\n    '0x01a000e': 'Eagle internal system error',\n    '0x01a000f': 'Eagle hardware watchdog error',\n    '0x01a0010': 'Eagle PCI error',\n    '0x01a0011': 'Eagle driver software error',\n    '0x01a0012': 'Memory usage information',\n    '0x01a0014': 'Too many TCP segment retransmits',\n    '0x01a0015': 'Node PCIe Correctable Error Status',\n    '0x01a0016': 'Node PCIe Link Status',\n    '0x01a0017': 'Too many TCP segment errors',\n    '0x01a0019': 'Cluster thermal shutdown',\n    '0x01a001a': 'Link Configuration Mismatch',\n    '0x01a001b': 'Unexpected Cable Event',\n    '0x01a001c': 'Link establish alert',\n    '0x01a001d': 'Core File Received From Remote/Local MCU',\n    '0x01a001f': 'Node Needs to Shutdown',\n    '0x01a0021': 'Node Rescue',\n    '0x01a0022': 'Node-Failure-Analysis File Received From Remote/Local MCU',\n    '0x01a0024': 'Slab usage information',\n    '0x01a0025': 'System Reporter cmp performance (major alert)',\n    '0x01a0026': 'System Reporter CPU performance (major alert)',\n    '0x01a0027': 'System Reporter link performance (major alert)',\n    '0x01a0028': 'Node ID Mismatch',\n    '0x01a0029': 'Remote Node ID Mismatch',\n    '0x01a002a': 'System Model Mismatch',\n    '0x01a002b': 'Remote System Model Mismatch',\n    '0x01a002c': 'Node Type Mismatch',\n    '0x01a002d': 'Remote Node Type Mismatch',\n    '0x01a002e': 'SSN Mismatch',\n    '0x01a002f': 'Remote SSN Mismatch',\n    '0x01a0031': 'Node Rescue User Abort',\n    '0x01a0032': 'Node Rescue Invalid',\n    '0x01a0033': 'Node Rescue Internal Communication Error',\n    '0x01a0034': 'Node Rescue No Rejoin',\n    '0x01a0035': 'Node Rescue Port 80 Blocked',\n    '0x01a0036': 'Node Rescue Port 69 Blocked',\n    '0x01a0037': 'Node Rescue Port 873 Blocked',\n    '0x01a0038': 'Node Rescue No Backplane Connection',\n    '0x01a0039': 'CMP Threshold',\n    '0x01a003a': 'DIF error',\n    '0x01a003b': 'IDE file integrity check bad run',\n    '0x01a003c': 'IDE file integrity check bad',\n    '0x01a003d': 'IDE file integrity check very bad',\n    '0x01a003e': 'System Reporter cache performance alert',\n    '0x01a003f': 'Legacy System Model Mismatch',\n    '0x01a0040': 'Remote System Model Mismatch',\n    '0x01a0041': 'Node Rescue Detected Dual Boot Node Drive Size Mismatch',\n    
'0x01a0042': 'Node Environmental Check Fail',\n    '0x01a0043': 'Node Thermal Status svc alert',\n    '0x01a0044': 'Node Needs to Shutdown svc alert',\n    '0x01a0045': 'Node Thermal Status Alert',\n    '0x01a0046': 'Node Thermal Status Warning',\n    '0x01a0047': 'System Reporter cmp performance (critical alert)',\n    '0x01a0048': 'System Reporter cmp performance (minor alert)',\n    '0x01a0049': 'System Reporter cmp performance (info alert)',\n    '0x01a004a': 'System Reporter CPU performance (critical alert)',\n    '0x01a004b': 'System Reporter CPU performance (minor alert)',\n    '0x01a004c': 'System Reporter CPU performance (info alert)',\n    '0x01a004d': 'System Reporter link performance (critical alert)',\n    '0x01a004e': 'System Reporter link performance (minor alert)',\n    '0x01a004f': 'System Reporter link performance (info alert)',\n    '0x01a0050': 'System Reporter cache performance (critical alert)',\n    '0x01a0051': 'System Reporter cache performance (minor alert)',\n    '0x01a0052': 'System Reporter cache performance (info alert)',\n    '0x01a0053': 'Eagle link error',\n    '0x01a0054': 'System Series Mismatch',\n    '0x01a0055': 'Remote System Series Mismatch',\n    '0x01a0056': 'Node temporary filesystem in use',\n    '0x01a0057': 'Node rescue detected that rescuee node has an '\n                 'incompatible board series',\n    '0x01a00de': 'Component state change',\n    '0x01a00fa': 'Component state change',\n    '0x01b0001': 'Power Supply',\n    '0x01b0002': 'Power Supply DC Status',\n    '0x01b0003': 'Power Supply AC Status',\n    '0x01b0004': 'Power Supply Fan Status',\n    '0x01b0005': 'Power Supply Charger Status',\n    '0x01b0009': 'Power Supply Type Mismatch',\n    '0x01b0015': 'VSC 055 Interrupt Error',\n    '0x01b00de': 'Component state change',\n    '0x01b00fa': 'Component state change',\n    '0x01d0001': 'Bios eeprom log events',\n    '0x01e0001': 'Cage log event',\n    '0x01e0005': 'Cage coredump event',\n    '0x01e0006': 'servicemag failed to dismiss PD: '\n                 'cage <cageid>, mag <magid>, '\n                 'taskid <taskid>, pd <pdid>: error<smag_err> - <text>',\n    '0x01e0007': 'Critical ESI port count, down to one',\n    '0x01e0008': 'Critical ESI port count, one valid',\n    '0x01e0009': 'Critical ESI port count, lost',\n    '0x01e000a': 'Invalid cage isolated configuration',\n    '0x01e000b': 'Invalid cage isolated configuration',\n    '0x01e000c': 'Invalid cage mixed configuration',\n    '0x01e000d': 'Invalid cage unknown configuration',\n    '0x01e000e': 'Invalid cage partners configuration',\n    '0x01e000f': 'Invalid cage maxcage configuration',\n    '0x01e0010': 'Invalid cage twice configuration',\n    '0x01e0011': 'Unknown cage configuration',\n    '0x01e0012': 'Cage coredump event - detailed - 0',\n    '0x01e0013': 'Cage coredump event - detailed - 1',\n    '0x01e0014': 'Cage coredump event - detailed - 2',\n    '0x01e0015': 'Cage coredump event - detailed - 3',\n    '0x01e0016': 'Cage coredump event - very detailed - 0',\n    '0x01e0017': 'Cage coredump event - very detailed - 1',\n    '0x01e0018': 'Cage log event, firmware panic',\n    '0x01e0019': 'Cage log event, midplane esi',\n    '0x01e001a': 'Cage log event, midplane',\n    '0x01e001b': 'Cage log event, post',\n    '0x01e001c': 'Cage log event, midplane lm87',\n    '0x01e001d': 'Cage log event, midplane pmc',\n    '0x01e00de': 'Component state change',\n    '0x01e00fa': 'Component state change',\n    '0x01f0001': 'Mixing SSDs with different RPMs not supported',\n    
'0x01f00de': 'Component state change',\n    '0x01f00fa': 'Component state change',\n    '0x0200006': 'GUI server can not communicate with the system manager',\n    '0x0200009': 'Internal error in authentication library',\n    '0x0210001': 'InForm GUI has lost connection to the event filter',\n    '0x0220001': 'Battery expiring soon',\n    '0x0220010': 'Assert Battery FAIL',\n    '0x0220014': 'Battery Type Mismatch',\n    '0x0220017': 'Battery expiration soon',\n    '0x02200de': 'Component state change',\n    '0x02200fa': 'Component state change',\n    '0x0230003': 'Port shutdown on fatal error',\n    '0x0230004': 'Host port is down',\n    '0x0230005': 'All ports in the same FC card must be configured for RCFC',\n    '0x0230006': 'HBA fw file status',\n    '0x0230007': 'HBA FW error opening file',\n    '0x0230008': 'HBA FW error reading file',\n    '0x0230009': 'HBA FW unsupported file',\n    '0x0240002': 'Internodal Serial Port Receiver Timeout Error',\n    '0x0240003': 'Internodal Serial Port Default Error',\n    '0x0250002': 'Remote Copy link status',\n    '0x0250007': 'System Reporter RC Target performance (major alert)',\n    '0x0250008': 'System Reporter RC VV performance (major alert)',\n    '0x0250009': 'Remote Copy group in failsafe state',\n    '0x025000a': 'Replication resource usage exceeded - Group \"Logging\".',\n    '0x025000b': 'Replication resource usage exceeded - Group \"Stopped\".',\n    '0x025000c': 'Replication resources restored - Group transition '\n                 'from Logging failure',\n    '0x025000d': 'System Reporter RC VV performance (critical alert)',\n    '0x025000e': 'System Reporter RC VV performance (minor alert)',\n    '0x025000f': 'System Reporter RC VV performance (info alert)',\n    '0x0250011': 'System Reporter RC Target performance (critical alert)',\n    '0x0250012': 'System Reporter RC Target performance (minor alert)',\n    '0x0250013': 'System Reporter RC Target performance (info alert)',\n    '0x0250014': 'Remote Copy group status alert',\n    '0x0250015': 'Remote Copy group status fail',\n    '0x0250016': 'Quorum is not in Started state',\n    '0x0260001': 'Ethernet Monitor Event',\n    '0x0260002': 'No admin network interface discovered',\n    '0x0270001': 'TP VV allocation size warning',\n    '0x0270002': 'TP VV allocation size limit',\n    '0x0270003': 'Snapshot space allocation size warning',\n    '0x0270004': 'Snapshot space allocation size limit',\n    '0x0270005': 'CPG growth warning',\n    '0x0270006': 'CPG growth limit',\n    '0x0270007': 'TP VV allocation failure',\n    '0x0270008': 'Snapshot space allocation failure',\n    '0x0270009': 'CPG growth failure',\n    '0x027000e': 'FC raw space allocation 50% alert',\n    '0x027000f': 'FC raw space allocation 75% alert',\n    '0x0270010': 'FC raw space allocation 85% alert',\n    '0x0270011': 'FC raw space allocation 95% alert',\n    '0x0270012': 'CPG space used status',\n    '0x0270013': 'Raw space allocation user configured alert',\n    '0x0270014': 'NL raw space allocation 50% alert',\n    '0x0270015': 'NL raw space allocation 75% alert',\n    '0x0270016': 'NL raw space allocation 85% alert',\n    '0x0270017': 'NL raw space allocation 95% alert',\n    '0x0270018': 'CPG was grown with degraded parameters',\n    '0x0270019': 'SSD raw space allocation 50% alert',\n    '0x027001a': 'SSD raw space allocation 75% alert',\n    '0x027001b': 'SSD raw space allocation 85% alert',\n    '0x027001c': 'SSD raw space allocation 95% alert',\n    '0x027001d': 'CPG growth failure non-admin',\n    
'0x027001e': 'CPG growth non admin limit',\n    '0x027001f': 'CPG growth non admin warning',\n    '0x0270020': 'Overprovisioning CPG warning alert',\n    '0x0270021': 'Overprovisioning CPG limit alert',\n    '0x0270022': 'Overprovisioning warning alert',\n    '0x0270023': 'Overprovisioning limit alert',\n    '0x0270024': 'System Reporter CPG space critical alert',\n    '0x0270025': 'System Reporter CPG space minor alert',\n    '0x0270026': 'System Reporter CPG space info alert',\n    '0x0270027': 'System Reporter CPG space major alert',\n    '0x0280001': 'Preserved data LDs configuration',\n    '0x0280002': 'Preserved data LDs unavailable',\n    '0x0280003': 'Preserved data LDs are filling up',\n    '0x0280004': 'Preserved data LDs are full',\n    '0x0280005': 'LD availability',\n    '0x0280006': 'Preserved data LDs status, mangler class',\n    '0x0280007': 'Preserved data LDs configuration, Not configured',\n    '0x0280008': 'Preserved data LDs configuration, Not started',\n    '0x02900de': 'Component state change',\n    '0x02a00de': 'Component state change',\n    '0x02a00fa': 'Component state change',\n    '0x02b00de': 'Component state change',\n    '0x02b00fa': 'Component state change',\n    '0x02d00de': 'Component state change',\n    '0x02d00fa': 'Component state change',\n    '0x03500de': 'Component state change',\n    '0x03500fa': 'Component state change',\n    '0x0360002': 'Write Cache Availability',\n    '0x0360003': 'System Reporter system space critical alert',\n    '0x0360004': 'System Reporter system space major alert',\n    '0x0360005': 'System Reporter system space info alert',\n    '0x0360006': 'System Reporter system space minor alert',\n    '0x03700de': 'Component state change',\n    '0x03700fa': 'Component state change',\n    '0x03800de': 'Component state change',\n    '0x03900fa': 'Component state change',\n    '0x03a00de': 'Component state change',\n    '0x03a00fa': 'Component state change',\n    '0x03b0002': 'Free node disk space low',\n    '0x03b0004': 'Node drive is encrypted but encryption is '\n                 'not enabled on the system',\n    '0x03b0005': 'Encryption is enabled on the system but the '\n                 'node drive is not encrypted',\n    '0x03b0006': 'Unable to do I/O to the node drive',\n    '0x03b0007': 'Free node disk space low, /common not mounted',\n    '0x03b0008': 'Free node disk space low, /altroot not mounted',\n    '0x03b0009': 'Free node disk space low, /common and /altroot not mounted',\n    '0x03b000a': 'Syslog Node Drive Failure Message Monitoring',\n    '0x03b000b': 'Periodic /proc/mdstat Monitoring '\n                 'Detected Degraded Node Drive Raid',\n    '0x03b000c': 'Lost interrupt',\n    '0x03b000d': 'IDE SMART failed self check',\n    '0x03b000e': 'IDE SMART unreadable sectors',\n    '0x03b000f': 'IDE SMART uncorrectable sectors',\n    '0x03b0010': 'IDE SMART failed unit ready',\n    '0x03b0011': 'IDE SMART failed usage attribute',\n    '0x03b0012': 'IDE SMART failure',\n    '0x03b0013': 'IDE SMART execute test failed',\n    '0x03b0014': 'IDE SMART new self test log error',\n    '0x03b0015': 'IDE SMART repeat self test log error',\n    '0x03b0016': 'IDE SMART ATA error increase',\n    '0x03b0017': 'IDE SMART attribute data read fail',\n    '0x03b0019': 'IDE SMART error log read fail',\n    '0x03b0020': 'DUAL IDE SMART failed self check',\n    '0x03b0021': 'DUAL IDE SMART unreadable sectors',\n    '0x03b0022': 'DUAL IDE SMART uncorrectable sectors',\n    '0x03b0023': 'DUAL IDE SMART failed unit ready',\n    '0x03b0024': 
'DUAL IDE SMART failed usage attribute',\n    '0x03b0025': 'DUAL IDE SMART failure',\n    '0x03b0026': 'DUAL IDE SMART execute test failed',\n    '0x03b0027': 'DUAL IDE SMART new self test log error',\n    '0x03b0028': 'DUAL IDE SMART repeat self test log error',\n    '0x03b0029': 'DUAL IDE SMART ATA error increase',\n    '0x03b002a': 'DUAL IDE SMART attribute data read fail',\n    '0x03b002b': 'DUAL IDE SMART error log read fail',\n    '0x03f0001': 'Process appears unresponsive',\n    '0x03f0002': 'Process name appears unresponsive',\n    '0x03f0003': 'Process event handling appears unresponsive',\n    '0x0450001': 'Data Cache DIMM CECC Monitoring',\n    '0x0450002': 'Patrol Data Cache DIMM UERR',\n    '0x0460001': 'Control Cache DIMM Temperature',\n    '0x0460002': 'Control Cache DIMM Temperature',\n    '0x0460003': 'Node FB-DIMM AMB Correctable Error Status',\n    '0x04a0001': 'Slot PCIe Correctable Error Status',\n    '0x04a0002': 'Slot PCIe Link Status',\n    '0x04e0001': 'Rejecting SSH Connection',\n    '0x04e0002': 'Rejecting SSH Connection from IP',\n    '0x0500001': 'A system task failed',\n    '0x05d00de': 'Component state change',\n    '0x05d00fa': 'Component state change',\n    '0x0600005': 'WSAPI internal error using authentication library',\n    '0x06200fa': 'Component state change',\n    '0x0640001': 'PD Scrub',\n    '0x0660001': 'SED is from the wrong system',\n    '0x0660002': 'SED has the wrong key',\n    '0x0660003': 'SED is present, but encryption is not enabled',\n    '0x0660004': 'LKM is in an unknown state',\n    '0x0660005': 'MMAP failed to map the segment of the memory with keys',\n    '0x0660006': 'Nodesvr unresponsive during darsvr startup',\n    '0x0660007': 'Nodesvr unresponsive during fipsvr startup',\n    '0x0660008': 'fipsvr unable to start in FIPS mode',\n    '0x0660009': 'Failed to successfully communicate with EKM at startup',\n    '0x066000a': 'Controlencryption restore failed',\n    '0x066000b': 'Controlencryption restore ignore failed',\n    '0x066000c': 'Controlencryption restore ignore succeeded with failures',\n    '0x066000d': 'Encryption operation attempted on drive with WWN 0',\n    '0x066000e': 'Unsupported drive present in the system',\n    '0x06700de': 'Component state change',\n    '0x0680001': 'Quorum Witness',\n    '0x06e0001': 'File Services state change',\n    '0x0720001': 'File Provisioning Group',\n    '0x0740001': 'File Store',\n    '0x0750001': 'Virtual Server IP Address',\n    '0x0760001': 'Node Network Bond',\n    '0x0770001': 'Node Network Interface',\n    '0x0780001': 'Node IP Address',\n    '0x0790001': 'File Service Node Active Directory Configuration',\n    '0x07e0001': 'Anti-Virus VSE Server',\n    '0x0810001': 'Anti-Virus Scan',\n    '0x0820001': 'Virtual Server Certificate',\n    '0x0840001': 'HTTP Share',\n    '0x0850001': 'NFS Share',\n    '0x0860001': 'SMB Share',\n    '0x0870001': 'User Quota',\n    '0x08b0001': 'File Store Snapshot',\n    '0x08c0001': 'File Provisioning Group Snap Reclamation Task',\n    '0x08d0001': 'Overall File Services for Node',\n    '0x08e0001': 'File Services Software Update',\n    '0x08f0001': 'File Services Log Collection',\n    '0x0900001': 'File Service Virtual Server Backup',\n    '0x0960002': 'Vasa Provider migration failed due to VVol SC migration',\n    '0x0960003': 'Vasa Provider migration failed due '\n                 'to Certificate mode migration',\n    '0x0960004': 'Vasa Provider migration failed while updating config file',\n    '0x0960005': 'VASA provider could not start because 
of '\n                 'issues with the VASA Certificate',\n    '0x0990001': 'Static IP Route',\n    '0x09a0001': 'SMB Global Setting State change event',\n    '0x09b0001': 'Ddcscan Monitoring',\n    '0x09d0001': 'NVDIMM Battery Failure',\n    '0x09e0003': 'Management Module High Temperature',\n    '0x09e0004': 'Management Module not responding',\n    '0x09f0001': 'File Persona VM shutdown',\n    '0x09f0002': 'File Persona CPG grow limit warning',\n    '0x0a50001': 'File Access Auditing Alerts'\n}\nNODE_PATTERN = \"^\\\\s*Node\\\\s+[-]*Name[-]*\\\\s+[-]*State[-]*\\\\s+\"\nCPU_PATTERN = \"^\\\\s*Node\\\\s+CPU\"\n\nDISK_PATTERN = \"^\\\\s*Id\\\\s+[-]*CagePos[-]*\\\\s+[-]*Type[-]*\\\\s+RPM\\\\s+State\\\\s+\"\nDISK_I_PATTERN = \"^\\\\s*Id\\\\s+[-]*CagePos[-]*\\\\s+[-]*State[-]*\\\\s+\" \\\n                 \"[-]*Node_WWN[-]*\\\\s+[-]*MFR[-]*\\\\s+[-]*Model[-]*\\\\s+\" \\\n                 \"[-]*Serial[-]*\\\\s+[-]*FW_Rev[-]*\"\nPORT_PATTERN = \"^\\\\s*N:S:P\\\\s+[-]*Mode[-]*\\\\s+[-]*State[-]*\\\\s+[-]*\" \\\n               \"Node_WWN[-]*\\\\s+[-]*Port_WWN/HW_Addr[-]*\\\\s+\"\nPORT_I_PATTERN = \"^\\\\s*N:S:P\\\\s+Brand\\\\s+Model\\\\s+Rev\\\\s+Firmware\\\\s+\" \\\n                 \"Serial\\\\s+HWType\"\nPORT_PER_PATTERN = \"^\\\\s*N:S:P\\\\s+Connmode\\\\s+ConnType\\\\s+CfgRate\\\\s+MaxRate\"\nPORT_C_PATTERN = \"^\\\\s*N:S:P\\\\s+Mode\\\\s+Device\\\\s+Pos\\\\s+Config\\\\s+\" \\\n                 \"Topology\\\\s+Rate\"\nPORT_ISCSI_PATTERN = \"^\\\\s*N:S:P\\\\s+State\\\\s+IPAddr\\\\s+Netmask/PrefixLen\\\\s+\" \\\n                     \"Gateway\"\nPORT_RCIP_PATTERN = \"^\\\\s*N:S:P\\\\s+State\\\\s+[-]*HwAddr[-]*\\\\s+IPAddr\\\\s+\" \\\n                    \"Netmask\\\\s+Gateway\\\\s+MTU\\\\s+Rate\"\nPORT_FCOE_PATTERN = \"^\\\\s*N:S:P\\\\s+State\\\\s+\"\nPORT_FS_PATTERN = \"^\\\\s*N:S:P\\\\s+State\\\\s+\"\nFPG_PATTERN = \"^\\\\s*FPG\\\\s+[-]*Mountpath[-]*\\\\s+[-]*Size[-]*\\\\s+[-]*\" \\\n              \"Available[-]*\\\\s+[-]*ActiveStates\"\nCPG_PATTERN = \"^\\\\s*Id\\\\s+[-]*Name[-]*\\\\s+Warn\"\nVOLUME_PATTERN = \"^\\\\s*Id\\\\s+Name\\\\s+Prov\\\\s+Compr\\\\s+Dedup\"\nFSTORE_PATTERN = \"^\\\\s*Fstore\\\\s+VFS\\\\s+FPG\\\\s+State\\\\s+Mode\"\nFSHARE_PATTERN = \"^\\\\s*ShareName\\\\s+Protocol\\\\s+VFS\\\\s+FileStore\\\\s+\" \\\n                 \"ShareDir\\\\s+State\"\nVFS_PATTERN = \"^\\\\s*VFS\\\\s+FPG\\\\s+IPAddr\\\\s+State\"\n\nSRSTATPORT_PATTERN = \"^\\\\s*PORT_N\\\\s+PORT_S\\\\s+PORT_P\\\\s+Rd\\\\s+Wr\\\\s+\" \\\n                     \"Tot\\\\s+Rd\\\\s+Wr\\\\s+Tot\\\\s+Rd\\\\s+Wr\\\\s+Tot\"\nSRSTATPD_PATTERN = \"^\\\\s*PDID\\\\s+Rd\\\\s+Wr\\\\s+\" \\\n                   \"Tot\\\\s+Rd\\\\s+Wr\\\\s+Tot\\\\s+Rd\\\\s+Wr\\\\s+Tot\"\nSRSTATVV_PATTERN = \"^\\\\s*VVID\\\\s+VV_NAME\\\\s+Rd\\\\s+Wr\\\\s+\" \\\n                   \"Tot\\\\s+Rd\\\\s+Wr\\\\s+Tot\\\\s+Rd\\\\s+Wr\\\\s+Tot\"\n\nIPV4_PATTERN = \"^(?:[0-9]{1,3}\\\\.){3}[0-9]{1,3}$\"\n\nHOST_OR_VV_SET_PATTERN = \"^\\\\s*Id\\\\s+Name\\\\s+Members\\\\s+Comment\"\nHOST_OR_VV_PATTERN = \"^\\\\s*Id\\\\s+Name\\\\s+\"\nVLUN_PATTERN = \"^\\\\s*Lun\\\\s+VVName\\\\s+HostName\"\n\nCONTROLLER_STATUS_MAP = {\n    'OK': constants.ControllerStatus.NORMAL,\n    'NORMAL': constants.ControllerStatus.NORMAL,\n    'DEGRADED': constants.ControllerStatus.DEGRADED,\n    'FAILED': constants.ControllerStatus.FAULT\n}\nDISK_PHYSICAL_TYPE_MAP = {\n    'FC': constants.DiskPhysicalType.FC,\n    'SSD': constants.DiskPhysicalType.SSD,\n    'NL': constants.DiskPhysicalType.UNKNOWN\n}\nDISK_STATUS_MAP = {\n    'NORMAL': constants.DiskStatus.NORMAL,\n    'DEGRADED': 
constants.DiskStatus.DEGRADED,\n    'FAILED': constants.DiskStatus.ABNORMAL,\n    'NEW': constants.DiskStatus.ABNORMAL\n}\nPORT_CONNECTION_STATUS_MAP = {\n    'CONFIG_WAIT': constants.PortConnectionStatus.DISCONNECTED,\n    'ALPA_WAIT': constants.PortConnectionStatus.DISCONNECTED,\n    'LOGIN_WAIT': constants.PortConnectionStatus.DISCONNECTED,\n    'READY': constants.PortConnectionStatus.CONNECTED,\n    'LOSS_SYNC': constants.PortConnectionStatus.DISCONNECTED,\n    'ERROR_STATE': constants.PortConnectionStatus.DISCONNECTED,\n    'XXX': constants.PortConnectionStatus.DISCONNECTED,\n    'NONPARTICIPATE': constants.PortConnectionStatus.DISCONNECTED,\n    'COREDUMP': constants.PortConnectionStatus.DISCONNECTED,\n    'OFFLINE': constants.PortConnectionStatus.DISCONNECTED,\n    'FWDEAD': constants.PortConnectionStatus.DISCONNECTED,\n    'IDLE_FOR_RESET': constants.PortConnectionStatus.DISCONNECTED,\n    'DHCP_IN_PROGRESS': constants.PortConnectionStatus.DISCONNECTED,\n    'PENDING_RESET': constants.PortConnectionStatus.DISCONNECTED\n}\nPORT_TYPE_MAP = {\n    'FC': constants.PortType.FC,\n    'ISCSI': constants.PortType.ISCSI,\n    'ETH': constants.PortType.ETH,\n    'CNA': constants.PortType.CNA,\n    'SAS': constants.PortType.SAS,\n    'COMBO': constants.PortType.COMBO,\n    'NVMe': constants.PortType.OTHER,\n    'UNKNOWN': constants.PortType.OTHER,\n    'RCIP': constants.PortType.RCIP,\n    'RCFC': constants.PortType.OTHER\n}\nVERSION_PATTERN = \"^\\\\s*[-]*Service[-]*\\\\s+[-]*State[-]*\\\\s+\"\nSSH_NODE_MEM_TYPE = {\n    1: \"control\",\n    2: \"data\"\n}\nSSH_METRIC_TYPE = {\n    1: \"io\",\n    2: \"kbytes\",\n    3: \"svct\",\n    4: \"iosz\"\n}\nSSH_COLLECT_TIME_PATTERN = \"\\\\(\\\\d+\\\\)\"\nCOLLECT_INTERVAL_HIRES = 60000\nSIXTY_SECONDS = 60\nREST_COLLEC_TTIME_PATTERN = '%Y-%m-%dT%H:%M:%SZ'\nIOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Input/output operations per second\"\n}\nREAD_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Read input/output operations per second\"\n}\nWRITE_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Write input/output operations per second\"\n}\nTHROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data is \"\n                   \"successfully transferred in MB/s\"\n}\nREAD_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data read is \"\n                   \"successfully transferred in MB/s\"\n}\nWRITE_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data write is \"\n                   \"successfully transferred in MB/s\"\n}\nRESPONSE_TIME_DESCRIPTION = {\n    \"unit\": \"ms\",\n    \"description\": \"Average time taken for an IO \"\n                   \"operation in ms\"\n}\nCACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of io that are cache hits\"\n}\nREAD_CACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of read ops that are cache hits\"\n}\nWRITE_CACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of write ops that are cache hits\"\n}\nIO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of IO requests in KB\"\n}\nREAD_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of read IO requests in KB\"\n}\nWRITE_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    
\"description\": \"The average size of write IO requests in KB\"\n}\nPOOL_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION\n}\nVOLUME_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n    \"ioSize\": IO_SIZE_DESCRIPTION,\n    \"readIoSize\": READ_IO_SIZE_DESCRIPTION,\n    \"writeIoSize\": WRITE_IO_SIZE_DESCRIPTION\n}\nPORT_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION\n}\nDISK_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION\n}\nSECONDS_PER_HOUR = 3600\nHOST_OS_MAP = {\n    'AIX': constants.HostOSTypes.AIX,\n    'Citrix Xen Server 5.x/6.x': constants.HostOSTypes.XEN_SERVER,\n    'Citrix Xen Server 7.x': constants.HostOSTypes.XEN_SERVER,\n    'HP-UX': constants.HostOSTypes.HP_UX,\n    'HP-UX (11i v1,11i v2)': constants.HostOSTypes.HP_UX,\n    'HP-UX (11i v3)': constants.HostOSTypes.HP_UX,\n    'OpenVMS': constants.HostOSTypes.OPEN_VMS,\n    'Oracle VM x86': constants.HostOSTypes.ORACLE_VM,\n    'Solaris 11': constants.HostOSTypes.SOLARIS,\n    'Solaris 9/10': constants.HostOSTypes.SOLARIS,\n    'VMware (ESXi)': constants.HostOSTypes.VMWARE_ESX,\n    'ESXI6.0': constants.HostOSTypes.VMWARE_ESX,\n    'ESX 4.x/5.x': constants.HostOSTypes.VMWARE_ESX,\n    'Windows 2003': constants.HostOSTypes.WINDOWS,\n    'Windows 2008/2008 R2': constants.HostOSTypes.WINDOWS,\n    'Windows 2012': constants.HostOSTypes.WINDOWS_SERVER_2012,\n    'Windows 2012 / WS2012 R2': constants.HostOSTypes.WINDOWS_SERVER_2012,\n    'Windows Server 2016': constants.HostOSTypes.WINDOWS,\n    'Red Hat Enterprise Linux': constants.HostOSTypes.LINUX,\n    'OE Linux UEK (5.x, 6.x)': constants.HostOSTypes.LINUX,\n    'OE Linux UEK 7.x': constants.HostOSTypes.LINUX,\n    'RHE Linux (5.x, 6.x)': constants.HostOSTypes.LINUX,\n    'RHE Linux (Pre RHEL 5)': constants.HostOSTypes.LINUX,\n    'RHE Linux 7.x': constants.HostOSTypes.LINUX,\n    'SuSE (10.x, 11.x)': constants.HostOSTypes.LINUX,\n    'SuSE': constants.HostOSTypes.LINUX,\n    'SuSE 12.x': constants.HostOSTypes.LINUX,\n    'SuSE Linux (Pre SLES 10)': constants.HostOSTypes.LINUX,\n    'SuSE Virtualization': constants.HostOSTypes.LINUX\n}\n"
  },
  {
    "path": "delfin/drivers/hpe/hpe_3par/hpe_3parstor.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport six\nfrom oslo_log import log\n\nfrom delfin import context\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom delfin.drivers.hpe.hpe_3par import alert_handler, consts\nfrom delfin.drivers.hpe.hpe_3par import component_handler\nfrom delfin.drivers.hpe.hpe_3par import rest_handler\nfrom delfin.drivers.hpe.hpe_3par import ssh_handler\nfrom delfin.drivers.utils.rest_client import RestClient\n\nLOG = log.getLogger(__name__)\n\n\n# Hpe3parStor Driver\nclass Hpe3parStorDriver(driver.StorageDriver):\n    \"\"\"Hpe3parStorDriver implement Hpe 3par Stor driver,\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n        self.rest_client = RestClient(**kwargs)\n        self.rest_client.verify = kwargs.get('verify', False)\n        self.rest_handler = rest_handler.RestHandler(self.rest_client)\n        self.rest_handler.login()\n\n        self.ssh_handler = ssh_handler.SSHHandler(**kwargs)\n        self.version = self.ssh_handler.login(context)\n\n        self.comhandler = component_handler.ComponentHandler(\n            rest_handler=self.rest_handler, ssh_handler=self.ssh_handler)\n\n        self.alert_handler = alert_handler.AlertHandler(\n            rest_handler=self.rest_handler, ssh_handler=self.ssh_handler)\n\n    def reset_connection(self, context, **kwargs):\n        try:\n            self.rest_handler.logout()\n        except Exception as e:\n            LOG.warning('logout failed when resetting connection, '\n                        'reason is %s' % six.text_type(e))\n        self.rest_client.verify = kwargs.get('verify', False)\n        self.rest_handler.login()\n\n    def close_connection(self):\n        self.rest_handler.logout()\n\n    def get_storage(self, context):\n        return self.comhandler.get_storage(context)\n\n    def list_storage_pools(self, context):\n        self.comhandler.set_storage_id(self.storage_id)\n        return self.comhandler.list_storage_pools(context)\n\n    def list_volumes(self, context):\n        self.comhandler.set_storage_id(self.storage_id)\n        return self.comhandler.list_volumes(context)\n\n    def list_controllers(self, context):\n        return self.comhandler.list_controllers(self.storage_id)\n\n    def list_ports(self, context):\n        return self.comhandler.list_ports(self.storage_id)\n\n    def list_disks(self, context):\n        return self.comhandler.list_disks(self.storage_id)\n\n    def list_alerts(self, context, query_para=None):\n        return self.alert_handler.list_alerts(context, query_para)\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return alert_handler.AlertHandler().parse_alert(context, alert)\n\n    def clear_alert(self, context, alert):\n        return 
self.alert_handler.clear_alert(context, alert)\n\n    def list_storage_host_initiators(self, context):\n        return self.comhandler.list_storage_host_initiators(self.storage_id)\n\n    def list_storage_hosts(self, context):\n        return self.comhandler.list_storage_hosts(self.storage_id)\n\n    def collect_perf_metrics(self, context, storage_id, resource_metrics,\n                             start_time, end_time):\n        return self.comhandler.collect_perf_metrics(storage_id,\n                                                    resource_metrics,\n                                                    start_time, end_time)\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        \"\"\"Get capability of supported driver\"\"\"\n        return {\n            'is_historic': True,\n            'resource_metrics': {\n                constants.ResourceType.STORAGE_POOL: consts.POOL_CAP,\n                constants.ResourceType.VOLUME: consts.VOLUME_CAP,\n                constants.ResourceType.PORT: consts.PORT_CAP,\n                constants.ResourceType.DISK: consts.DISK_CAP\n            }\n        }\n\n    def get_latest_perf_timestamp(self, context):\n        return self.comhandler.get_latest_perf_timestamp()\n\n    def list_storage_host_groups(self, context):\n        return self.comhandler.list_storage_host_groups(self.storage_id)\n\n    def list_port_groups(self, context):\n        return self.comhandler.list_port_groups(self.storage_id)\n\n    def list_volume_groups(self, context):\n        return self.comhandler.list_volume_groups(self.storage_id)\n\n    def list_masking_views(self, context):\n        return self.comhandler.list_masking_views(self.storage_id)\n"
  },
  {
    "path": "delfin/drivers/hpe/hpe_3par/rest_handler.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2016 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport threading\n\nimport six\nfrom oslo_log import log as logging\n\nfrom delfin import cryptor\nfrom delfin import exception\nfrom delfin.drivers.hpe.hpe_3par import consts\nfrom delfin.drivers.utils.tools import Tools\n\nLOG = logging.getLogger(__name__)\n\n\nclass RestHandler(object):\n    \"\"\"Common class for Hpe 3parStor storage system.\"\"\"\n\n    REST_AUTH_URL = '/api/v1/credentials'\n    REST_LOGOUT_URL = '/api/v1/credentials/'\n    REST_STORAGE_URL = '/api/v1/system'\n\n    REST_CAPACITY_URL = '/api/v1/capacity'\n    REST_POOLS_URL = '/api/v1/cpgs'\n    REST_VOLUMES_URL = '/api/v1/volumes'\n\n    REST_ALERTS_URL = '/api/v1/eventlog?query=\"category EQ 2\"'\n\n    REST_HOSTS_URL = '/api/v1/hosts'\n\n    REST_AUTH_KEY = 'X-HP3PAR-WSAPI-SessionKey'\n\n    REST_CPGSTATISTICS_URL = '/api/v1/systemreporter' \\\n                             '/attime/cpgstatistics/hires?' \\\n                             'query=\"sampleTime GE %s AND sampleTime LE %s\"'\n\n    session_lock = None\n\n    def __init__(self, rest_client):\n        self.rest_client = rest_client\n        self.session_lock = threading.Lock()\n\n    def call(self, url, data=None, method=None):\n        \"\"\"Send requests to server.\n        If fail, try another RestURL.\n        Increase the judgment of token invalidation\n        \"\"\"\n        try:\n            res = self.call_with_token(url, data, method,\n                                       calltimeout=consts.SOCKET_TIMEOUT)\n            # Judge whether the access failure is caused by\n            # the token invalidation.\n            # If the token fails, it will be retrieved again,\n            # and the token will be accessed again\n            if res is not None:\n                # 403  The client request has an invalid session key.\n                #      The request came from a different IP address\n                # 409  Session key is being used.\n                if (res.status_code == consts.ERROR_SESSION_INVALID_CODE\n                        or res.status_code ==\n                        consts.ERROR_SESSION_IS_BEING_USED_CODE):\n                    LOG.error(\n                        \"Failed to get token=={0}=={1}\".format(res.status_code,\n                                                               res.text))\n                    LOG.error(\"Failed to get token,relogin,Get token again\")\n                    # if method is logout,return immediately\n                    if method == 'DELETE' and RestHandler.\\\n                            REST_LOGOUT_URL in url:\n                        return res\n                    self.rest_client.rest_auth_token = None\n                    access_session = self.login()\n                    # if get token，Revisit url\n                    if access_session is not None:\n                        res = 
self.call_with_token(\n                            url, data, method,\n                            calltimeout=consts.SOCKET_TIMEOUT)\n                    else:\n                        LOG.error('Login res is None')\n                elif res.status_code == 503:\n                    raise exception.InvalidResults(res.text)\n            else:\n                LOG.error('Rest exec failed')\n\n            return res\n        except exception.DelfinException as e:\n            err_msg = \"Call failed: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as e:\n            err_msg = \"Get RestHandler.call failed: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_resinfo_call(self, url, data=None, method=None):\n        rejson = None\n        res = self.call(url, data, method)\n        if res is not None:\n            if res.status_code == consts.SUCCESS_STATUS_CODES:\n                rejson = res.json()\n            else:\n                if res.text and 'unsupported' in res.text:\n                    LOG.warning('rest api error: {}'.format(res.text))\n                else:\n                    raise exception.StorageBackendException(res.text)\n        return rejson\n\n    def login(self):\n        \"\"\"Login Hpe3par storage array.\"\"\"\n        try:\n            access_session = self.rest_client.rest_auth_token\n            if self.rest_client.san_address:\n                url = RestHandler.REST_AUTH_URL\n\n                data = {\"user\": self.rest_client.rest_username,\n                        \"password\": cryptor.decode(\n                            self.rest_client.rest_password)\n                        }\n\n                self.session_lock.acquire()\n\n                if self.rest_client.rest_auth_token is not None:\n                    return self.rest_client.rest_auth_token\n                self.rest_client.init_http_head()\n                res = self.rest_client. \\\n                    do_call(url, data, 'POST',\n                            calltimeout=consts.SOCKET_TIMEOUT)\n\n                if res is None:\n                    LOG.error('Login res is None')\n                    raise exception.InvalidResults('res is None')\n\n                if res.status_code == consts. \\\n                        LOGIN_SUCCESS_STATUS_CODES:\n                    result = res.json()\n\n                    access_session = result.get('key')\n                    self.rest_client.rest_auth_token = cryptor.encode(\n                        access_session)\n                    self.rest_client.session.headers[\n                        RestHandler.REST_AUTH_KEY] = cryptor.encode(\n                        access_session)\n                else:\n                    LOG.error(\"Login error. 
URL: %(url)s\\n\"\n                              \"Reason: %(reason)s.\",\n                              {\"url\": url, \"reason\": res.text})\n                    if 'invalid username or password' in res.text:\n                        raise exception.InvalidUsernameOrPassword()\n                    else:\n                        raise exception.StorageBackendException(\n                            six.text_type(res.text))\n            else:\n                LOG.error('Login Parameter error')\n\n            return access_session\n        except Exception as e:\n            LOG.error(\"Login error: %s\", six.text_type(e))\n            raise e\n        finally:\n            self.session_lock.release()\n\n    def logout(self):\n        \"\"\"Logout the session.\"\"\"\n        try:\n            url = RestHandler.REST_LOGOUT_URL\n            if self.rest_client.rest_auth_token is not None:\n                url = '%s%s' % (\n                    url, cryptor.decode(self.rest_client.rest_auth_token))\n            self.rest_client.rest_auth_token = None\n            if self.rest_client.san_address:\n                self.call(url, method='DELETE')\n            if self.rest_client.session:\n                self.rest_client.session.close()\n        except exception.DelfinException as e:\n            err_msg = \"Logout error: %s\" % (e.msg)\n            LOG.error(err_msg)\n            raise e\n        except Exception as e:\n            err_msg = \"Logout error: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def call_with_token(self, url, data=None, method='GET',\n                        calltimeout=consts.SOCKET_TIMEOUT):\n        with self.session_lock:\n            auth_key = None\n            if self.rest_client.session:\n                auth_key = self.rest_client.session.headers.get(\n                    RestHandler.REST_AUTH_KEY, None)\n                if auth_key:\n                    self.rest_client.session.headers[\n                        RestHandler.REST_AUTH_KEY] = cryptor.decode(auth_key)\n            res = self.rest_client.do_call(url, data, method, calltimeout)\n            if auth_key:\n                self.rest_client.session.headers[\n                    RestHandler.REST_AUTH_KEY] = auth_key\n        return res\n\n    def get_storage(self):\n        rejson = self.get_resinfo_call(RestHandler.REST_STORAGE_URL,\n                                       method='GET')\n        return rejson\n\n    def get_capacity(self):\n        rejson = self.get_resinfo_call(RestHandler.REST_CAPACITY_URL,\n                                       method='GET')\n        return rejson\n\n    def get_all_pools(self):\n        rejson = self.get_resinfo_call(RestHandler.REST_POOLS_URL,\n                                       method='GET')\n        return rejson\n\n    def get_all_volumes(self):\n        rejson = self.get_resinfo_call(RestHandler.REST_VOLUMES_URL,\n                                       method='GET')\n        return rejson\n\n    def get_pool_metrics(self, start_time, end_time):\n        start_time_str = Tools.timestamp_to_utc_time_str(\n            start_time, consts.REST_COLLEC_TTIME_PATTERN)\n        end_time_str = Tools.timestamp_to_utc_time_str(\n            end_time, consts.REST_COLLEC_TTIME_PATTERN)\n        url = RestHandler.REST_CPGSTATISTICS_URL % (\n            start_time_str, end_time_str)\n        rejson = self.get_resinfo_call(url, method='GET')\n        return rejson\n\n    def list_storage_host(self):\n       
 rejson = self.get_resinfo_call(RestHandler.REST_HOSTS_URL,\n                                       method='GET')\n        return rejson\n"
  },
  {
    "path": "delfin/drivers/hpe/hpe_3par/ssh_handler.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2016 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport re\nimport time\n\nimport six\nfrom oslo_log import log as logging\nfrom oslo_utils import units\n\nfrom delfin import exception\nfrom delfin import utils\nfrom delfin.drivers.hpe.hpe_3par import consts\nfrom delfin.drivers.utils.ssh_client import SSHPool\nfrom delfin.drivers.utils.tools import Tools\n\nLOG = logging.getLogger(__name__)\n\n\nclass SSHHandler(object):\n    \"\"\"Common class for Hpe 3parStor storage system.\"\"\"\n\n    HPE3PAR_COMMAND_SHOWWSAPI = 'showwsapi'\n    HPE3PAR_COMMAND_CHECKHEALTH = 'checkhealth vv vlun task snmp ' \\\n                                  'port pd node network ld dar cage cabling'\n    HPE3PAR_COMMAND_SHOWALERT = 'showalert -d'\n    HPE3PAR_COMMAND_REMOVEALERT = 'removealert -f %s'\n    ALERT_NOT_EXIST_MSG = 'Unable to read alert'\n    HPE3PAR_COMMAND_SHOWNODE = 'shownode'\n    HPE3PAR_COMMAND_SHOWNODE_CPU = 'shownode -cpu'\n    HPE3PAR_COMMAND_SHOWEEPROM = 'showeeprom'\n    HPE3PAR_COMMAND_SHOWPD = 'showpd'\n    HPE3PAR_COMMAND_SHOWPD_I = 'showpd -i'\n    HPE3PAR_COMMAND_SHOWPORT = 'showport'\n    HPE3PAR_COMMAND_SHOWPORT_I = 'showport -i'\n    HPE3PAR_COMMAND_SHOWPORT_PAR = 'showport -par'\n    HPE3PAR_COMMAND_SHOWPORT_C = 'showport -c'\n    HPE3PAR_COMMAND_SHOWPORT_ISCSI = 'showport -iscsi'\n    HPE3PAR_COMMAND_SHOWPORT_RCIP = 'showport -rcip'\n    HPE3PAR_COMMAND_SHOWPORT_FCOE = 'showport -fcoe'\n    HPE3PAR_COMMAND_SHOWPORT_FS = 'showport -fs'\n    HPE3PAR_COMMAND_SHOWHOSTSET_D = 'showhostset -d'\n    HPE3PAR_COMMAND_SHOWVVSET_D = 'showvvset -d'\n    HPE3PAR_COMMAND_SHOWHOST_D = 'showhost -d'\n    HPE3PAR_COMMAND_SHOWVV = 'showvv'\n    HPE3PAR_COMMAND_SHOWVLUN_T = 'showvlun -t'\n\n    HPE3PAR_COMMAND_SRSTATPORT = 'srstatport -attime -groupby ' \\\n                                 'PORT_N,PORT_S,PORT_P -btsecs %d -etsecs %d'\n    HPE3PAR_COMMAND_SRSTATPD = 'srstatpd -attime -btsecs %d -etsecs %d'\n    HPE3PAR_COMMAND_SRSTATVV = 'srstatvv -attime -groupby VVID,VV_NAME' \\\n                               ' -btsecs %d -etsecs %d'\n    HPE3PAR_COMMAND_SRSTATPD_ATTIME = 'srstatpd -attime'\n\n    def __init__(self, **kwargs):\n        self.kwargs = kwargs\n        self.ssh_pool = SSHPool(**kwargs)\n\n    def login(self, context):\n        \"\"\"Test SSH connection \"\"\"\n        version = ''\n        try:\n            re = self.exec_command(SSHHandler.HPE3PAR_COMMAND_SHOWWSAPI)\n            if re:\n                version = self.get_version(re)\n        except Exception as e:\n            LOG.error(\"Login error: %s\", six.text_type(e))\n            raise e\n        return version\n\n    def get_version(self, wsapi_infos):\n        \"\"\"get wsapi version \"\"\"\n        version = ''\n        try:\n            version_list = self.parse_datas_to_list(wsapi_infos,\n                                                    consts.VERSION_PATTERN)\n 
           if version_list and version_list[0]:\n                version = version_list[0].get('version')\n        except Exception as e:\n            LOG.error(\"Get version error: %s, wsapi info: %s\" % (\n                six.text_type(e), wsapi_infos))\n        return version\n\n    def get_health_state(self):\n        \"\"\"Check the hardware and software health\n           status of the storage system.\n\n           :return: raw output of the checkhealth command\n        \"\"\"\n        return self.exec_command(SSHHandler.HPE3PAR_COMMAND_CHECKHEALTH)\n\n    def get_all_alerts(self):\n        return self.exec_command(SSHHandler.HPE3PAR_COMMAND_SHOWALERT)\n\n    def remove_alerts(self, alert_id):\n        \"\"\"Clear an alert from the storage system\n            by running the 'removealert' command.\n        \"\"\"\n        utils.check_ssh_injection([alert_id])\n        command_str = SSHHandler.HPE3PAR_COMMAND_REMOVEALERT % alert_id\n        res = self.exec_command(command_str)\n        if res:\n            if self.ALERT_NOT_EXIST_MSG not in res:\n                raise exception.InvalidResults(six.text_type(res))\n            LOG.warning(\"Alert %s doesn't exist.\", alert_id)\n\n    def get_controllers(self):\n        para_map = {\n            'command': 'parse_node_table'\n        }\n        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWNODE,\n                                       self.parse_datas_to_list,\n                                       pattern_str=consts.NODE_PATTERN,\n                                       para_map=para_map)\n\n    def get_controllers_cpu(self):\n        para_map = {\n            'command': 'parse_node_cpu'\n        }\n        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWNODE_CPU,\n                                       self.parse_datas_to_map,\n                                       pattern_str=consts.CPU_PATTERN,\n                                       para_map=para_map, throw_excep=False)\n\n    def get_controllers_version(self):\n        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWEEPROM,\n                                       self.parse_node_version,\n                                       throw_excep=False)\n\n    def parse_node_version(self, resource_info, pattern_str, para_map=None):\n        node_version_map = {}\n        node_info_map = {}\n        try:\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    if str_line.startswith('Node:'):\n                        str_info = self.split_str_by_colon(str_line)\n                        node_info_map['node_id'] = str_info[1]\n                    if str_line.startswith('OS version:'):\n                        str_info = self.split_str_by_colon(str_line)\n                        node_info_map['node_os_version'] = str_info[1]\n                else:\n                    if node_info_map:\n                        node_version_map[\n                            node_info_map.get('node_id')] = node_info_map.get(\n                            'node_os_version')\n                        node_info_map = {}\n        except Exception as e:\n            err_msg = \"Failed to analyse node version info: %s\" \\\n                      % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return node_version_map\n
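\n    # Illustrative example: a line such as 'OS version:  3.3.1.410' is\n    # split into ['os_version', '3.3.1.410'] by the helper below.\n    def split_str_by_colon(self, str_line):\n        str_info = []\n        if str_line:\n            # 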
str_info[0] is the parsed attribute name, there are some special\n            # characters such as spaces, brackets, etc.,\n            # str_info[1] is the value\n            str_info = str_line.split(':', 1)\n            str_info[0] = str_info[0].strip()\n            str_info[0] = str_info[0].replace(\" \", \"_\") \\\n                .replace(\"(\", \"\").replace(\")\", \"\").lower()\n            if len(str_info) > 1:\n                str_info[1] = str_info[1].strip()\n        return str_info\n\n    def get_disks(self):\n        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPD,\n                                       self.parse_datas_to_list,\n                                       pattern_str=consts.DISK_PATTERN)\n\n    def get_disks_inventory(self):\n        inventory_map = {}\n        para_map = {\n            'command': 'parse_disk_table'\n        }\n        inventorys = self.get_resources_info(\n            SSHHandler.HPE3PAR_COMMAND_SHOWPD_I, self.parse_datas_to_list,\n            pattern_str=consts.DISK_I_PATTERN, para_map=para_map,\n            throw_excep=False)\n        for inventory in (inventorys or []):\n            inventory_map[inventory.get('disk_id')] = inventory\n        return inventory_map\n\n    def get_ports(self):\n        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT,\n                                       self.parse_datas_to_list,\n                                       pattern_str=consts.PORT_PATTERN)\n\n    def get_ports_inventory(self):\n        para_map = {\n            'key_position': 0,\n            'value_position': 'last'\n        }\n        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_I,\n                                       self.parse_datas_to_map,\n                                       pattern_str=consts.PORT_I_PATTERN,\n                                       para_map=para_map, throw_excep=False)\n\n    def get_ports_config(self):\n        para_map = {\n            'key_position': 0,\n            'value_position': 4\n        }\n        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_PAR,\n                                       self.parse_datas_to_map,\n                                       pattern_str=consts.PORT_PER_PATTERN,\n                                       para_map=para_map, throw_excep=False)\n\n    def get_ports_iscsi(self):\n        iscsis_map = {}\n        iscsis = self.get_resources_info(\n            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_ISCSI,\n            self.parse_datas_to_list, pattern_str=consts.PORT_ISCSI_PATTERN,\n            throw_excep=False)\n        for iscsi in (iscsis or []):\n            iscsis_map[iscsi.get('n:s:p')] = iscsi\n        return iscsis_map\n\n    def get_ports_connected(self):\n        para_map = {\n            'key_position': 0,\n            'value_position': 6\n        }\n        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_C,\n                                       self.parse_datas_to_map,\n                                       pattern_str=consts.PORT_C_PATTERN,\n                                       para_map=para_map, throw_excep=False)\n\n    def get_ports_rcip(self):\n        rcip_map = {}\n        rcips = self.get_resources_info(\n            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_RCIP,\n            self.parse_datas_to_list, pattern_str=consts.PORT_RCIP_PATTERN,\n            throw_excep=False)\n        for rcip in (rcips or []):\n            rcip_map[rcip.get('n:s:p')] = rcip\n        return 
rcip_map\n\n    def get_ports_fs(self):\n        port_fs_map = {}\n        port_fss = self.get_resources_info(\n            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_FS,\n            self.parse_datas_to_list, pattern_str=consts.PORT_FS_PATTERN,\n            throw_excep=False)\n        for port_fs in (port_fss or []):\n            port_fs_map[port_fs.get('n:s:p')] = port_fs\n        return port_fs_map\n\n    def get_ports_fcoe(self):\n        fcoe_map = {}\n        fcoes = self.get_resources_info(\n            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_FCOE,\n            self.parse_datas_to_list, pattern_str=consts.PORT_FCOE_PATTERN,\n            throw_excep=False)\n        for fcoe in (fcoes or []):\n            fcoe_map[fcoe.get('n:s:p')] = fcoe\n        return fcoe_map\n\n    def parse_datas_to_list(self, resource_info, pattern_str, para_map=None):\n        obj_list = []\n        titles_size = 9999\n        try:\n            pattern = re.compile(pattern_str)\n            obj_infos = resource_info.split('\\n')\n            titles = []\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    search_obj = pattern.search(str_line)\n                    if search_obj:\n                        titles = str_line.split()\n                        titles_size = len(titles)\n                    else:\n                        str_info = str_line.split()\n                        cols_size = len(str_info)\n                        if para_map and para_map.get('command', '') \\\n                                == 'parse_disk_table':\n                            obj_list = self.parse_disk_table(cols_size,\n                                                             titles_size,\n                                                             str_info,\n                                                             obj_list,\n                                                             titles)\n                        elif para_map and para_map.get('command', '') \\\n                                == 'parse_node_table':\n                            obj_list = self.parse_node_table(cols_size,\n                                                             titles_size,\n                                                             str_info,\n                                                             obj_list,\n                                                             titles)\n                        elif para_map and para_map.get('command', '') \\\n                                == 'parse_metric_table':\n                            if '---------------------------------' in str_line:\n                                break\n                            if 'Time:' in str_line:\n                                collect_time = Tools.get_numbers_in_brackets(\n                                    str_line, consts.SSH_COLLECT_TIME_PATTERN)\n                                if collect_time:\n                                    collect_time = int(collect_time) * units.k\n                                else:\n                                    collect_time = int(time.time() * units.k)\n                                para_map['collect_time'] = collect_time\n                            obj_list = self.parse_metric_table(cols_size,\n                                                               titles_size,\n                                                               str_info,\n                                                               obj_list,\n     
                                                          titles,\n                                                               para_map)\n                        elif para_map and para_map.get('command', '') \\\n                                == 'parse_set_groups_table':\n                            if '---------------------------------' in str_line:\n                                break\n                            obj_list = self.parse_set_groups_table(cols_size,\n                                                                   titles_size,\n                                                                   str_info,\n                                                                   obj_list)\n                        elif para_map and para_map.get('command', '') \\\n                                == 'parse_view_table':\n                            if '---------------------------------' in str_line:\n                                break\n                            obj_list = self.parse_view_table(cols_size,\n                                                             titles_size,\n                                                             str_info,\n                                                             obj_list,\n                                                             titles)\n                        else:\n                            if cols_size == titles_size:\n                                obj_model = {}\n                                for i in range(0, cols_size):\n                                    key = titles[i].lower().replace('-', '')\n                                    obj_model[key] = str_info[i]\n                                if obj_model:\n                                    obj_list.append(obj_model)\n        except Exception as e:\n            err_msg = \"Failed to analyse data into list: %s\" \\\n                      % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_list\n\n    def parse_datas_to_map(self, resource_info, pattern_str, para_map=None):\n        obj_model = {}\n        titles = []\n        titles_size = 9999\n        try:\n            pattern = re.compile(pattern_str)\n            obj_infos = resource_info.split('\\n')\n            for obj_info in obj_infos:\n                str_line = obj_info.strip()\n                if str_line:\n                    search_obj = pattern.search(str_line)\n                    if search_obj:\n                        titles = str_line.split()\n                        titles_size = len(titles)\n                    else:\n                        str_info = str_line.split()\n                        cols_size = len(str_info)\n                        if para_map and para_map.get('command',\n                                                     '') == 'parse_node_cpu':\n                            obj_model = self.parse_node_cpu(cols_size,\n                                                            titles_size,\n                                                            str_info,\n                                                            obj_model,\n                                                            titles)\n                        else:\n                            if cols_size >= titles_size:\n                                key_position = para_map.get('key_position')\n                                value_position = para_map.get('value_position')\n
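\n                                # a value_position of 'last' selects the\n                                # final column of the row, e.g. when the\n                                # number of middle columns can vary\n                                if para_map.get('value_position') == 'last':\n                    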
                value_position = cols_size - 1\n                                obj_model[str_info[key_position]] = str_info[\n                                    value_position]\n        except Exception as e:\n            err_msg = \"Failed to analyse data into map: %s\" \\\n                      % six.text_type(e)\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return obj_model\n\n    def parse_disk_table(self, cols_size, titles_size, str_info,\n                         obj_list, titles):\n        if cols_size >= titles_size:\n            fw_rev_index = self.get_index_of_key(titles, 'FW_Rev')\n            if fw_rev_index:\n                inventory_map = {\n                    'disk_id': str_info[0],\n                    'disk_mfr': ' '.join(str_info[4:fw_rev_index - 2]),\n                    'disk_model': str_info[fw_rev_index - 2],\n                    'disk_serial': str_info[fw_rev_index - 1],\n                    'disk_fw_rev': str_info[fw_rev_index]\n                }\n                obj_list.append(inventory_map)\n        return obj_list\n\n    def parse_node_table(self, cols_size, titles_size, str_info, obj_list,\n                         titles):\n        if cols_size >= titles_size:\n            obj_model = {}\n            num_prefix = 1\n            for i in range(cols_size):\n                key_prefix = ''\n                key = titles[i].lower().replace('-', '')\n                if key == 'mem(mb)':\n                    key_prefix = consts.SSH_NODE_MEM_TYPE.get(num_prefix)\n                    num_prefix += 1\n                key = '%s%s' % (key_prefix, key)\n                obj_model[key] = str_info[i]\n            if obj_model:\n                obj_list.append(obj_model)\n        return obj_list\n\n    def parse_node_cpu(self, cols_size, titles_size, str_info, obj_map,\n                       titles):\n        if cols_size >= titles_size:\n            if 'Cores' in titles:\n                node_id = str_info[0]\n                cpu_info = ' '.join(str_info[5:])\n                cpu_map = obj_map.setdefault(node_id, {})\n                cpu_map[cpu_info] = int(str_info[2])\n            else:\n                node_id = str_info[0]\n                cpu_info = str_info[4]\n                cpu_map = obj_map.setdefault(node_id, {})\n                cpu_map[cpu_info] = cpu_map.get(cpu_info, 0) + 1\n        return obj_map\n\n    def parse_metric_table(self, cols_size, titles_size, str_info,\n                           obj_list, titles, para_map):\n        if cols_size == titles_size:\n            obj_model = {}\n            metric_type_num = 1\n            key_prefix = ''\n            for i in range(0, cols_size):\n                key = titles[i].lower().replace('-', '')\n                if key == 'rd':\n                    key_prefix = consts.SSH_METRIC_TYPE.get(metric_type_num)\n                    metric_type_num += 1\n                key = '%s%s' % (key_prefix, key)\n                obj_model[key] = str_info[i]\n            if obj_model:\n                if para_map and para_map.get('collect_time'):\n                    obj_model['collect_time'] = para_map.get('collect_time')\n                obj_list.append(obj_model)\n        return obj_list\n\n    def get_index_of_key(self, titles_list, key):\n        if titles_list:\n            for title in titles_list:\n                if key in title:\n                    return titles_list.index(title)\n        return None\n
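\n    # get_resources_info runs a CLI command over SSH and hands the raw\n    # output to the given parse_type callable; when throw_excep is False,\n    # parse failures are logged and None is returned instead of raising.\n    def get_resources_info(self, command, parse_type, pattern_str=None,\n                       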
    para_map=None, throw_excep=True):\n        re = self.exec_command(command)\n        resources_info = None\n        try:\n            if re:\n                resources_info = parse_type(re, pattern_str,\n                                            para_map=para_map)\n        except Exception as e:\n            LOG.error(\"Get %s info error: %s\" % (command, six.text_type(e)))\n            if throw_excep:\n                raise e\n        return resources_info\n\n    def exec_command(self, command):\n        re = self.ssh_pool.do_exec(command)\n        if re:\n            if 'invalid command name' in re or 'Invalid option' in re:\n                LOG.warning(re)\n                raise NotImplementedError(re)\n            elif 'Too many local CLI connections' in re:\n                LOG.error(\"command %s failed: %s\" % (command, re))\n                raise exception.StorageBackendException(re)\n        return re\n\n    def get_volumes(self):\n        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWVV,\n                                       self.parse_datas_to_list,\n                                       pattern_str=consts.VOLUME_PATTERN)\n\n    def get_port_metrics(self, start_time, end_time):\n        command = SSHHandler.HPE3PAR_COMMAND_SRSTATPORT % (\n            int(start_time / units.k), int(end_time / units.k))\n        return self.get_resources_info(command,\n                                       self.parse_datas_to_list,\n                                       pattern_str=consts.SRSTATPORT_PATTERN,\n                                       para_map={\n                                           'command': 'parse_metric_table'})\n\n    def get_disk_metrics(self, start_time, end_time):\n        command = SSHHandler.HPE3PAR_COMMAND_SRSTATPD_ATTIME\n        if start_time and end_time:\n            command = SSHHandler.HPE3PAR_COMMAND_SRSTATPD % (\n                int(start_time / units.k), int(end_time / units.k))\n        return self.get_resources_info(command,\n                                       self.parse_datas_to_list,\n                                       pattern_str=consts.SRSTATPD_PATTERN,\n                                       para_map={\n                                           'command': 'parse_metric_table'})\n\n    def get_volume_metrics(self, start_time, end_time):\n        command = SSHHandler.HPE3PAR_COMMAND_SRSTATVV % (\n            int(start_time / units.k), int(end_time / units.k))\n        return self.get_resources_info(command,\n                                       self.parse_datas_to_list,\n                                       pattern_str=consts.SRSTATVV_PATTERN,\n                                       para_map={\n                                           'command': 'parse_metric_table'})\n\n    def list_storage_host_groups(self):\n        para_map = {\n            'command': 'parse_set_groups_table'\n        }\n        return self.get_resources_info(\n            SSHHandler.HPE3PAR_COMMAND_SHOWHOSTSET_D,\n            self.parse_datas_to_list,\n            pattern_str=consts.HOST_OR_VV_SET_PATTERN,\n            para_map=para_map)\n\n    def list_volume_groups(self):\n        para_map = {\n            'command': 'parse_set_groups_table'\n        }\n        return self.get_resources_info(\n            SSHHandler.HPE3PAR_COMMAND_SHOWVVSET_D,\n            self.parse_datas_to_list,\n            pattern_str=consts.HOST_OR_VV_SET_PATTERN,\n            para_map=para_map)\n\n    def parse_set_groups_table(self, cols_size, titles_size, 
str_info,\n                               obj_list):\n        if cols_size >= titles_size:\n            members = []\n            value = str_info[2].replace('-', '')\n            if value:\n                members = [str_info[2]]\n            obj_model = {\n                'id': str_info[0],\n                'name': str_info[1],\n                'members': members,\n                'comment': (\" \".join(str_info[3:])).replace('-', ''),\n            }\n            obj_list.append(obj_model)\n        elif obj_list and cols_size == 1:\n            value = str_info[0].replace('-', '')\n            if value:\n                obj_model = obj_list[-1]\n                if obj_model and obj_model.get('members'):\n                    obj_model.get('members').append(str_info[0])\n                else:\n                    members = [str_info[0]]\n                    obj_model['members'] = members\n\n        return obj_list\n\n    def parse_view_table(self, cols_size, titles_size, str_info, obj_list,\n                         titles):\n        if cols_size >= titles_size:\n            obj_model = {}\n            for i in range(titles_size):\n                key = titles[i].lower().replace('-', '')\n                obj_model[key] = str_info[i]\n            if obj_model:\n                obj_list.append(obj_model)\n        return obj_list\n\n    def get_resources_ids(self, command, pattern_str, para_map=None):\n        if not para_map:\n            para_map = {\n                'key_position': 1,\n                'value_position': 0\n            }\n        return self.get_resources_info(command,\n                                       self.parse_datas_to_map,\n                                       pattern_str=pattern_str,\n                                       para_map=para_map, throw_excep=False)\n\n    def list_storage_host_initiators(self):\n        return self.get_resources_info(\n            SSHHandler.HPE3PAR_COMMAND_SHOWHOST_D,\n            self.parse_datas_to_list,\n            pattern_str=consts.HOST_OR_VV_PATTERN)\n\n    def list_masking_views(self):\n        para_map = {\n            'command': 'parse_view_table'\n        }\n        return self.get_resources_info(\n            SSHHandler.HPE3PAR_COMMAND_SHOWVLUN_T,\n            self.parse_datas_to_list,\n            pattern_str=consts.VLUN_PATTERN,\n            para_map=para_map)\n"
  },
  {
    "path": "delfin/drivers/hpe/hpe_msa/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/hpe/hpe_msa/consts.py",
    "content": "from delfin.common import constants\n\n\nclass AlertOIDNumber(object):\n    OID_ERR_ID = '1.3.6.1.3.94.1.11.1.1'\n    OID_EVENT_TYPE = '1.3.6.1.3.94.1.11.1.7'\n    OID_LAST_TIME = '1.3.6.1.3.94.1.11.1.4'\n    OID_EVENT_DESC = '1.3.6.1.3.94.1.11.1.9'\n    OID_EVENT_ID = '1.3.6.1.3.94.1.11.1.3'\n    OID_SEVERITY = '1.3.6.1.3.94.1.11.1.6'\n\n\nclass StorageVendor(object):\n    HPE_MSA_VENDOR = \"HPE\"\n\n\nclass TrapSeverity(object):\n    TRAP_SEVERITY_MAP = {\n        '1': 'unknown',\n        '2': 'emergency',\n        '3': 'alert',\n        '4': constants.Severity.CRITICAL,\n        '5': 'error',\n        '6': constants.Severity.WARNING,\n        '7': 'notify',\n        '8': constants.Severity.INFORMATIONAL,\n        '9': 'debug',\n        '10': 'mark'\n    }\n\n    SEVERITY_MAP = {\n        \"warning\": \"Warning\",\n        \"informational\": \"Informational\",\n        \"error\": \"Major\"\n    }\n\n\nclass SecondsNumber(object):\n    SECONDS_TO_MS = 1000\n\n\nclass RpmSpeed(object):\n    RPM_SPEED = 1000\n\n\nclass DiskPhysicalType(object):\n    DISK_PHYSICAL_TYPE = {\n        'fc': constants.DiskPhysicalType.FC,\n        'SAS': constants.DiskPhysicalType.SAS\n    }\n\n\nclass InitiatorType(object):\n    ISCSI_INITIATOR_TYPE = \"9\"\n    FC_INITIATOR_TYPE = \"6\"\n    ISCSI_INITIATOR_DESCRIPTION = constants.InitiatorType.ISCSI\n    FC_INITIATOR_DESCRIPTION = constants.InitiatorType.FC\n    UNKNOWN_INITIATOR_DESCRIPTION = constants.InitiatorType.UNKNOWN\n\n\nclass HostOSTypes(object):\n    MSA_HOST_TYPE = {\n        'HP-UX': constants.HostOSTypes.HP_UX\n    }\n"
  },
  {
    "path": "delfin/drivers/hpe/hpe_msa/hpe_msastor.py",
    "content": "from delfin.drivers import driver\nfrom delfin.drivers.hpe.hpe_msa import ssh_handler\nfrom delfin.drivers.hpe.hpe_msa.ssh_handler import SSHHandler\n\n\nclass HpeMsaStorDriver(driver.StorageDriver):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.ssh_handler = ssh_handler.SSHHandler(**kwargs)\n\n    def reset_connection(self, context, **kwargs):\n        self.ssh_handler.login()\n\n    def get_storage(self, context):\n        return self.ssh_handler.get_storage(self.storage_id)\n\n    def list_storage_pools(self, context):\n        return self.ssh_handler.list_storage_pools(self.storage_id)\n\n    def list_volumes(self, context):\n        return self.ssh_handler.list_storage_volume(self.storage_id)\n\n    def list_controllers(self, context):\n        return self.ssh_handler.\\\n            list_storage_controller(self.storage_id)\n\n    def list_ports(self, context):\n        return self.ssh_handler.list_storage_ports(self.storage_id)\n\n    def list_disks(self, context):\n        return self.ssh_handler.list_storage_disks(self.storage_id)\n\n    def list_alerts(self, context, query_para=None):\n        return self.ssh_handler.list_alerts(query_para)\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return SSHHandler.parse_alert(alert)\n\n    def clear_alert(self, context, alert):\n        pass\n\n    def list_storage_host_initiators(self, context):\n        return self.ssh_handler.list_storage_host_initiators(self.storage_id)\n\n    def list_storage_hosts(self, context):\n        return self.ssh_handler.list_storage_hosts(self.storage_id)\n\n    def list_storage_host_groups(self, context):\n        return self.ssh_handler.list_storage_host_groups(self.storage_id)\n\n    def list_port_groups(self, context):\n        return self.ssh_handler.list_port_groups(self.storage_id)\n\n    def list_volume_groups(self, context):\n        return self.ssh_handler.list_volume_groups(self.storage_id)\n\n    def list_masking_views(self, context):\n        return self.ssh_handler.list_masking_views(self.storage_id)\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}'\n"
  },
  {
    "path": "delfin/drivers/hpe/hpe_msa/ssh_handler.py",
    "content": "import hashlib\nimport time\n\nimport six\nfrom oslo_log import log as logging\nfrom operator import itemgetter\nfrom itertools import groupby\nfrom delfin import exception\nfrom delfin.common import constants, alert_util\nfrom delfin.drivers.utils.ssh_client import SSHPool\nfrom delfin.drivers.utils.tools import Tools\nfrom delfin.drivers.hpe.hpe_msa import consts\n\ntry:\n    import defusedxml.cElementTree as Et\nexcept ImportError:\n    import defusedxml.ElementTree as Et\n\nLOG = logging.getLogger(__name__)\n\n\nclass SSHHandler(object):\n\n    def __init__(self, **kwargs):\n        self.ssh_pool = SSHPool(**kwargs)\n\n    def login(self):\n        try:\n            self.ssh_pool.do_exec('show pools')\n        except Exception as e:\n            LOG.error(\"Failed to login msa  %s\" %\n                      (six.text_type(e)))\n            raise e\n\n    def get_storage(self, storage_id):\n        try:\n            system_info = self.ssh_pool.do_exec('show system')\n            system_data = self.handle_xml_to_dict(system_info, 'system')\n            version_info = self.ssh_pool.do_exec('show version')\n            version_arr = self.handle_xml_to_json(version_info, 'versions')\n            version_id = \"\"\n            if version_arr:\n                version_id = version_arr[0].get('bundle-version')\n            if system_data:\n                pools_list = self.list_storage_pools(storage_id)\n                total_capacity = 0\n                if pools_list:\n                    for pool in pools_list:\n                        total_capacity += int(pool.get('total_capacity'))\n                disks_list = self.list_storage_disks(storage_id)\n                raw_capacity = 0\n                if disks_list:\n                    for disk in disks_list:\n                        raw_capacity += int(disk.get('capacity'))\n                volumes_list = self.list_storage_volume(storage_id)\n                volume_all_size = 0\n                if volumes_list:\n                    for volume in volumes_list:\n                        volume_all_size += int(volume.get('total_capacity'))\n                health = system_data.get('health')\n                status = constants.StorageStatus.OFFLINE\n                if health == 'OK':\n                    status = constants.StorageStatus.NORMAL\n                elif health == 'Degraded':\n                    status = constants.StorageStatus.DEGRADED\n                serial_num = system_data.get('midplane-serial-number')\n                storage_map = {\n                    'name': system_data.get('system-name'),\n                    'vendor': consts.StorageVendor.HPE_MSA_VENDOR,\n                    'model': system_data.get('product-id'),\n                    'status': status,\n                    'serial_number': serial_num,\n                    'firmware_version': version_id,\n                    'location': system_data.get('system-location'),\n                    'raw_capacity': int(raw_capacity),\n                    'total_capacity': int(total_capacity),\n                    'used_capacity': int(volume_all_size),\n                    'free_capacity': int(total_capacity - volume_all_size)\n                }\n                return storage_map\n        except Exception as e:\n            err_msg = \"Failed to get system info : %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n    def list_storage_disks(self, storage_id):\n        try:\n            disk_info = self.ssh_pool.do_exec('show disks')\n 
           disk_detail = self.handle_xml_to_json(disk_info, 'drives')\n            disks_arr = []\n            for data in disk_detail:\n                health = data.get('health')\n                status = constants.StoragePoolStatus.OFFLINE\n                if health == 'OK':\n                    status = constants.StoragePoolStatus.NORMAL\n                size = self.parse_string_to_bytes(data.get('size'))\n                physical_type = consts.DiskPhysicalType.\\\n                    DISK_PHYSICAL_TYPE.get(data.get('description'),\n                                           constants.DiskPhysicalType.\n                                           UNKNOWN)\n                rpm = data.get('rpm')\n                if rpm:\n                    rpm = int(rpm) * consts.RpmSpeed.RPM_SPEED\n                data_map = {\n                    'native_disk_id': data.get('location'),\n                    'name': data.get('location'),\n                    'physical_type': physical_type,\n                    'status': status,\n                    'storage_id': storage_id,\n                    'native_disk_group_id': data.get('disk-group'),\n                    'serial_number': data.get('serial-number'),\n                    'manufacturer': data.get('vendor'),\n                    'model': data.get('model'),\n                    'speed': rpm,\n                    'capacity': int(size),\n                    'health_score': status\n                }\n                disks_arr.append(data_map)\n            return disks_arr\n        except Exception as e:\n            err_msg = \"Failed to get storage disk: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n    def list_storage_ports(self, storage_id):\n        try:\n            ports_info = self.ssh_pool.do_exec('show ports')\n            ports_split = ports_info.split('\\n')\n            ports_array = ports_split[1:len(ports_split) - 1]\n            ports_xml_data = ''.join(ports_array)\n            xml_element = Et.fromstring(ports_xml_data)\n            ports_json = []\n            for element_data in xml_element.iter('OBJECT'):\n                property_name = element_data.get('basetype')\n                if property_name != 'status':\n                    msg = {}\n                    for child in element_data.iter('PROPERTY'):\n                        msg[child.get('name')] = child.text\n                    ports_json.append(msg)\n            ports_elements_info = []\n            for i in range(0, len(ports_json) - 1, 2):\n                port_element = ports_json[i].copy()\n                port_element.update(ports_json[i + 1])\n                ports_elements_info.append(port_element)\n            list_ports = []\n            for data in ports_elements_info:\n                status = constants.PortHealthStatus.NORMAL\n                conn_status = constants.PortConnectionStatus.CONNECTED\n                if data.get('health') != 'OK':\n                    status = constants.PortHealthStatus.ABNORMAL\n                    conn_status = constants.PortConnectionStatus.\\\n                        DISCONNECTED\n                wwn = None\n                port_type = constants.PortType.FC\n                location_port_type = data.get('port-type')\n                if location_port_type:\n                    location_port_type = location_port_type.upper()\n                if location_port_type == 'ISCSI':\n                    port_type = constants.PortType.ETH\n                else:\n                    target_id = 
data.get('target-id')\n                    if target_id:\n                        wwn = target_id\n                location = '%s_%s' % (data.get('port'),\n                                      location_port_type)\n                speed = data.get('configured-speed', None)\n                max_speed = 0\n                if speed != 'Auto' and speed is not None:\n                    max_speed = self.parse_string_to_bytes(speed)\n                data_map = {\n                    'native_port_id': data.get('durable-id'),\n                    'name': data.get('port'),\n                    'type': port_type,\n                    'connection_status': conn_status,\n                    'health_status': status,\n                    'location': location,\n                    'storage_id': storage_id,\n                    'speed': max_speed,\n                    'max_speed': max_speed,\n                    'mac_address': data.get('mac-address'),\n                    'ipv4': data.get('ip-address'),\n                    'wwn': wwn\n                }\n                list_ports.append(data_map)\n            return list_ports\n        except Exception as e:\n            err_msg = \"Failed to get storage ports: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n    def list_storage_controller(self, storage_id):\n        try:\n            controller_info = self.ssh_pool.do_exec('show controllers')\n            controller_detail = self.handle_xml_to_json(\n                controller_info, 'controllers')\n            controller_arr = []\n            for data in controller_detail:\n                health = data.get('health')\n                status = constants.StoragePoolStatus.OFFLINE\n                if health == 'OK':\n                    status = constants.StoragePoolStatus.NORMAL\n                cpu_info = data.get('sc-cpu-type')\n                cpu_count = None\n                if cpu_info:\n                    cpu_count = 1\n                memory_size = data.get('system-memory-size')\n                if memory_size is not None:\n                    memory_size += \"MB\"\n                system_memory_size = self.parse_string_to_bytes(\n                    memory_size)\n                data_map = {\n                    'native_controller_id': data.get('controller-id'),\n                    'name': data.get('durable-id'),\n                    'storage_id': storage_id,\n                    'status': status,\n                    'location': data.get('position'),\n                    'soft_version': data.get('sc-fw'),\n                    'cpu_info': cpu_info,\n                    'cpu_count': cpu_count,\n                    'memory_size': int(system_memory_size)\n                }\n                controller_arr.append(data_map)\n            return controller_arr\n        except Exception as e:\n            err_msg = \"Failed to get storage controllers: %s\"\\\n                      % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n    def list_storage_volume(self, storage_id):\n        try:\n            volume_infos = self.ssh_pool.do_exec('show volumes')\n            volume_detail = self.handle_xml_to_json(volume_infos, 'volumes')\n            pools_info = self.ssh_pool.do_exec('show pools')\n            pool_detail = self.handle_xml_to_json(pools_info, 'pools')\n            list_volumes = []\n            for data in volume_detail:\n                health = data.get('health')\n                status = constants.StoragePoolStatus.OFFLINE\n       
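         # MSA reports component health as OK/Degraded/Fault; only an\n                # 'OK' value is mapped to a NORMAL status here.\n       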
         if health == 'OK':\n                    status = constants.StoragePoolStatus.NORMAL\n                total_size = self.parse_string_to_bytes(data.get('total-size'))\n                total_avail = self.parse_string_to_bytes(\n                    data.get('allocated-size'))\n                native_storage_pool_id = ''\n                if pool_detail:\n                    native_storage_pool_id = pool_detail[0]. \\\n                        get('serial-number')\n                    for pools in pool_detail:\n                        if data.get('virtual-disk-name') == pools.\\\n                                get('name'):\n                            native_storage_pool_id = pools.\\\n                                get('serial-number')\n                blocks = data.get('blocks')\n                blocks = int(blocks) if blocks is not None else 0\n                volume_map = {\n                    'name': data.get('volume-name'),\n                    'storage_id': storage_id,\n                    'description': data.get('volume-name'),\n                    'status': status,\n                    'native_volume_id': str(data.get('durable-id')),\n                    'native_storage_pool_id': native_storage_pool_id,\n                    'wwn': str(data.get('wwn')),\n                    'type': data.get('volume-type'),\n                    'total_capacity': int(total_size),\n                    'free_capacity': int(total_size - total_avail),\n                    'used_capacity': int(total_avail),\n                    'blocks': blocks,\n                    'compressed': True,\n                    'deduplicated': True\n                }\n                list_volumes.append(volume_map)\n            return list_volumes\n        except Exception as e:\n            err_msg = \"Failed to get storage volume: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n    def list_storage_pools(self, storage_id):\n        try:\n            pool_infos = self.ssh_pool.do_exec('show pools')\n            pool_detail = self.handle_xml_to_json(pool_infos, 'pools')\n            volume_list = self.list_storage_volume(storage_id)\n            pools_list = []\n            for data in pool_detail:\n                volume_size = 0\n                blocks = 0\n                if volume_list:\n                    for volume in volume_list:\n                        if volume.get('native_storage_pool_id') == data.\\\n                                get('serial-number'):\n                            volume_size += volume.get('total_capacity')\n                            blocks += volume.get('blocks')\n                health = data.get('health')\n                status = constants.StoragePoolStatus.OFFLINE\n                if health == 'OK':\n                    status = constants.StoragePoolStatus.NORMAL\n                total_size = self.parse_string_to_bytes(\n                    data.get('total-size'))\n                pool_map = {\n                    'name': data.get('name'),\n                    'storage_id': storage_id,\n                    'native_storage_pool_id': data.get('serial-number'),\n                    'status': status,\n                    'storage_type': constants.StorageType.BLOCK,\n                    'total_capacity': int(total_size),\n                    'subscribed_capacity': int(blocks),\n                    'used_capacity': volume_size,\n                    'free_capacity': int(total_size - volume_size)\n                }\n                
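# Pool used/free capacity is derived from the volumes matched to\n                # this pool above, rather than read directly from 'show pools'.\n                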
pools_list.append(pool_map)\n            return pools_list\n        except Exception as e:\n            err_msg = \"Failed to get storage pool: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n    @staticmethod\n    def parse_string_to_bytes(value):\n        capacity = 0\n        if value:\n            if value.isdigit():\n                capacity = float(value)\n            else:\n                if value == '0B':\n                    capacity = 0\n                else:\n                    unit = value[-2:]\n                    capacity = float(value[:-2]) * int(\n                        Tools.change_capacity_to_bytes(unit))\n        return capacity\n\n    @staticmethod\n    def handle_xml_to_json(detail_info, element):\n        detail_arr = []\n        detail_data = detail_info.split('\\n')\n        detail = detail_data[1:len(detail_data) - 1]\n        detail_xml = ''.join(detail)\n        xml_element = Et.fromstring(detail_xml)\n        for children in xml_element.iter('OBJECT'):\n            property_name = children.get('basetype')\n            if element == property_name:\n                msg = {}\n                for child in children.iter('PROPERTY'):\n                    msg[child.get('name')] = child.text\n                detail_arr.append(msg)\n        return detail_arr\n\n    def list_alerts(self, query_para):\n        alert_list = []\n        try:\n            alert_infos = self.ssh_pool.do_exec('show events error')\n            alert_json = self.handle_xml_to_json(alert_infos, 'events')\n            for alert_map in alert_json:\n                now = time.time()\n                occur_time = int(round(now * consts.SecondsNumber\n                                       .SECONDS_TO_MS))\n                time_stamp = alert_map.get('time-stamp-numeric')\n                if time_stamp is not None:\n                    occur_time = int(time_stamp) * consts.SecondsNumber\\\n                        .SECONDS_TO_MS\n                    if not alert_util.is_alert_in_time_range(query_para,\n                                                             occur_time):\n                        continue\n                event_code = alert_map.get('event-code')\n                event_id = alert_map.get('event-id')\n                location = alert_map.get('message')\n                resource_type = alert_map.get('event-code')\n                severity = alert_map.get('severity')\n                additional_info = str(alert_map.get('additional-information'))\n                match_key = ''\n                if event_code:\n                    match_key += event_code\n                if severity:\n                    match_key += severity\n                if location:\n                    match_key += location\n                description = None\n                if additional_info:\n                    description = additional_info\n                if severity == 'Informational' or severity == 'RESOLVED':\n                    continue\n                alert_model = {\n                    'alert_id': event_id,\n                    'alert_name': event_code,\n                    'severity': severity,\n                    'category': constants.Category.FAULT,\n                    'type': 'EquipmentAlarm',\n                    'sequence_number': event_id,\n                    'occur_time': occur_time,\n                    'description': description,\n                    'resource_type': resource_type,\n                    'location': location,\n                    
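# match_key is an md5 over event code, severity and message text,\n                    # letting consumers correlate recurrences of the same event.\n                    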
'match_key': hashlib.md5(match_key.encode()).hexdigest()\n                }\n                alert_list.append(alert_model)\n            alert_list_data = SSHHandler.get_last_alert_data(alert_list)\n            return alert_list_data\n        except Exception as e:\n            err_msg = \"Failed to get storage alert: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n    @staticmethod\n    def get_last_alert_data(alert_json):\n        alert_list = []\n        alert_json.sort(key=itemgetter('alert_name', 'location', 'severity'))\n        for key, item in groupby(alert_json, key=itemgetter(\n                'alert_name', 'location', 'severity')):\n            alert_last_index = 0\n            alert_list.append(list(item)[alert_last_index])\n        return alert_list\n\n    @staticmethod\n    def parse_alert(alert):\n        try:\n            alert_model = dict()\n            alert_id = None\n            description = None\n            severity = consts.TrapSeverity.TRAP_SEVERITY_MAP.get('8')\n            sequence_number = None\n            event_type = None\n            for alert_key, alert_value in alert.items():\n                if consts.AlertOIDNumber.OID_ERR_ID in alert_key:\n                    alert_id = str(alert_value)\n                elif consts.AlertOIDNumber.OID_EVENT_TYPE in alert_key:\n                    event_type = alert_value\n                elif consts.AlertOIDNumber.OID_EVENT_DESC in alert_key:\n                    description = alert_value\n                elif consts.AlertOIDNumber.OID_SEVERITY in alert_key:\n                    severity = consts.TrapSeverity.TRAP_SEVERITY_MAP\\\n                        .get(alert.get(consts.AlertOIDNumber.OID_SEVERITY),\n                             constants.Severity.INFORMATIONAL)\n                elif consts.AlertOIDNumber.OID_EVENT_ID in alert_key:\n                    sequence_number = alert_value\n            if description:\n                desc_arr = description.split(\",\")\n                if desc_arr:\n                    alert_id = SSHHandler.split_by_char_and_number(\n                        desc_arr[0], \":\", 1)\n            alert_model['alert_id'] = str(alert_id)\n            alert_model['alert_name'] = event_type\n            alert_model['severity'] = severity\n            alert_model['category'] = constants.Category.FAULT\n            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n            alert_model['sequence_number'] = sequence_number\n            now = time.time()\n            alert_model['occur_time'] = int(round(now * consts.\n                                                  SecondsNumber.SECONDS_TO_MS))\n            alert_model['description'] = description\n            alert_model['location'] = description\n            return alert_model\n        except Exception as e:\n            LOG.error(e)\n            msg = \"Failed to build alert model: %s.\" % (six.text_type(e))\n            raise exception.InvalidResults(msg)\n\n    @staticmethod\n    def split_by_char_and_number(split_str, split_char, arr_number):\n        split_value = ''\n        if split_str:\n            tmp_value = split_str.split(split_char, 1)\n            if arr_number == 1 and len(tmp_value) > 1:\n                split_value = tmp_value[arr_number].strip()\n            elif arr_number == 0:\n                split_value = tmp_value[arr_number].strip()\n        return split_value\n\n    @staticmethod\n    def handle_xml_to_dict(xml_info, element):\n        msg = {}\n        xml_split = 
xml_info.split('\\n')\n        xml_data = xml_split[1:len(xml_split) - 1]\n        detail_xml = ''.join(xml_data)\n        xml_element = Et.fromstring(detail_xml)\n        for children in xml_element.iter('OBJECT'):\n            property_name = children.get('basetype')\n            if element == property_name:\n                for child in children.iter('PROPERTY'):\n                    msg[child.get('name')] = child.text\n        return msg\n\n    def list_storage_host_initiators(self, storage_id):\n        try:\n            initiator_list = []\n            host_groups_info = self.ssh_pool.do_exec(\"show initiators\")\n            host_groups_json = self.handle_xml_to_json(host_groups_info,\n                                                       \"initiator\")\n            type_switch = {\n                consts.InitiatorType.ISCSI_INITIATOR_TYPE:\n                    consts.InitiatorType.ISCSI_INITIATOR_DESCRIPTION,\n                consts.InitiatorType.FC_INITIATOR_TYPE:\n                    consts.InitiatorType.FC_INITIATOR_DESCRIPTION,\n            }\n            for initiator in host_groups_json:\n                description = type_switch.get(\n                    initiator.get('host-bus-type-numeric'),\n                    consts.InitiatorType.UNKNOWN_INITIATOR_DESCRIPTION)\n                initiator_item = {\n                    \"name\": initiator.get('nickname'),\n                    \"type\": description,\n                    \"alias\": initiator.get('durable-id'),\n                    \"storage_id\": storage_id,\n                    \"native_storage_host_initiator_id\":\n                        initiator.get('durable-id'),\n                    \"wwn\": initiator.get('id'),\n                    \"status\": constants.InitiatorStatus.ONLINE,\n                    \"native_storage_host_id\": initiator.get('host-id')\n                }\n                initiator_list.append(initiator_item)\n            return initiator_list\n        except Exception as e:\n            LOG.error(\"Failed to get initiator \"\n                      \"from msa storage_id: %s\" % storage_id)\n            raise e\n\n    def list_storage_hosts(self, storage_id):\n        try:\n            hosts_info = self.ssh_pool.do_exec('show host-groups')\n            host_list = []\n            hosts = self.handle_xml_to_json(hosts_info, 'host')\n            host_set = set()\n            for host in hosts:\n                status = constants.HostStatus.NORMAL\n                os_type = constants.HostOSTypes.HP_UX\n                host_member_count = int(host.get('member-count'))\n                if host_member_count > 0:\n                    serial_number = host.get('serial-number')\n                    if serial_number not in host_set:\n                        host_set.add(host.get('serial-number'))\n                        host_dict = {\n                            \"name\": host.get('name'),\n                            \"description\": host.get('durable-id'),\n                            \"storage_id\": storage_id,\n                            \"native_storage_host_id\":\n                                host.get('serial-number'),\n                            \"os_type\": os_type,\n                            \"status\": status\n                        }\n                        host_list.append(host_dict)\n            return host_list\n        except Exception as e:\n            LOG.error(\"Failed to get host \"\n                      \"from msa storage_id: %s\" % storage_id)\n            raise e\n\n    def 
list_storage_host_groups(self, storage_id):\n        try:\n            host_groups_info = self.ssh_pool.do_exec('show host-groups')\n            host_group_list = []\n            storage_host_grp_relation_list = []\n            host_groups = self.handle_xml_to_json(\n                host_groups_info, 'host-group')\n            host_info_list = self.handle_xml_to_json(host_groups_info, 'host')\n            for host_group in host_groups:\n                member_count = int(host_group.get('member-count'))\n                if member_count > 0:\n                    hosts_list = []\n                    storage_host_group_id = host_group.get('serial-number')\n                    for host_info in host_info_list:\n                        host_id = host_info.get('serial-number')\n                        host_group_id = host_info.get('host-group')\n                        if host_id != 'NOHOST' and \\\n                                host_group_id == storage_host_group_id:\n                            hosts_list.append(host_id)\n                            storage_host_group_relation = {\n                                'storage_id': storage_id,\n                                'native_storage_host_group_id':\n                                    storage_host_group_id,\n                                'native_storage_host_id': host_id\n                            }\n                            storage_host_grp_relation_list.\\\n                                append(storage_host_group_relation)\n                    host_group_map = {\n                        \"name\": host_group.get('name'),\n                        \"description\": host_group.get('durable-id'),\n                        \"storage_id\": storage_id,\n                        \"native_storage_host_group_id\": storage_host_group_id,\n                        \"storage_hosts\": ','.join(hosts_list)\n                    }\n                    host_group_list.append(host_group_map)\n            storage_host_groups_result = {\n                'storage_host_groups': host_group_list,\n                'storage_host_grp_host_rels':\n                    storage_host_grp_relation_list\n            }\n            return storage_host_groups_result\n        except Exception as e:\n            LOG.error(\"Failed to get host_group from msa \"\n                      \"storage_id: %s\" % storage_id)\n            raise e\n\n    def list_volume_groups(self, storage_id):\n        try:\n            volume_group_list = []\n            volume_group_relation_list = []\n            volume_groups_info = self.ssh_pool.do_exec('show volume-groups')\n            volume_groups_json = self.handle_xml_to_json(\n                volume_groups_info, 'volume-groups')\n            volumes_json = self.handle_xml_to_json(\n                volume_groups_info, 'volumes')\n            for volume_group in volume_groups_json:\n                volumes_list = []\n                durable_id = volume_group.get('durable-id')\n                if volumes_json:\n                    for volume_info in volumes_json:\n                        group_key = volume_info.get('group-key')\n                        volume_id = volume_info.get('durable-id')\n                        if group_key == durable_id:\n                            volumes_list.append(volume_id)\n                            volume_group_relation = {\n                                'storage_id': storage_id,\n                                'native_volume_group_id': durable_id,\n                                'native_volume_id': 
volume_id\n                            }\n                            volume_group_relation_list.\\\n                                append(volume_group_relation)\n                volume_groups_map = {\n                    \"name\": volume_group.get('group-name'),\n                    \"description\": volume_group.get('durable-id'),\n                    \"storage_id\": storage_id,\n                    \"native_volume_group_id\": durable_id,\n                    \"volumes\": ','.join(volumes_list)\n                }\n                volume_group_list.append(volume_groups_map)\n            volume_group_result = {\n                'volume_groups': volume_group_list,\n                'vol_grp_vol_rels': volume_group_relation_list\n            }\n            return volume_group_result\n        except Exception as e:\n            LOG.error(\"Failed to get volume_group\"\n                      \" from msa storage_id: %s\" % storage_id)\n            raise e\n\n    def list_port_groups(self, storage_id):\n        try:\n            port_group_list = []\n            port_group_relation_list = []\n            storage_view_info = self.ssh_pool.do_exec('show maps all ')\n            storage_port_list = self.list_storage_ports(storage_id)\n            storage_host_view = self.handle_xml_to_json(\n                storage_view_info, 'volume-view-mappings')\n            reduce_set = set()\n            for storage_view in storage_host_view:\n                port_number = storage_view.get('ports')\n                port_group_dict = self.get_port_group_id_and_name(\n                    port_number, storage_port_list)\n                native_port_group_id = port_group_dict.get(\n                    'native_port_group_id')\n                native_port_group_name = port_group_dict.get(\n                    'native_port_group_name')\n                if native_port_group_name:\n                    native_port_group_id = \"port_group_\" + \\\n                                           native_port_group_id\n                    if native_port_group_id in reduce_set:\n                        continue\n                    reduce_set.add(native_port_group_id)\n                    port_group_map = {\n                        'name': native_port_group_id,\n                        'description': native_port_group_id,\n                        'storage_id': storage_id,\n                        'native_port_group_id': native_port_group_id,\n                        'ports': native_port_group_name\n                    }\n                    port_ids = native_port_group_name.split(',')\n                    for port_id in port_ids:\n                        port_group_relation = {\n                            'storage_id': storage_id,\n                            'native_port_group_id': native_port_group_id,\n                            'native_port_id': port_id\n                        }\n                        port_group_relation_list.append(\n                            port_group_relation)\n                    port_group_list.append(port_group_map)\n            result = {\n                'port_groups': port_group_list,\n                'port_grp_port_rels': port_group_relation_list\n            }\n            return result\n        except Exception as e:\n            LOG.error(\"Failed to get port_group\"\n                      \" from msa storage_id: %s\" % storage_id)\n            raise e\n\n    @staticmethod\n    def get_port_group_id_and_name(port_number, storage_port_list):\n        
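\"\"\"Resolve comma-separated MSA port codes against the port list.\n\n        Returns a dict whose 'id' joins the matching port names and whose\n        'name' joins their durable ids, as consumed by its callers.\n        \"\"\"\n        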
native_port_group_id = []\n        native_port_group_name = []\n        if port_number:\n            port_codes = port_number.split(',')\n            for port_code in port_codes:\n                for port in storage_port_list:\n                    port_name = port.get('name')\n                    durable_id = port.get('native_port_id')\n                    if port_code in port_name:\n                        native_port_group_id.append(port_name)\n                        native_port_group_name.append(durable_id)\n        port_group_dict = {\n            'native_port_group_id': ''.join(native_port_group_id),\n            'native_port_group_name': ','.join(native_port_group_name)\n        }\n        return port_group_dict\n\n    def list_masking_views(self, storage_id):\n        try:\n            views_list = []\n            storage_view_info = self.ssh_pool.do_exec('show maps all ')\n            if storage_view_info:\n                storage_port_list = self.list_storage_ports(storage_id)\n                host_list = self.list_storage_hosts(storage_id)\n                initiators_list = self.list_storage_host_initiators(storage_id)\n                host_group_list = self.list_storage_host_groups(storage_id)\n                storage_host_group = host_group_list.get('storage_host_groups')\n                storage_host_view = self.handle_xml_to_json(\n                    storage_view_info, 'volume-view-mappings')\n                views_list.extend(\n                    self.get_storage_view_list(storage_host_view, 'volume',\n                                               storage_id, storage_port_list,\n                                               host_list, initiators_list,\n                                               storage_host_group))\n                storage_host_volume_groups_view = self.handle_xml_to_json(\n                    storage_view_info, 'volume-group-view-mappings')\n                views_list.extend(self.get_storage_view_list(\n                    storage_host_volume_groups_view, 'group',\n                    storage_id, storage_port_list, host_list, initiators_list,\n                    storage_host_group))\n            return views_list\n        except Exception as e:\n            LOG.error(\"Failed to get view \"\n                      \"from msa storage_id: %s\" % storage_id)\n            raise e\n\n    def get_storage_view_list(self, storage_view_list, vol_type, storage_id,\n                              storage_port_list, host_list, initiators_list,\n                              storage_host_groups):\n        views_list = []\n        if storage_view_list:\n            native_volume_group_name = 'native_volume_group_id'\\\n                if vol_type == 'group' else 'native_volume_id'\n            for host_view in storage_view_list:\n                access = host_view.get('access')\n                if access != 'not-mapped':\n                    mapped_id = host_view.get('mapped-id')\n                    native_masking_view_id = host_view.get('durable-id')\n                    volume_id = host_view.get('parent-id')\n                    port_number = host_view.get('ports')\n                    view_name = host_view.get('nickname')\n                    host_group_name = 'native_storage_host_group_id'\\\n                        if '.*.*' in view_name else 'native_storage_host_id'\n                    native_port_group_dict = \\\n                        self.get_port_group_id_and_name(port_number,\n                                                        
storage_port_list)\n                    native_port_group_id = native_port_group_dict.get(\n                        'native_port_group_id')\n                    native_storage_host_id = self.get_storage_host_id(\n                        host_list, mapped_id, initiators_list,\n                        storage_host_groups, view_name)\n                    view_map = {\n                        \"name\": view_name,\n                        \"description\": view_name,\n                        \"storage_id\": storage_id,\n                        \"native_masking_view_id\":\n                            native_masking_view_id + volume_id,\n                        native_volume_group_name: volume_id,\n                        host_group_name: native_storage_host_id\n                    }\n                    if native_port_group_id:\n                        view_map['native_port_group_id'] = \\\n                            \"port_group_\" + native_port_group_id\n                    views_list.append(view_map)\n        return views_list\n\n    @staticmethod\n    def get_storage_host_id(host_list, mapped_id, initiators_list,\n                            storage_host_groups, view_name):\n        for host_value in host_list:\n            host_durable_id = host_value.get('description')\n            if host_durable_id == mapped_id:\n                native_storage_host_id = \\\n                    host_value.get('native_storage_host_id')\n                return native_storage_host_id\n\n        for initiators in initiators_list:\n            initiators_durable_id = initiators.get(\n                'native_storage_host_initiator_id')\n            if initiators_durable_id == mapped_id:\n                native_storage_host_id = \\\n                    initiators.get('native_storage_host_id')\n                return native_storage_host_id\n\n        group_name = view_name.split('.')[0]\n        for host_group in storage_host_groups:\n            if group_name == host_group.get('name'):\n                native_storage_host_id = \\\n                    host_group.get('native_storage_host_group_id')\n                return native_storage_host_id\n"
  },
  {
    "path": "delfin/drivers/huawei/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/huawei/oceanstor/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/huawei/oceanstor/alert_handler.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import datetime\n\nfrom oslo_log import log\n\nfrom delfin import exception\nfrom delfin.common import alert_util\nfrom delfin.common import constants\nfrom delfin.drivers.huawei.oceanstor import oid_mapper\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\nclass AlertHandler(object):\n    \"\"\"Alert handling functions for huawei oceanstor driver\"\"\"\n\n    TIME_PATTERN = \"%Y-%m-%d,%H:%M:%S.%f\"\n\n    # Translation of trap severity to alert model severity\n    SEVERITY_MAP = {\"1\": constants.Severity.CRITICAL,\n                    \"2\": constants.Severity.MAJOR,\n                    \"3\": constants.Severity.MINOR,\n                    \"4\": constants.Severity.WARNING}\n\n    # Translation of trap alert category to alert model category\n    CATEGORY_MAP = {\"1\": constants.Category.FAULT,\n                    \"2\": constants.Category.RECOVERY,\n                    \"3\": constants.Category.EVENT}\n\n    # Translation of trap alert category to alert type\n    TYPE_MAP = {\n        \"1\": constants.EventType.COMMUNICATIONS_ALARM,\n        \"2\": constants.EventType.EQUIPMENT_ALARM,\n        \"3\": constants.EventType.PROCESSING_ERROR_ALARM,\n        \"4\": constants.EventType.QUALITY_OF_SERVICE_ALARM,\n        \"5\": constants.EventType.ENVIRONMENTAL_ALARM,\n        \"6\": constants.EventType.QUALITY_OF_SERVICE_ALARM}\n\n    # Translation of severity of queried alerts to alert model severity\n    QUERY_ALERTS_SEVERITY_MAP = {2: constants.Severity.INFORMATIONAL,\n                                 3: constants.Severity.WARNING,\n                                 5: constants.Severity.MAJOR,\n                                 6: constants.Severity.CRITICAL}\n\n    # Translation of alert category of queried alerts to alert model category\n    QUERY_ALERTS_CATEGORY_MAP = {0: constants.Category.EVENT,\n                                 1: constants.Category.FAULT,\n                                 2: constants.Category.RECOVERY}\n\n    # Attributes expected in alert info to proceed with model filling\n    _mandatory_alert_attributes = ('hwIsmReportingAlarmAlarmID',\n                                   'hwIsmReportingAlarmFaultTitle',\n                                   'hwIsmReportingAlarmFaultLevel',\n                                   'hwIsmReportingAlarmNodeCode',\n                                   'hwIsmReportingAlarmFaultType',\n                                   'hwIsmReportingAlarmAdditionInfo',\n                                   'hwIsmReportingAlarmSerialNo',\n                                   'hwIsmReportingAlarmFaultCategory',\n                                   'hwIsmReportingAlarmRestoreAdvice',\n                                   'hwIsmReportingAlarmFaultTime'\n                                   )\n\n    @staticmethod\n    def parse_alert(context, alert):\n        \"\"\"Parse alert data and fill the alert model.\"\"\"\n        # Check for mandatory 
alert attributes\n        alert = oid_mapper.OidMapper.map_oids(alert)\n        LOG.info(\"Get alert from storage: %s\", alert)\n\n        for attr in AlertHandler._mandatory_alert_attributes:\n            if not alert.get(attr):\n                msg = \"Mandatory information %s missing in alert message.\" \\\n                      % attr\n                raise exception.InvalidInput(msg)\n\n        try:\n            alert_model = dict()\n            # This information is sourced from device registration info\n            alert_model['alert_id'] = alert['hwIsmReportingAlarmAlarmID']\n            alert_model['alert_name'] = alert['hwIsmReportingAlarmFaultTitle']\n            alert_model['severity'] = AlertHandler.SEVERITY_MAP.get(\n                alert['hwIsmReportingAlarmFaultLevel'],\n                constants.Severity.NOT_SPECIFIED)\n            alert_model['category'] = AlertHandler.CATEGORY_MAP.get(\n                alert['hwIsmReportingAlarmFaultCategory'],\n                constants.Category.NOT_SPECIFIED)\n            alert_model['type'] = AlertHandler.TYPE_MAP.get(\n                alert['hwIsmReportingAlarmFaultType'],\n                constants.EventType.NOT_SPECIFIED)\n            alert_model['sequence_number'] \\\n                = alert['hwIsmReportingAlarmSerialNo']\n            occur_time = datetime.strptime(\n                alert['hwIsmReportingAlarmFaultTime'],\n                AlertHandler.TIME_PATTERN)\n            alert_model['occur_time'] = int(occur_time.timestamp() * 1000)\n\n            description = alert['hwIsmReportingAlarmAdditionInfo']\n            if AlertHandler._is_hex(description):\n                description = bytes.fromhex(description[2:]).decode('ascii')\n            alert_model['description'] = description\n\n            recovery_advice = alert['hwIsmReportingAlarmRestoreAdvice']\n            if AlertHandler._is_hex(recovery_advice):\n                recovery_advice = bytes.fromhex(\n                    recovery_advice[2:]).decode('ascii')\n\n            alert_model['recovery_advice'] = recovery_advice\n\n            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n            alert_model['location'] = 'Node code=' \\\n                                      + alert['hwIsmReportingAlarmNodeCode']\n\n            if alert.get('hwIsmReportingAlarmLocationInfo'):\n                alert_model['location'] \\\n                    = alert_model['location'] + ',' + alert[\n                    'hwIsmReportingAlarmLocationInfo']\n\n            return alert_model\n        except Exception as e:\n            LOG.error(e)\n            msg = (_(\"Failed to build alert model as some attributes missing \"\n                     \"in alert message.\"))\n            raise exception.InvalidResults(msg)\n\n    def parse_queried_alerts(self, alert_list, query_para):\n        \"\"\"Parses listed alert data and fills the alert model.\"\"\"\n        # List contains all the current alarms of given storage id\n        alert_model_list = []\n        for alert in alert_list:\n            try:\n                occur_time = alert['startTime']\n                # skip if alert not in input time range\n                if not alert_util.is_alert_in_time_range(query_para,\n                                                         occur_time):\n                    continue\n\n                alert_model = dict()\n                alert_model['alert_id'] = alert['eventID']\n                alert_model['alert_name'] = alert['name']\n                alert_model['severity'] = 
self.QUERY_ALERTS_SEVERITY_MAP.get(\n                    alert['level'], constants.Severity.NOT_SPECIFIED)\n                alert_model['category'] = self.QUERY_ALERTS_CATEGORY_MAP.get(\n                    alert['eventType'], constants.Category.NOT_SPECIFIED)\n                alert_model['type'] = constants.EventType.NOT_SPECIFIED\n                alert_model['sequence_number'] = alert['sequence']\n                alert_model['occur_time'] = int(occur_time * 1000)\n                alert_model['description'] = alert['description']\n\n                alert_model['recovery_advice'] = alert['suggestion']\n\n                alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n                alert_model['location'] = alert['location']\n\n                alert_model_list.append(alert_model)\n            except Exception as e:\n                LOG.error(e)\n                msg = (_(\"Failed to build alert model as some attributes\"\n                         \" missing in queried alerts.\"))\n                raise exception.InvalidResults(msg)\n        return alert_model_list\n\n    def add_trap_config(self, context, storage_id, trap_config):\n        \"\"\"Config the trap receiver in storage system.\"\"\"\n        # Currently not implemented\n        pass\n\n    def remove_trap_config(self, context, storage_id, trap_config):\n        \"\"\"Remove trap receiver configuration from storage system.\"\"\"\n        # Currently not implemented\n        pass\n\n    def clear_alert(self, context, storage_id, alert):\n        # Currently not implemented\n        \"\"\"Clear alert from storage system.\"\"\"\n        pass\n\n    @staticmethod\n    def _is_hex(value):\n        try:\n            int(value, 16)\n        except ValueError:\n            return False\n        return True\n"
  },
  {
    "path": "delfin/drivers/huawei/oceanstor/consts.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2016 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nfrom delfin.common import constants\n\nSTATUS_HEALTH = '1'\nSTATUS_ACTIVE = '43'\nSTATUS_RUNNING = '10'\nSTATUS_VOLUME_READY = '27'\nSTATUS_LUNCOPY_READY = '40'\nSTATUS_QOS_ACTIVE = '2'\nQOS_INACTIVATED = '45'\nLUN_TYPE = '11'\nSNAPSHOT_TYPE = '27'\nSTATUS_POOL_ONLINE = '27'\nSTATUS_STORAGE_NORMAL = '1'\nSTATUS_CTRLR_OFFLINE = '28'\nSTATUS_CTRLR_UNKNOWN = '0'\n\nPORT_TYPE_FC = '212'\nPORT_TYPE_ETH = '213'\nPORT_TYPE_SAS = '214'\nPORT_TYPE_FCOE = '252'\nPORT_TYPE_PCIE = '233'\nPORT_TYPE_BOND = '235'\n\nPORT_LOGICTYPE_HOST = '0'\nPORT_HEALTH_UNKNOWN = '0'\nPORT_HEALTH_NORMAL = '1'\nPORT_HEALTH_FAULTY = '2'\nPORT_HEALTH_ABOUTFAIL = '3'\nPORT_HEALTH_PARTIALLYDAMAGED = '4'\nPORT_HEALTH_INCONSISTENT = '9'\n\nPORT_RUNNINGSTS_UNKNOWN = '0'\nPORT_RUNNINGSTS_NORMAL = '1'\nPORT_RUNNINGSTS_RUNNING = '2'\nPORT_RUNNINGSTS_LINKUP = '10'\nPORT_RUNNINGSTS_LINKDOWN = '11'\nPORT_RUNNINGSTS_TOBERECOVERED = '33'\n\nPORT_LOGICTYPE_EXPANSION = '1'\nPORT_LOGICTYPE_MANAGEMENT = '2'\nPORT_LOGICTYPE_INTERNAL = '3'\nPORT_LOGICTYPE_MAINTENANCE = '4'\nPORT_LOGICTYPE_SERVICE = '5'\nPORT_LOGICTYPE_MAINTENANCE2 = '6'\nPORT_LOGICTYPE_INTERCONNECT = '11'\n\nPortTypeMap = {\n    PORT_TYPE_FC: constants.PortType.FC,\n    PORT_TYPE_FCOE: constants.PortType.FCOE,\n    PORT_TYPE_ETH: constants.PortType.ETH,\n    PORT_TYPE_PCIE: constants.PortType.OTHER,\n    PORT_TYPE_SAS: constants.PortType.SAS,\n    PORT_TYPE_BOND: constants.PortType.OTHER,\n}\n\nPortLogicTypeMap = {\n    PORT_LOGICTYPE_HOST:\n        constants.PortLogicalType.SERVICE,\n    PORT_LOGICTYPE_EXPANSION:\n        constants.PortLogicalType.OTHER,\n    PORT_LOGICTYPE_MANAGEMENT:\n        constants.PortLogicalType.MANAGEMENT,\n    PORT_LOGICTYPE_INTERNAL:\n        constants.PortLogicalType.INTERNAL,\n    PORT_LOGICTYPE_MAINTENANCE:\n        constants.PortLogicalType.MAINTENANCE,\n    PORT_LOGICTYPE_SERVICE:\n        constants.PortLogicalType.SERVICE,\n    PORT_LOGICTYPE_MAINTENANCE2:\n        constants.PortLogicalType.MAINTENANCE,\n    PORT_LOGICTYPE_INTERCONNECT:\n        constants.PortLogicalType.INTERCONNECT,\n}\n\nDISK_STATUS_UNKNOWN = '0'\nDISK_STATUS_NORMAL = '1'\nDISK_STATUS_OFFLINE = '28'\n\nDISK_TYPE_SAS = '1'\nDISK_TYPE_SATA = '2'\nDISK_TYPE_SSD = '3'\n\nDISK_LOGICTYPE_FREE = '1'\nDISK_LOGICTYPE_MEMBER = '2'\nDISK_LOGICTYPE_HOTSPARE = '3'\nDISK_LOGICTYPE_CACHE = '4'\n\nDiskPhysicalTypeMap = {\n    DISK_TYPE_SATA: constants.DiskPhysicalType.SATA,\n    DISK_TYPE_SAS: constants.DiskPhysicalType.SAS,\n    DISK_TYPE_SSD: constants.DiskPhysicalType.SSD,\n}\n\nDiskLogicalTypeMap = {\n    DISK_LOGICTYPE_FREE:\n        constants.DiskLogicalType.FREE,\n    DISK_LOGICTYPE_MEMBER:\n        constants.DiskLogicalType.MEMBER,\n    DISK_LOGICTYPE_HOTSPARE:\n        constants.DiskLogicalType.HOTSPARE,\n    DISK_LOGICTYPE_CACHE:\n        
constants.DiskLogicalType.CACHE,\n}\n\nFS_WORM_COMPLIANCE = '1'\nFS_WORM_AUDIT_LOG = '2'\nFS_WORM_ENTERPRISE = '3'\n\nFS_HEALTH_NORMAL = '1'\nFS_TYPE_THICK = '0'\nFS_TYPE_THIN = '1'\nPARENT_TYPE_POOL = 216\n\nQUOTA_NOT_ENABLED = 'INVALID_VALUE64'\nQUOTA_TYPE_TREE = '1'\nQUOTA_TYPE_USER = '2'\nQUOTA_TYPE_GROUP = '3'\n\nSECURITY_STYLE_MIXED = '0'\nSECURITY_STYLE_NATIVE = '1'\nSECURITY_STYLE_NTFS = '2'\nSECURITY_STYLE_UNIX = '3'\n\nPARENT_OBJECT_TYPE_FS = 40\nSHARE_NFS = '16401'\n\nERROR_CONNECT_TO_SERVER = -403\nERROR_UNAUTHORIZED_TO_SERVER = -401\n\nSOCKET_TIMEOUT = 52\nLOGIN_SOCKET_TIMEOUT = 4\n\nERROR_VOLUME_NOT_EXIST = 1077939726\nRELOGIN_ERROR_PASS = [ERROR_VOLUME_NOT_EXIST]\nPWD_EXPIRED = 3\nPWD_RESET = 4\n\nBLOCK_STORAGE_POOL_TYPE = '1'\nFILE_SYSTEM_POOL_TYPE = '2'\n\nSECTORS_SIZE = 512\nQUERY_PAGE_SIZE = 100\n\nTHICK_LUNTYPE = '0'\nTHIN_LUNTYPE = '1'\n\nHOST_OS = [\n    constants.HostOSTypes.LINUX,\n    constants.HostOSTypes.WINDOWS,\n    constants.HostOSTypes.SOLARIS,\n    constants.HostOSTypes.HP_UX,\n    constants.HostOSTypes.AIX,\n    constants.HostOSTypes.XEN_SERVER,\n    constants.HostOSTypes.VMWARE_ESX,\n    constants.HostOSTypes.LINUX_VIS,\n    constants.HostOSTypes.WINDOWS_SERVER_2012,\n    constants.HostOSTypes.ORACLE_VM,\n    constants.HostOSTypes.OPEN_VMS,\n]\n\nHOST_RUNNINGSTATUS_NORMAL = '1'\nINITIATOR_RUNNINGSTATUS_UNKNOWN = '0'\nINITIATOR_RUNNINGSTATUS_ONLINE = '27'\nINITIATOR_RUNNINGSTATUS_OFFLINE = '28'\nISCSI_INITIATOR_TYPE = 222\nFC_INITIATOR_TYPE = 223\nIB_INITIATOR_TYPE = 16499\nISCSI_INITIATOR_DESCRIPTION = 'iSCSI Initiator'\nFC_INITIATOR_DESCRIPTION = 'FC Initiator'\nIB_INITIATOR_DESCRIPTION = 'IB Initiator'\nUNKNOWN_INITIATOR_DESCRIPTION = 'Unknown Initiator'\n\nOCEANSTOR_METRICS = {\n    'iops': '22',\n    'readIops': '25',\n    'writeIops': '28',\n    'throughput': '21',\n    'readThroughput': '23',\n    'writeThroughput': '26',\n    'responseTime': '370',\n    'ioSize': '228',\n    'readIoSize': '24',\n    'writeIoSize': '27',\n    'cacheHitRatio': '303',\n    'readCacheHitRatio': '93',\n    'writeCacheHitRatio': '95',\n}\n\nCONVERT_TO_MILLI_SECOND_LIST = [\n    'responseTime'\n]\n\nIOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Input/output operations per second\"\n}\nREAD_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Read input/output operations per second\"\n}\nWRITE_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Write input/output operations per second\"\n}\nTHROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data is \"\n                   \"successfully transferred in MB/s\"\n}\nREAD_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data read is \"\n                   \"successfully transferred in MB/s\"\n}\nWRITE_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data write is \"\n                   \"successfully transferred in MB/s\"\n}\nRESPONSE_TIME_DESCRIPTION = {\n    \"unit\": \"ms\",\n    \"description\": \"Average time taken for an IO \"\n                   \"operation in ms\"\n}\nCACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of io that are cache hits\"\n}\nREAD_CACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of read ops that are cache hits\"\n}\nWRITE_CACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of write ops that 
are cache hits\"\n}\nIO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of IO requests in KB\"\n}\nREAD_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of read IO requests in KB\"\n}\nWRITE_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of write IO requests in KB\"\n}\nCPU_USAGE_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of CPU usage\"\n}\nMEMORY_USAGE_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of disk memory usage\"\n}\nSERVICE_TIME = {\n    \"unit\": 'ms',\n    \"description\": \"Service time of the resource in ms\"\n}\nPOOL_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\nVOLUME_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n    \"cacheHitRatio\": CACHE_HIT_RATIO_DESCRIPTION,\n    \"readCacheHitRatio\": READ_CACHE_HIT_RATIO_DESCRIPTION,\n    \"writeCacheHitRatio\": WRITE_CACHE_HIT_RATIO_DESCRIPTION,\n    \"ioSize\": IO_SIZE_DESCRIPTION,\n    \"readIoSize\": READ_IO_SIZE_DESCRIPTION,\n    \"writeIoSize\": WRITE_IO_SIZE_DESCRIPTION,\n}\nCONTROLLER_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\nPORT_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\nDISK_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\n"
  },
  {
    "path": "delfin/drivers/huawei/oceanstor/oceanstor.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom delfin.common import constants\nfrom delfin.drivers.huawei.oceanstor import rest_client, consts, alert_handler\nfrom delfin.drivers import driver\n\nLOG = log.getLogger(__name__)\nCONF = cfg.CONF\n\noceanstor_opts = [\n    cfg.StrOpt(\n        'enable_perf_config',\n        default=False,\n        help='Enable changing performance configs on storage array'\n             'Settings for real-time, historical collection updated'),\n]\n\nCONF.register_opts(oceanstor_opts, \"oceanstor_driver\")\n\n\nclass OceanStorDriver(driver.StorageDriver):\n    \"\"\"OceanStorDriver implement Huawei OceanStor driver,\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.client = rest_client.RestClient(**kwargs)\n        self.sector_size = consts.SECTORS_SIZE\n        self.init_perf_config = CONF.oceanstor_driver.enable_perf_config\n\n    def reset_connection(self, context, **kwargs):\n        self.client.reset_connection(**kwargs)\n\n    def get_storage(self, context):\n\n        storage = self.client.get_storage()\n\n        # Get firmware version\n        controller = self.client.get_all_controllers()\n        firmware_ver = controller[0]['SOFTVER']\n\n        # Get status\n        status = constants.StorageStatus.OFFLINE\n        if storage['RUNNINGSTATUS'] == consts.STATUS_STORAGE_NORMAL:\n            status = constants.StorageStatus.NORMAL\n\n        # Keep sector_size for use in list pools\n        self.sector_size = int(storage['SECTORSIZE'])\n\n        total_cap = int(storage['TOTALCAPACITY']) * self.sector_size\n        used_cap = int(storage['USEDCAPACITY']) * self.sector_size\n        free_cap = int(storage['userFreeCapacity']) * self.sector_size\n        raw_cap = int(storage['MEMBERDISKSCAPACITY']) * self.sector_size\n\n        s = {\n            'name': 'OceanStor',\n            'vendor': 'Huawei',\n            'description': 'Huawei OceanStor Storage',\n            'model': storage['NAME'],\n            'status': status,\n            'serial_number': storage['ID'],\n            'firmware_version': firmware_ver,\n            'location': storage['LOCATION'],\n            'total_capacity': total_cap,\n            'used_capacity': used_cap,\n            'free_capacity': free_cap,\n            'raw_capacity': raw_cap\n        }\n        LOG.info(\"get_storage(), successfully retrieved storage details\")\n        return s\n\n    def list_storage_pools(self, context):\n        try:\n            # Get list of OceanStor pool details\n            pools = self.client.get_all_pools()\n\n            pool_list = []\n            for pool in pools:\n                # Get pool status\n                status = constants.StoragePoolStatus.OFFLINE\n                if pool['RUNNINGSTATUS'] == consts.STATUS_POOL_ONLINE:\n                    status = constants.StoragePoolStatus.NORMAL\n\n                # Get 
pool storage_type\n                storage_type = constants.StorageType.BLOCK\n                if pool.get('USAGETYPE') == consts.FILE_SYSTEM_POOL_TYPE:\n                    storage_type = constants.StorageType.FILE\n\n                total_cap = \\\n                    int(pool['USERTOTALCAPACITY']) * self.sector_size\n                used_cap = \\\n                    int(pool['USERCONSUMEDCAPACITY']) * self.sector_size\n                free_cap = \\\n                    int(pool['USERFREECAPACITY']) * self.sector_size\n\n                p = {\n                    'name': pool['NAME'],\n                    'storage_id': self.storage_id,\n                    'native_storage_pool_id': pool['ID'],\n                    'description': 'Huawei OceanStor Pool',\n                    'status': status,\n                    'storage_type': storage_type,\n                    'total_capacity': total_cap,\n                    'used_capacity': used_cap,\n                    'free_capacity': free_cap,\n                }\n                pool_list.append(p)\n\n            return pool_list\n\n        except Exception:\n            LOG.error(\"Failed to get pool metrics from OceanStor\")\n            raise\n\n    def _get_orig_pool_id(self, pools, volume):\n        for pool in pools:\n            if volume['PARENTNAME'] == pool['NAME']:\n                return pool['ID']\n        return ''\n\n    def list_volumes(self, context):\n        try:\n            # Get all volumes in OceanStor\n            volumes = self.client.get_all_volumes()\n            pools = self.client.get_all_pools()\n\n            volume_list = []\n            for volume in volumes:\n                # Get pool id of volume\n                orig_pool_id = self._get_orig_pool_id(pools, volume)\n                compressed = False\n                if volume['ENABLECOMPRESSION'] != 'false':\n                    compressed = True\n\n                deduplicated = False\n                if volume['ENABLEDEDUP'] != 'false':\n                    deduplicated = True\n\n                status = constants.VolumeStatus.ERROR\n                if volume['RUNNINGSTATUS'] == consts.STATUS_VOLUME_READY:\n                    status = constants.VolumeStatus.AVAILABLE\n\n                vol_type = constants.VolumeType.THICK\n                if volume['ALLOCTYPE'] == consts.THIN_LUNTYPE:\n                    vol_type = constants.VolumeType.THIN\n\n                sector_size = int(volume['SECTORSIZE'])\n                total_cap = int(volume['CAPACITY']) * sector_size\n                used_cap = int(volume['ALLOCCAPACITY']) * sector_size\n\n                v = {\n                    'name': volume['NAME'],\n                    'storage_id': self.storage_id,\n                    'description': 'Huawei OceanStor volume',\n                    'status': status,\n                    'native_volume_id': volume['ID'],\n                    'native_storage_pool_id': orig_pool_id,\n                    'wwn': volume['WWN'],\n                    'type': vol_type,\n                    'total_capacity': total_cap,\n                    'used_capacity': used_cap,\n                    'free_capacity': None,\n                    'compressed': compressed,\n                    'deduplicated': deduplicated,\n                }\n\n                volume_list.append(v)\n\n            return volume_list\n\n        except Exception:\n            LOG.error(\"Failed to get list volumes from OceanStor\")\n            raise\n\n    def list_controllers(self, context):\n        try:\n      
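      # NOTE: RUNNINGSTATUS '0' and '28' map to UNKNOWN and OFFLINE below;\n            # any other code is reported as NORMAL (codes live in consts).\n      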
      # Get list of OceanStor controller details\n            controllers = self.client.get_all_controllers()\n\n            controller_list = []\n            for controller in controllers:\n                status = constants.ControllerStatus.NORMAL\n                if controller['RUNNINGSTATUS'] == consts.STATUS_CTRLR_UNKNOWN:\n                    status = constants.ControllerStatus.UNKNOWN\n                if controller['RUNNINGSTATUS'] == consts.STATUS_CTRLR_OFFLINE:\n                    status = constants.ControllerStatus.OFFLINE\n\n                c = {\n                    'name': controller['NAME'],\n                    'storage_id': self.storage_id,\n                    'native_controller_id': controller['ID'],\n                    'status': status,\n                    'location': controller['LOCATION'],\n                    'soft_version': controller['SOFTVER'],\n                    'cpu_info': controller['CPUINFO'],\n                    'memory_size': controller['MEMORYSIZE'],\n                }\n                controller_list.append(c)\n\n            return controller_list\n\n        except Exception:\n            LOG.error(\"Failed to get controller metrics from OceanStor\")\n            raise\n\n    def list_ports(self, context):\n        try:\n            # Get list of OceanStor port details\n            ports = self.client.get_all_ports()\n\n            port_list = []\n            for port in ports:\n                health_status = constants.PortHealthStatus.ABNORMAL\n                conn_status = constants.PortConnectionStatus.CONNECTED\n\n                logical_type = consts.PortLogicTypeMap.get(\n                    port.get('LOGICTYPE'), constants.PortLogicalType.OTHER)\n\n                if port['HEALTHSTATUS'] == consts.PORT_HEALTH_UNKNOWN:\n                    health_status = constants.PortHealthStatus.UNKNOWN\n                if port['HEALTHSTATUS'] == consts.PORT_HEALTH_NORMAL:\n                    health_status = constants.PortHealthStatus.NORMAL\n\n                if port['RUNNINGSTATUS'] == consts.PORT_RUNNINGSTS_UNKNOWN:\n                    conn_status = constants.PortConnectionStatus.UNKNOWN\n                if port['RUNNINGSTATUS'] == consts.PORT_RUNNINGSTS_LINKDOWN:\n                    conn_status = constants.PortConnectionStatus.DISCONNECTED\n\n                speed = port.get('RUNSPEED')        # either -1 or M bits/sec\n                if speed == '-1':\n                    speed = None\n                max_speed = port.get('MAXSPEED')\n\n                port_type = consts.PortTypeMap.get(port['TYPE'],\n                                                   constants.PortType.OTHER)\n                # FC\n                if port['TYPE'] == consts.PORT_TYPE_FC:\n                    max_speed = port['MAXSUPPORTSPEED']     # in 1000 M bits/s\n\n                # Ethernet\n                if port['TYPE'] == consts.PORT_TYPE_ETH:\n                    max_speed = port['maxSpeed']        # in M bits/s\n                    speed = port['SPEED']               # in M bits/s\n\n                # PCIE\n                if port['TYPE'] == consts.PORT_TYPE_PCIE:\n                    speed = port['PCIESPEED']\n                    logical_type = constants.PortLogicalType.OTHER\n\n                p = {\n                    'name': port['NAME'],\n                    'storage_id': self.storage_id,\n                    'native_port_id': port['ID'],\n                    'location': port.get('LOCATION'),\n                    'connection_status': conn_status,\n                   
 'health_status': health_status,\n                    'type': port_type,\n                    'logical_type': logical_type,\n                    'speed': speed,\n                    'max_speed': max_speed,\n                    'native_parent_id': port.get('PARENTID'),\n                    'wwn': port.get('WWN'),\n                    'mac_address': port.get('MACADDRESS'),\n                    'ipv4': port.get('IPV4ADDR'),\n                    'ipv4_mask': port.get('IPV4MASK'),\n                    'ipv6': port.get('IPV6ADDR'),\n                    'ipv6_mask': port.get('IPV6MASK'),\n                }\n                port_list.append(p)\n\n            return port_list\n\n        except Exception:\n            LOG.error(\"Failed to get port metrics from OceanStor\")\n            raise\n\n    def list_disks(self, context):\n        try:\n            # Get list of OceanStor disks details\n            disks = self.client.get_all_disks()\n\n            disk_list = []\n            for disk in disks:\n                status = constants.DiskStatus.NORMAL\n                if disk['RUNNINGSTATUS'] == consts.DISK_STATUS_OFFLINE:\n                    status = constants.DiskStatus.OFFLINE\n                if disk['RUNNINGSTATUS'] == consts.DISK_STATUS_UNKNOWN:\n                    status = constants.DiskStatus.ABNORMAL\n\n                physical_type = consts.DiskPhysicalTypeMap.get(\n                    disk['DISKTYPE'], constants.DiskPhysicalType.UNKNOWN)\n\n                logical_type = consts.DiskLogicalTypeMap.get(\n                    disk['LOGICTYPE'], constants.DiskLogicalType.UNKNOWN)\n\n                health_score = disk['HEALTHMARK']\n\n                capacity = int(disk['SECTORS']) * int(disk['SECTORSIZE'])\n\n                d = {\n                    'name': disk['MODEL'] + ':' + disk['SERIALNUMBER'],\n                    'storage_id': self.storage_id,\n                    'native_disk_id': disk['ID'],\n                    'serial_number': disk['SERIALNUMBER'],\n                    'manufacturer': disk['MANUFACTURER'],\n                    'model': disk['MODEL'],\n                    'firmware': disk['FIRMWAREVER'],\n                    'speed': int(disk['SPEEDRPM']),\n                    'capacity': capacity,\n                    'status': status,\n                    'physical_type': physical_type,\n                    'logical_type': logical_type,\n                    'health_score': health_score,\n                    'native_disk_group_id': None,\n                    'location': disk['LOCATION'],\n                }\n                disk_list.append(d)\n\n            return disk_list\n\n        except Exception:\n            LOG.error(\"Failed to get disk metrics from OceanStor\")\n            raise\n\n    def _list_quotas(self, quotas, fs_id, qt_id):\n        q_type = {\n            consts.QUOTA_TYPE_TREE: constants.QuotaType.TREE,\n            consts.QUOTA_TYPE_USER: constants.QuotaType.USER,\n            consts.QUOTA_TYPE_GROUP: constants.QuotaType.GROUP,\n        }\n        q_list = []\n        for qt in quotas:\n            chq, csq, fhq, fsq = None, None, None, None\n            uc, fc = None, None\n            if qt['SPACEHARDQUOTA'] != consts.QUOTA_NOT_ENABLED:\n                chq = qt['SPACEHARDQUOTA']\n            if qt['SPACESOFTQUOTA'] != consts.QUOTA_NOT_ENABLED:\n                csq = qt['SPACESOFTQUOTA']\n            if qt['FILEHARDQUOTA'] != consts.QUOTA_NOT_ENABLED:\n                fhq = qt['FILEHARDQUOTA']\n            if qt['FILESOFTQUOTA'] != 
consts.QUOTA_NOT_ENABLED:\n                fsq = qt['FILESOFTQUOTA']\n            if qt['SPACEUSED'] != consts.QUOTA_NOT_ENABLED:\n                uc = qt['SPACEUSED']\n            if qt['FILEUSED'] != consts.QUOTA_NOT_ENABLED:\n                fc = qt['FILEUSED']\n            q = {\n                \"native_quota_id\": qt['ID'],\n                \"type\": q_type.get(qt['QUOTATYPE']),\n                \"storage_id\": self.storage_id,\n                \"native_filesystem_id\": fs_id,\n                \"native_qtree_id\": qt_id,\n                \"capacity_hard_limit\": chq,\n                \"capacity_soft_limit\": csq,\n                \"file_hard_limit\": fhq,\n                \"file_soft_limit\": fsq,\n                \"file_count\": fc,\n                \"used_capacity\": uc,\n                \"user_group_name\": qt['USRGRPOWNERNAME'],\n            }\n            q_list.append(q)\n        # Return after all quotas are collected, not inside the loop\n        return q_list\n\n    def list_quotas(self, context):\n        try:\n            # Get list of OceanStor quota details\n            quotas_list = []\n            filesystems = self.client.get_all_filesystems()\n            for fs in filesystems:\n                fs_id = fs[\"ID\"]\n                quotas = self.client.get_all_filesystem_quotas(fs_id)\n                if quotas:\n                    qs = self._list_quotas(quotas, fs_id, None)\n                    quotas_list.extend(qs)\n\n            qtrees = self.client.get_all_qtrees(filesystems)\n            for qt in qtrees:\n                qt_id = qt[\"ID\"]\n                quotas = self.client.get_all_qtree_quotas(qt_id)\n                if quotas:\n                    qs = self._list_quotas(quotas, None, qt_id)\n                    quotas_list.extend(qs)\n\n            return quotas_list\n\n        except Exception:\n            LOG.error(\"Failed to get quotas from OceanStor\")\n            raise\n\n    def list_filesystems(self, context):\n        try:\n            # Get list of OceanStor filesystem details\n            fss = self.client.get_all_filesystems()\n\n            fs_list = []\n            worm_type = {\n                consts.FS_WORM_COMPLIANCE: constants.WORMType.COMPLIANCE,\n                consts.FS_WORM_AUDIT_LOG: constants.WORMType.AUDIT_LOG,\n                consts.FS_WORM_ENTERPRISE: constants.WORMType.ENTERPRISE\n            }\n            for fs in fss:\n                status = constants.FilesystemStatus.FAULTY\n                if fs['HEALTHSTATUS'] == consts.FS_HEALTH_NORMAL:\n                    status = constants.FilesystemStatus.NORMAL\n                fs_type = constants.FSType.THICK\n                if fs['ALLOCTYPE'] == consts.FS_TYPE_THIN:\n                    fs_type = constants.FSType.THIN\n\n                pool_id = None\n                if fs['PARENTTYPE'] == consts.PARENT_TYPE_POOL:\n                    pool_id = fs['PARENTID']\n\n                sector_size = int(fs['SECTORSIZE'])\n                total_cap = int(fs['CAPACITY']) * sector_size\n                used_cap = int(fs['ALLOCCAPACITY']) * sector_size\n                free_cap = int(fs['AVAILABLECAPCITY']) * sector_size\n\n                compressed = False\n                if fs['ENABLECOMPRESSION'] != 'false':\n                    compressed = True\n\n                deduplicated = False\n                if fs['ENABLEDEDUP'] != 'false':\n                    deduplicated = True\n\n                f = {\n                    'name': fs['NAME'],\n                    'storage_id': self.storage_id,\n                    'native_filesystem_id': 
fs['ID'],\n                    'native_pool_id': pool_id,\n                    'compressed': compressed,\n                    'deduplicated': deduplicated,\n                    'worm': worm_type.get(fs['WORMTYPE'],\n                                          constants.WORMType.NON_WORM),\n                    'status': status,\n                    'type': fs_type,\n                    'total_capacity': total_cap,\n                    'used_capacity': used_cap,\n                    'free_capacity': free_cap,\n                }\n                fs_list.append(f)\n\n            return fs_list\n\n        except Exception:\n            LOG.error(\"Failed to get filesystems from OceanStor\")\n            raise\n\n    def list_qtrees(self, context):\n        try:\n            # Get list of OceanStor qtree details\n            filesystems = self.client.get_all_filesystems()\n            qts = self.client.get_all_qtrees(filesystems)\n            security_mode = {\n                consts.SECURITY_STYLE_MIXED: constants.NASSecurityMode.MIXED,\n                consts.SECURITY_STYLE_NATIVE: constants.NASSecurityMode.NATIVE,\n                consts.SECURITY_STYLE_NTFS: constants.NASSecurityMode.NTFS,\n                consts.SECURITY_STYLE_UNIX: constants.NASSecurityMode.UNIX,\n            }\n\n            qt_list = []\n            for qt in qts:\n                fs_id = None\n                if qt['PARENTTYPE'] == consts.PARENT_OBJECT_TYPE_FS:\n                    fs_id = qt['PARENTID']\n                q = {\n                    'name': qt['NAME'],\n                    'storage_id': self.storage_id,\n                    'native_qtree_id': qt['ID'],\n                    'native_filesystem_id': fs_id,\n                    'security_mode': security_mode.get(qt['securityStyle']),\n                }\n                qt_list.append(q)\n\n            return qt_list\n\n        except Exception:\n            LOG.error(\"Failed to get qtrees from OceanStor\")\n            raise\n\n    def list_shares(self, context):\n        try:\n            # Get list of OceanStor share details\n            ss = self.client.get_all_shares()\n\n            s_list = []\n            for s in ss:\n\n                protocol = None\n                if s.get('type') == consts.SHARE_NFS:\n                    protocol = constants.ShareProtocol.NFS\n                if s.get('subType'):\n                    protocol = constants.ShareProtocol.CIFS\n                if s.get('ACCESSNAME'):\n                    protocol = constants.ShareProtocol.FTP\n\n                share = {\n                    'name': s['NAME'],\n                    'storage_id': self.storage_id,\n                    'native_share_id': s['ID'],\n                    'native_filesystem_id': s['FSID'],\n                    'path': s['SHAREPATH'],\n                    'protocol': protocol\n                }\n                s_list.append(share)\n\n            return s_list\n\n        except Exception:\n            LOG.error(\"Failed to get shares from OceanStor\")\n            raise\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return alert_handler.AlertHandler().parse_alert(context, alert)\n\n    def clear_alert(self, context, sequence_number):\n        return self.client.clear_alert(sequence_number)\n\n    def list_alerts(self, context, query_para):\n        # First query alerts and then translate to model\n
        alert_list = self.client.list_alerts()\n        alert_model_list = alert_handler.AlertHandler()\\\n            .parse_queried_alerts(alert_list, query_para)\n        return alert_model_list\n\n    def collect_perf_metrics(self, context, storage_id,\n                             resource_metrics, start_time,\n                             end_time):\n        \"\"\"Collects performance metrics for the given interval\"\"\"\n        try:\n            if self.init_perf_config:\n                self.client.configure_metrics_collection()\n                self.init_perf_config = False\n\n        except Exception:\n            LOG.error(\"Failed to configure collection in OceanStor\")\n            raise\n\n        metrics = []\n        try:\n            # storage-pool metrics\n            if resource_metrics.get(constants.ResourceType.STORAGE_POOL):\n                pool_metrics = self.client.get_pool_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.STORAGE_POOL))\n                metrics.extend(pool_metrics)\n\n            # volume metrics\n            if resource_metrics.get(constants.ResourceType.VOLUME):\n                volume_metrics = self.client.get_volume_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.VOLUME))\n                metrics.extend(volume_metrics)\n\n            # controller metrics\n            if resource_metrics.get(constants.ResourceType.CONTROLLER):\n                controller_metrics = self.client.get_controller_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.CONTROLLER))\n                metrics.extend(controller_metrics)\n\n            # port metrics\n            if resource_metrics.get(constants.ResourceType.PORT):\n                port_metrics = self.client.get_port_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.PORT))\n                metrics.extend(port_metrics)\n\n            # disk metrics\n            if resource_metrics.get(constants.ResourceType.DISK):\n                disk_metrics = self.client.get_disk_metrics(\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.DISK))\n                metrics.extend(disk_metrics)\n\n        except Exception:\n            LOG.error(\"Failed to collect metrics from OceanStor\")\n            raise\n\n        return metrics\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        \"\"\"Get capabilities of the supported driver\"\"\"\n        return {\n            'is_historic': False,\n            'resource_metrics': {\n                constants.ResourceType.STORAGE_POOL: consts.POOL_CAP,\n                constants.ResourceType.VOLUME: consts.VOLUME_CAP,\n                constants.ResourceType.CONTROLLER: consts.CONTROLLER_CAP,\n                constants.ResourceType.PORT: consts.PORT_CAP,\n                constants.ResourceType.DISK: consts.DISK_CAP\n            }\n        }\n\n    def list_storage_host_initiators(self, ctx):\n        try:\n            # Get list of OceanStor initiator details\n            initiators = self.client.get_all_initiators()\n            initiator_list = []\n            switcher = {\n                consts.INITIATOR_RUNNINGSTATUS_ONLINE:\n                    constants.InitiatorStatus.ONLINE,\n                consts.INITIATOR_RUNNINGSTATUS_OFFLINE:\n                    constants.InitiatorStatus.OFFLINE,\n
                consts.INITIATOR_RUNNINGSTATUS_UNKNOWN:\n                    constants.InitiatorStatus.UNKNOWN,\n            }\n            type_switch = {\n                consts.ISCSI_INITIATOR_TYPE:\n                    consts.ISCSI_INITIATOR_DESCRIPTION,\n                consts.FC_INITIATOR_TYPE:\n                    consts.FC_INITIATOR_DESCRIPTION,\n                consts.IB_INITIATOR_TYPE:\n                    consts.IB_INITIATOR_DESCRIPTION,\n            }\n            for initiator in initiators:\n                status = switcher.get(initiator['RUNNINGSTATUS'],\n                                      constants.InitiatorStatus.UNKNOWN)\n                description = type_switch.get(\n                    initiator['TYPE'], consts.UNKNOWN_INITIATOR_DESCRIPTION)\n\n                initiator_item = {\n                    \"name\": initiator.get('NAME'),\n                    \"description\": description,\n                    \"alias\": initiator['ID'],\n                    \"storage_id\": self.storage_id,\n                    \"native_storage_host_initiator_id\": initiator['ID'],\n                    \"wwn\": initiator['ID'],\n                    \"status\": status,\n                    \"native_storage_host_id\": initiator.get('PARENTID'),\n                }\n                initiator_list.append(initiator_item)\n\n            return initiator_list\n\n        except Exception:\n            LOG.error(\"Failed to get initiators from OceanStor\")\n            raise\n\n    def list_storage_hosts(self, ctx):\n        try:\n            # Get list of OceanStor host details\n            hosts = self.client.get_all_hosts()\n            host_list = []\n            for host in hosts:\n                os_type = ''\n                host_os = int(host['OPERATIONSYSTEM'])\n                if host_os < len(consts.HOST_OS):\n                    os_type = consts.HOST_OS[host_os]\n                status = constants.HostStatus.NORMAL\n                if host['RUNNINGSTATUS'] != consts.HOST_RUNNINGSTATUS_NORMAL:\n                    status = constants.HostStatus.ABNORMAL\n\n                h = {\n                    \"name\": host['NAME'],\n                    \"description\": host['DESCRIPTION'],\n                    \"storage_id\": self.storage_id,\n                    \"native_storage_host_id\": host['ID'],\n                    \"os_type\": os_type,\n                    \"status\": status,\n                    \"ip_address\": host['IP']\n                }\n                host_list.append(h)\n\n            return host_list\n\n        except Exception:\n            LOG.error(\"Failed to get hosts from OceanStor\")\n            raise\n\n    def list_storage_host_groups(self, ctx):\n        try:\n            # Get list of OceanStor host_groups details\n            host_groups = self.client.get_all_host_groups()\n            host_group_list = []\n            for host_group in host_groups:\n                hosts = self.client.get_all_associate_hosts(\n                    host_group['TYPE'], host_group['ID'])\n                hosts_str = None\n                for host in hosts:\n                    if hosts_str:\n                        hosts_str = \"{0},{1}\".format(hosts_str, host['ID'])\n                    else:\n                        hosts_str = \"{0}\".format(host['ID'])\n\n                host_g = {\n                    \"name\": host_group['NAME'],\n                    \"description\": host_group['DESCRIPTION'],\n                    \"storage_id\": self.storage_id,\n                    
\"native_storage_host_group_id\": host_group['ID'],\n                    \"storage_hosts\": hosts_str\n                }\n                host_group_list.append(host_g)\n\n            return host_group_list\n\n        except Exception:\n            LOG.error(\"Failed to get host_groups from OceanStor\")\n            raise\n\n    def list_port_groups(self, ctx):\n        try:\n            # Get list of OceanStor port_groups details\n            port_groups = self.client.get_all_port_groups()\n            port_group_list = []\n            for port_group in port_groups:\n                ports = self.client.get_all_associate_ports(\n                    port_group['TYPE'], port_group['ID'])\n                ports_str = None\n                for port in ports:\n                    if ports_str:\n                        ports_str = \"{0},{1}\".format(ports_str, port['ID'])\n                    else:\n                        ports_str = \"{0}\".format(port['ID'])\n\n                port_g = {\n                    \"name\": port_group['NAME'],\n                    \"description\": port_group['DESCRIPTION'],\n                    \"storage_id\": self.storage_id,\n                    \"native_port_group_id\": port_group['ID'],\n                    \"ports\": ports_str\n                }\n                port_group_list.append(port_g)\n\n            return port_group_list\n\n        except Exception:\n            LOG.error(\"Failed to get port_groups from OceanStor\")\n            raise\n\n    def list_volume_groups(self, ctx):\n        try:\n            # Get list of OceanStor vol_groups details\n            vol_groups = self.client.get_all_volume_groups()\n            vol_group_list = []\n            for vol_group in vol_groups:\n                volumes = self.client.get_all_associate_volumes(\n                    vol_group['TYPE'], vol_group['ID'])\n                volumes_str = None\n                for volume in volumes:\n                    if volumes_str:\n                        volumes_str = \"{0},{1}\".format(volumes_str,\n                                                       volume['ID'])\n                    else:\n                        volumes_str = \"{0}\".format(volume['ID'])\n\n                vol_g = {\n                    \"name\": vol_group['NAME'],\n                    \"description\": vol_group['DESCRIPTION'],\n                    \"storage_id\": self.storage_id,\n                    \"native_volume_group_id\": vol_group['ID'],\n                    \"volumes\": volumes_str\n                }\n                vol_group_list.append(vol_g)\n\n            return vol_group_list\n\n        except Exception:\n            LOG.error(\"Failed to get vol_groups from OceanStor\")\n            raise\n\n    def list_masking_views(self, ctx):\n        try:\n            # Get list of OceanStor masking view details\n            views = self.client.get_all_mapping_views()\n\n            view_dict = {}\n            for view in views:\n                v = {\n                    \"name\": view['NAME'],\n                    \"description\": view['DESCRIPTION'],\n                    \"storage_id\": self.storage_id,\n                    \"native_masking_view_id\": view['ID'],\n                }\n                view_dict[view['ID']] = v\n\n            view_keys = view_dict.keys()\n\n            host_groups = self.client.get_all_host_groups()\n            for host_group in host_groups:\n                hg_views = self.client.get_all_associate_mapping_views(\n                    host_group['TYPE'], 
host_group['ID'])\n                for hg_view in hg_views:\n                    v_id = hg_view['ID']\n                    if v_id in view_keys:\n                        view_dict[v_id]['native_storage_host_group_id'] =\\\n                            host_group['ID']\n                    else:\n                        msg = \"Missing mapping view for host group id {0}\".\\\n                            format(host_group['ID'])\n                        LOG.info(msg)\n\n            volume_groups = self.client.get_all_volume_groups()\n            for volume_group in volume_groups:\n                vg_views = self.client.get_all_associate_mapping_views(\n                    volume_group['TYPE'], volume_group['ID'])\n                for vg_view in vg_views:\n                    v_id = vg_view['ID']\n                    if v_id in view_keys:\n                        view_dict[v_id]['native_volume_group_id'] =\\\n                            volume_group['ID']\n                    else:\n                        msg = \"Missing mapping view for volume group id {0}\".\\\n                            format(volume_group['ID'])\n                        LOG.info(msg)\n\n            port_groups = self.client.get_all_port_groups()\n            for port_group in port_groups:\n                pg_views = self.client.get_all_associate_mapping_views(\n                    port_group['TYPE'], port_group['ID'])\n                for pg_view in pg_views:\n                    v_id = pg_view['ID']\n                    if v_id in view_keys:\n                        view_dict[v_id]['native_port_group_id'] =\\\n                            port_group['ID']\n                    else:\n                        msg = \"Missing mapping view for port group id {0}\".\\\n                            format(port_group['ID'])\n                        LOG.info(msg)\n\n            return list(view_dict.values())\n\n        except Exception:\n            LOG.error(\"Failed to get masking views from OceanStor\")\n            raise\n"
  },
  {
    "path": "delfin/drivers/huawei/oceanstor/oid_mapper.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass OidMapper(object):\n    \"\"\"Functions/attributes for oid to alert info mapper\"\"\"\n\n    # Map to translate trap oid strings to oid names\n    OID_MAP = {\n        \"1.3.6.1.4.1.2011.2.91.10.3.1.1.1\": \"hwIsmReportingAlarmNodeCode\",\n        \"1.3.6.1.4.1.2011.2.91.10.3.1.1.2\": \"hwIsmReportingAlarmLocationInfo\",\n        \"1.3.6.1.4.1.2011.2.91.10.3.1.1.3\": \"hwIsmReportingAlarmRestoreAdvice\",\n        \"1.3.6.1.4.1.2011.2.91.10.3.1.1.4\": \"hwIsmReportingAlarmFaultTitle\",\n        \"1.3.6.1.4.1.2011.2.91.10.3.1.1.5\": \"hwIsmReportingAlarmFaultType\",\n        \"1.3.6.1.4.1.2011.2.91.10.3.1.1.6\": \"hwIsmReportingAlarmFaultLevel\",\n        \"1.3.6.1.4.1.2011.2.91.10.3.1.1.7\": \"hwIsmReportingAlarmAlarmID\",\n        \"1.3.6.1.4.1.2011.2.91.10.3.1.1.8\": \"hwIsmReportingAlarmFaultTime\",\n        \"1.3.6.1.4.1.2011.2.91.10.3.1.1.9\": \"hwIsmReportingAlarmSerialNo\",\n        \"1.3.6.1.4.1.2011.2.91.10.3.1.1.10\": \"hwIsmReportingAlarmAdditionInfo\",\n        \"1.3.6.1.4.1.2011.2.91.10.3.1.1.11\": \"hwIsmReportingAlarmFaultCategory\"\n    }\n\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def map_oids(alert):\n        \"\"\"Translate oids using static map.\"\"\"\n        alert_model = dict()\n\n        for attr in alert:\n            # Remove the instance number at the end of oid before mapping\n            oid_str = attr.rsplit('.', 1)[0]\n            key = OidMapper.OID_MAP.get(oid_str, None)\n            alert_model[key] = alert[attr]\n\n        return alert_model\n"
  },
  {
    "path": "delfin/drivers/huawei/oceanstor/rest_client.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2016 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport json\n\nimport requests\nimport six\nimport urllib3\nfrom urllib3.exceptions import InsecureRequestWarning\nfrom oslo_log import log as logging\n\nfrom delfin.common import constants\nfrom delfin import cryptor\nfrom delfin import exception\nfrom delfin.drivers.huawei.oceanstor import consts\nfrom delfin.ssl_utils import HostNameIgnoreAdapter\nfrom delfin.i18n import _\n\nLOG = logging.getLogger(__name__)\n\n\ndef _get_timestamp_values(metric, value):\n    timestamp = int(metric['CMO_STATISTIC_TIMESTAMP']) * 1000\n    return {timestamp: value}\n\n\ndef _get_selection(selection):\n    selected_metrics = []\n    ids = ''\n    for key, value in consts.OCEANSTOR_METRICS.items():\n        if selection.get(key):\n            selected_metrics.append(key)\n            if ids:\n                ids = ids + ',' + value\n            else:\n                ids = value\n    return selected_metrics, ids\n\n\nclass RestClient(object):\n    \"\"\"Common class for Huawei OceanStor storage system.\"\"\"\n\n    def __init__(self, **kwargs):\n\n        rest_access = kwargs.get('rest')\n        if rest_access is None:\n            raise exception.InvalidInput('Input rest_access is missing')\n        self.rest_host = rest_access.get('host')\n        self.rest_port = rest_access.get('port')\n        self.rest_username = rest_access.get('username')\n        self.rest_password = rest_access.get('password')\n\n        # Lists of addresses to try, for authorization\n        address = 'https://%(host)s:%(port)s/deviceManager/rest/' % \\\n                  {'host': self.rest_host, 'port': str(self.rest_port)}\n        self.san_address = [address]\n        self.session = None\n        self.url = None\n        self.device_id = None\n        self.verify = None\n        urllib3.disable_warnings(InsecureRequestWarning)\n        self.reset_connection(**kwargs)\n\n    def reset_connection(self, **kwargs):\n        self.verify = kwargs.get('verify', False)\n        try:\n            self.login()\n        except Exception as ex:\n            msg = \"Failed to login to OceanStor: {}\".format(ex)\n            LOG.error(msg)\n            raise exception.InvalidCredential(msg)\n\n    def init_http_head(self):\n        self.url = None\n        self.session = requests.Session()\n        self.session.headers.update({\n            \"Connection\": \"keep-alive\",\n            \"Content-Type\": \"application/json\"})\n        if not self.verify:\n            self.session.verify = False\n        else:\n            LOG.debug(\"Enable certificate verification, verify: {0}\".format(\n                self.verify))\n            self.session.verify = self.verify\n            self.session.mount(\"https://\", HostNameIgnoreAdapter())\n\n        self.session.trust_env = False\n\n    def do_call(self, url, data, method,\n               
 calltimeout=consts.SOCKET_TIMEOUT, log_filter_flag=False):\n        \"\"\"Send requests to Huawei storage server.\n\n        Send HTTPS call, get response in JSON.\n        Convert response into Python Object and return it.\n        \"\"\"\n        if self.url:\n            url = self.url + url\n\n        kwargs = {'timeout': calltimeout}\n        if data:\n            kwargs['data'] = json.dumps(data)\n\n        if method in ('POST', 'PUT', 'GET', 'DELETE'):\n            func = getattr(self.session, method.lower())\n        else:\n            msg = _(\"Request method %s is invalid.\") % method\n            LOG.error(msg)\n            raise exception.StorageBackendException(msg)\n\n        try:\n            res = func(url, **kwargs)\n        except requests.exceptions.SSLError as e:\n            LOG.error('SSLError exception from server: %(url)s.'\n                      ' Error: %(err)s', {'url': url, 'err': e})\n            err_str = six.text_type(e)\n            if 'certificate verify failed' in err_str:\n                raise exception.SSLCertificateFailed()\n            else:\n                raise exception.SSLHandshakeFailed()\n        except Exception as err:\n            LOG.exception('Bad response from server: %(url)s.'\n                          ' Error: %(err)s', {'url': url, 'err': err})\n            return {\"error\": {\"code\": consts.ERROR_CONNECT_TO_SERVER,\n                              \"description\": \"Connect to server error.\"}}\n\n        try:\n            res.raise_for_status()\n        except requests.HTTPError as exc:\n            return {\"error\": {\"code\": exc.response.status_code,\n                              \"description\": six.text_type(exc)}}\n\n        res_json = res.json()\n        if not log_filter_flag:\n            LOG.info('\\n\\n\\n\\nRequest URL: %(url)s\\n\\n'\n                     'Call Method: %(method)s\\n\\n'\n                     'Request Data: %(data)s\\n\\n'\n                     'Response Data:%(res)s\\n\\n',\n                     {'url': url,\n                      'method': method,\n                      'data': data,\n                      'res': res_json})\n\n        return res_json\n\n    def login(self):\n        \"\"\"Login Huawei storage array.\"\"\"\n        device_id = None\n        for item_url in self.san_address:\n            url = item_url + \"xx/sessions\"\n            data = {\"username\": self.rest_username,\n                    \"password\": cryptor.decode(self.rest_password),\n                    \"scope\": \"0\"}\n            self.init_http_head()\n            result = self.do_call(url, data, 'POST',\n                                  calltimeout=consts.LOGIN_SOCKET_TIMEOUT,\n                                  log_filter_flag=True)\n\n            if (result['error']['code'] != 0) or (\"data\" not in result):\n                LOG.error(\"Login error. 
URL: %(url)s\\n\"\n                          \"Reason: %(reason)s.\",\n                          {\"url\": item_url, \"reason\": result})\n                continue\n\n            LOG.debug('Login success: %(url)s', {'url': item_url})\n            device_id = result['data']['deviceid']\n            self.device_id = device_id\n            self.url = item_url + device_id\n            self.session.headers['iBaseToken'] = result['data']['iBaseToken']\n            if (result['data']['accountstate']\n                    in (consts.PWD_EXPIRED, consts.PWD_RESET)):\n                self.logout()\n                msg = _(\"Password has expired or has been reset, \"\n                        \"please change the password.\")\n                LOG.error(msg)\n                raise exception.StorageBackendException(msg)\n            break\n\n        if device_id is None:\n            msg = _(\"Failed to login with all rest URLs.\")\n            LOG.error(msg)\n            raise exception.StorageBackendException(msg)\n\n        return device_id\n\n    def call(self, url, data=None, method=None, log_filter_flag=False):\n        \"\"\"Send requests to server.\n\n        If fail, try another RestURL.\n        \"\"\"\n        device_id = None\n        old_url = self.url\n        result = self.do_call(url, data, method,\n                              log_filter_flag=log_filter_flag)\n        error_code = result['error']['code']\n        if (error_code == consts.ERROR_CONNECT_TO_SERVER\n                or error_code == consts.ERROR_UNAUTHORIZED_TO_SERVER):\n            LOG.error(\"Can't open the recent url, relogin.\")\n            device_id = self.login()\n\n        if device_id is not None:\n            LOG.debug('Replace URL: \\n'\n                      'Old URL: %(old_url)s\\n,'\n                      'New URL: %(new_url)s\\n.',\n                      {'old_url': old_url,\n                       'new_url': self.url})\n            result = self.do_call(url, data, method,\n                                  log_filter_flag=log_filter_flag)\n            if result['error']['code'] in consts.RELOGIN_ERROR_PASS:\n                result['error']['code'] = 0\n        return result\n\n    def paginated_call(self, url, data=None, method=None,\n                       params=None, log_filter_flag=False,\n                       page_size=consts.QUERY_PAGE_SIZE):\n        if params:\n            url = \"{0}?{1}\".format(url, params)\n        else:\n            url = \"{0}?\".format(url)\n\n        result_list = []\n        start, end = 0, page_size\n        msg = _('Query resource volume error')\n        while True:\n            url_p = \"{0}range=[{1}-{2}]\".format(url, start, end)\n            start, end = end, end + page_size\n            result = self.call(url_p, data, method, log_filter_flag)\n            self._assert_rest_result(result, msg)\n\n            # Empty data if this is first page, OR last page got all data\n            if 'data' not in result:\n                break\n\n            result_list.extend(result['data'])\n            # Check if this is last page\n            if len(result['data']) < page_size:\n                break\n\n        return result_list\n\n    def logout(self):\n        \"\"\"Logout the session.\"\"\"\n        url = \"/sessions\"\n        if self.url:\n            result = self.do_call(url, None, \"DELETE\")\n            self._assert_rest_result(result, _('Logout session error.'))\n\n    def _assert_rest_result(self, result, err_str):\n        if result['error']['code'] != 0:\n          
  msg = (_('%(err)s\\nresult: %(res)s.') % {'err': err_str,\n                                                     'res': result})\n            LOG.error(msg)\n            raise exception.StorageBackendException(msg)\n\n    def _assert_data_in_result(self, result, msg):\n        if 'data' not in result:\n            err_msg = _('%s \"data\" is not in result.') % msg\n            LOG.error(err_msg)\n            raise exception.StorageBackendException(err_msg)\n\n    def get_storage(self):\n        url = \"/system/\"\n        result = self.call(url, method='GET', log_filter_flag=True)\n\n        msg = _('Get storage error.')\n        self._assert_rest_result(result, msg)\n        self._assert_data_in_result(result, msg)\n\n        return result['data']\n\n    def get_all_controllers(self):\n        url = \"/controller\"\n        result = self.call(url, method='GET', log_filter_flag=True)\n\n        msg = _('Get controller error.')\n        self._assert_rest_result(result, msg)\n        self._assert_data_in_result(result, msg)\n\n        return result['data']\n\n    def get_all_ports(self):\n        url = \"/fc_port\"\n        fc_ports = self.paginated_call(\n            url, None, \"GET\", log_filter_flag=True)\n\n        url = \"/fcoe_port\"\n        fcoe_ports = self.paginated_call(\n            url, None, \"GET\", log_filter_flag=True)\n\n        url = \"/eth_port\"\n        eth_ports = self.paginated_call(\n            url, None, \"GET\", log_filter_flag=True)\n\n        url = \"/pcie_port\"\n        pcie_ports = self.paginated_call(\n            url, None, \"GET\", log_filter_flag=True)\n\n        url = \"/bond_port\"\n        bond_ports = self.paginated_call(\n            url, None, \"GET\", log_filter_flag=True)\n\n        url = \"/sas_port\"\n        sas_ports = self.paginated_call(\n            url, None, \"GET\", log_filter_flag=True)\n\n        return fc_ports + fcoe_ports + eth_ports\\\n            + pcie_ports + bond_ports + sas_ports\n\n    def get_all_volumes(self):\n        url = \"/lun\"\n        return self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n\n    def get_all_disks(self):\n        url = \"/disk\"\n        return self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n\n    def get_all_pools(self):\n        url = \"/storagepool\"\n        return self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n\n    def get_all_filesystems(self):\n        url = \"/filesystem\"\n        return self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n\n    def get_all_qtrees(self, filesystems):\n        url = \"/quotatree\"\n        qt_list = []\n        for fs in filesystems:\n            params = \"PARENTTYPE=40&PARENTID={0}&\".format(fs['ID'])\n            qt = self.paginated_call(url, None, \"GET\",\n                                     params=params, log_filter_flag=True)\n            qt_list.extend(qt)\n        return qt_list\n\n    def get_all_filesystem_quotas(self, fs_id):\n        url = \"/FS_QUOTA\"\n        params = \"PARENTTYPE=40&PARENTID={0}&\".format(fs_id)\n        return self.paginated_call(url, None, \"GET\",\n                                   params=params, log_filter_flag=True)\n\n    def get_all_qtree_quotas(self, qt_id):\n        url = \"/FS_QUOTA\"\n        params = \"PARENTTYPE=16445&PARENTID={0}&\".format(qt_id)\n        return self.paginated_call(url, None, \"GET\",\n                                   params=params, log_filter_flag=True)\n\n    def get_all_shares(self):\n        url = \"/CIFSHARE\"\n        cifs = 
self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n\n        url = \"/NFSHARE\"\n        nfs = self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n\n        url = \"/FTP_SHARE_AUTH_CLIENT\"\n        ftps = self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n\n        return cifs + nfs + ftps\n\n    def get_all_mapping_views(self):\n        url = \"/mappingview\"\n        view = self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n        return view\n\n    def get_all_associate_resources(self, url, obj_type, obj_id):\n        params = \"ASSOCIATEOBJTYPE={0}&ASSOCIATEOBJID={1}&\".format(obj_type,\n                                                                   obj_id)\n        return self.paginated_call(url, None, \"GET\",\n                                   params=params, log_filter_flag=True)\n\n    def get_all_associate_mapping_views(self, obj_type, obj_id):\n        url = \"/mappingview/associate\"\n        return self.get_all_associate_resources(url, obj_type, obj_id)\n\n    def get_all_associate_hosts(self, obj_type, obj_id):\n        url = \"/host/associate\"\n        return self.get_all_associate_resources(url, obj_type, obj_id)\n\n    def get_all_associate_volumes(self, obj_type, obj_id):\n        url = \"/lun/associate\"\n        return self.get_all_associate_resources(url, obj_type, obj_id)\n\n    def get_all_associate_ports(self, obj_type, obj_id):\n        eth_ports = self.get_all_associate_resources(\n            \"/eth_port/associate\", obj_type, obj_id)\n        fc_ports = self.get_all_associate_resources(\n            \"/fc_port/associate\", obj_type, obj_id)\n        fcoe_ports = self.get_all_associate_resources(\n            \"/fcoe_port/associate\", obj_type, obj_id)\n\n        return eth_ports + fc_ports + fcoe_ports\n\n    def get_all_hosts(self):\n        url = \"/host\"\n        host = self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n        return host\n\n    def get_all_initiators(self):\n        url = \"/fc_initiator\"\n        fc_i = self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n        url = \"/iscsi_initiator\"\n        iscsi_i = self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n        url = \"/ib_initiator\"\n        ib_i = self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n        return fc_i + iscsi_i + ib_i\n\n    def get_all_host_groups(self):\n        url = \"/hostgroup\"\n        hostg = self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n        return hostg\n\n    def get_all_port_groups(self):\n        url = \"/portgroup\"\n        portg = self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n        return portg\n\n    def get_all_volume_groups(self):\n        url = \"/lungroup\"\n        lungroup = self.paginated_call(url, None, \"GET\", log_filter_flag=True)\n        return lungroup\n\n    def clear_alert(self, sequence_number):\n        url = \"/alarm/currentalarm?sequence=%s\" % sequence_number\n\n        # Result always contains error code and description\n        result = self.call(url, method=\"DELETE\", log_filter_flag=True)\n        if result['error']['code']:\n            msg = 'Clear alert failed with reason: %s.' 
\\\n                  % result['error']['description']\n            raise exception.InvalidResults(msg)\n        return result\n\n    def list_alerts(self):\n        url = \"/alarm/currentalarm\"\n        result_list = self.paginated_call(url,\n                                          None,\n                                          \"GET\",\n                                          log_filter_flag=True)\n        return result_list\n\n    def _get_performance_switch(self):\n        url = \"/performance_statistic_switch\"\n        result = self.call(url, method='GET', log_filter_flag=True)\n\n        msg = _('Get performance_statistic_switch failed.')\n        self._assert_rest_result(result, msg)\n        self._assert_data_in_result(result, msg)\n        return result['data']\n\n    def _set_performance_switch(self, value):\n        url = \"/performance_statistic_switch\"\n        data = {\"CMO_PERFORMANCE_SWITCH\": value}\n        result = self.call(url, data, method='PUT', log_filter_flag=True)\n\n        msg = _('Set performance_statistic_switch failed.')\n        self._assert_rest_result(result, msg)\n        self._assert_data_in_result(result, msg)\n        return result['data']\n\n    def _get_performance_strategy(self):\n        url = \"/performance_statistic_strategy\"\n        result = self.call(url, method='GET', log_filter_flag=True)\n\n        msg = _('Get performance_statistic_strategy failed.')\n        self._assert_rest_result(result, msg)\n        self._assert_data_in_result(result, msg)\n        return result['data']\n\n    def _set_performance_strategy(self, hist_enable=1, hist_duration=60,\n                                  auto_stop=0, duration=5, max_duration=0):\n        url = \"/performance_statistic_strategy\"\n        data = {\n            \"CMO_STATISTIC_ARCHIVE_SWITCH\": hist_enable,\n            \"CMO_STATISTIC_ARCHIVE_TIME\": hist_duration,\n            \"CMO_STATISTIC_AUTO_STOP\": auto_stop,\n            \"CMO_STATISTIC_INTERVAL\": duration,\n            \"CMO_STATISTIC_MAX_TIME\": max_duration\n        }\n        result = self.call(url, data, method='PUT', log_filter_flag=True)\n\n        msg = _('Set performance_statistic_strategy failed.')\n        self._assert_rest_result(result, msg)\n        self._assert_data_in_result(result, msg)\n        return result['data']\n\n    def _get_metrics(self, resource_type, resource_id, metrics_ids):\n        url = \"/performace_statistic/cur_statistic_data\"\n        params = \"CMO_STATISTIC_UUID={0}:{1}&CMO_STATISTIC_DATA_ID_LIST={2}&\"\\\n                 \"timeConversion=0&\"\\\n            .format(resource_type, resource_id, metrics_ids)\n        return self.paginated_call(url, None, \"GET\",\n                                   params=params, log_filter_flag=True)\n\n    def enable_metrics_collection(self):\n        return self._set_performance_switch('1')\n\n    def disable_metrics_collection(self):\n        return self._set_performance_switch('0')\n\n    def configure_metrics_collection(self):\n        self.disable_metrics_collection()\n        self._set_performance_strategy(hist_enable=1, hist_duration=300,\n                                       auto_stop=0, duration=60,\n                                       max_duration=0)\n        self.enable_metrics_collection()\n\n    def get_pool_metrics(self, storage_id, selection):\n        pools = self.get_all_pools()\n        pool_metrics = []\n\n        select_metrics, select_ids = _get_selection(selection)\n        for pool in pools:\n            try:\n             
   metrics = self._get_metrics(pool['TYPE'], pool['ID'],\n                                            select_ids)\n                for metric in metrics:\n                    data_list = metric['CMO_STATISTIC_DATA_LIST'].split(\",\")\n                    for index, key in enumerate(select_metrics):\n                        data = int(data_list[index])\n                        if key in consts.CONVERT_TO_MILLI_SECOND_LIST:\n                            data = data * 1000\n                        labels = {\n                            'storage_id': storage_id,\n                            'resource_type': 'pool',\n                            'resource_id': pool['ID'],\n                            'resource_name': pool['NAME'],\n                            'type': 'RAW',\n                            'unit': consts.POOL_CAP[key]['unit']\n                        }\n                        values = _get_timestamp_values(metric, data)\n                        m = constants.metric_struct(name=key, labels=labels,\n                                                    values=values)\n                        pool_metrics.append(m)\n            except Exception as ex:\n                msg = \"Failed to get metrics for pool:{0} error: {1}\" \\\n                    .format(pool['NAME'], ex)\n                LOG.error(msg)\n        return pool_metrics\n\n    def get_volume_metrics(self, storage_id, selection):\n        volumes = self.get_all_volumes()\n        volume_metrics = []\n\n        select_metrics, select_ids = _get_selection(selection)\n        for volume in volumes:\n            try:\n                metrics = self._get_metrics(volume['TYPE'], volume['ID'],\n                                            select_ids)\n                for metric in metrics:\n                    data_list = metric['CMO_STATISTIC_DATA_LIST'].split(\",\")\n                    for index, key in enumerate(select_metrics):\n                        data = int(data_list[index])\n                        if key in consts.CONVERT_TO_MILLI_SECOND_LIST:\n                            data = data * 1000\n                        labels = {\n                            'storage_id': storage_id,\n                            'resource_type': 'volume',\n                            'resource_id': volume['ID'],\n                            'resource_name': volume['NAME'],\n                            'type': 'RAW',\n                            'unit': consts.VOLUME_CAP[key]['unit']\n                        }\n                        values = _get_timestamp_values(metric, data)\n                        m = constants.metric_struct(name=key, labels=labels,\n                                                    values=values)\n                        volume_metrics.append(m)\n            except Exception as ex:\n                msg = \"Failed to get metrics for volume:{0} error: {1}\" \\\n                    .format(volume['NAME'], ex)\n                LOG.error(msg)\n\n        return volume_metrics\n\n    def get_controller_metrics(self, storage_id, selection):\n        controllers = self.get_all_controllers()\n        controller_metrics = []\n\n        select_metrics, select_ids = _get_selection(selection)\n        for controller in controllers:\n            try:\n                metrics = self._get_metrics(controller['TYPE'],\n                                            controller['ID'],\n                                            select_ids)\n                for metric in metrics:\n                    data_list = 
metric['CMO_STATISTIC_DATA_LIST'].split(\",\")\n                    for index, key in enumerate(select_metrics):\n                        data = int(data_list[index])\n                        if key in consts.CONVERT_TO_MILLI_SECOND_LIST:\n                            data = data * 1000\n                        labels = {\n                            'storage_id': storage_id,\n                            'resource_type': 'controller',\n                            'resource_id': controller['ID'],\n                            'resource_name': controller['NAME'],\n                            'type': 'RAW',\n                            'unit': consts.CONTROLLER_CAP[key]['unit']\n                        }\n                        values = _get_timestamp_values(metric, data)\n                        m = constants.metric_struct(name=key, labels=labels,\n                                                    values=values)\n                        controller_metrics.append(m)\n            except Exception as ex:\n                msg = \"Failed to get metrics for controller:{0} error: {1}\" \\\n                    .format(controller['NAME'], ex)\n                LOG.error(msg)\n\n        return controller_metrics\n\n    def get_port_metrics(self, storage_id, selection):\n        ports = self.get_all_ports()\n        port_metrics = []\n\n        select_metrics, select_ids = _get_selection(selection)\n        for port in ports:\n            # ETH_PORT collection not supported\n            if port['TYPE'] == 213:\n                continue\n            try:\n                metrics = self._get_metrics(port['TYPE'], port['ID'],\n                                            select_ids)\n                for metric in metrics:\n                    data_list = metric['CMO_STATISTIC_DATA_LIST'].split(\",\")\n                    for index, key in enumerate(select_metrics):\n                        data = int(data_list[index])\n                        if key in consts.CONVERT_TO_MILLI_SECOND_LIST:\n                            data = data * 1000\n                        labels = {\n                            'storage_id': storage_id,\n                            'resource_type': 'port',\n                            'resource_id': port['ID'],\n                            'resource_name': port['NAME'],\n                            'type': 'RAW',\n                            'unit': consts.PORT_CAP[key]['unit']\n                        }\n                        values = _get_timestamp_values(metric, data)\n                        m = constants.metric_struct(name=key, labels=labels,\n                                                    values=values)\n                        port_metrics.append(m)\n            except Exception as ex:\n                msg = \"Failed to get metrics for port:{0} error: {1}\" \\\n                    .format(port['NAME'], ex)\n                LOG.error(msg)\n\n        return port_metrics\n\n    def get_disk_metrics(self, storage_id, selection):\n        disks = self.get_all_disks()\n        disk_metrics = []\n\n        select_metrics, select_ids = _get_selection(selection)\n        for disk in disks:\n            try:\n                metrics = self._get_metrics(disk['TYPE'], disk['ID'],\n                                            select_ids)\n                for metric in metrics:\n                    data_list = metric['CMO_STATISTIC_DATA_LIST'].split(\",\")\n                    for index, key in enumerate(select_metrics):\n                        data = int(data_list[index])\n           
             if key in consts.CONVERT_TO_MILLI_SECOND_LIST:\n                            data = data * 1000\n                        labels = {\n                            'storage_id': storage_id,\n                            'resource_type': 'disk',\n                            'resource_id': disk['ID'],\n                            'type': 'RAW',\n                            'unit': consts.DISK_CAP[key]['unit'],\n                            'resource_name':\n                                disk['MODEL'] + ':' + disk['SERIALNUMBER']\n                        }\n                        values = _get_timestamp_values(metric, data)\n                        m = constants.metric_struct(name=key, labels=labels,\n                                                    values=values)\n                        disk_metrics.append(m)\n            except Exception as ex:\n                msg = \"Failed to get metrics for disk:{0} error: {1}\"\\\n                    .format(disk['ID'], ex)\n                LOG.error(msg)\n\n        return disk_metrics\n"
  },
  {
    "path": "delfin/drivers/ibm/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/ibm/ds8k/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/ibm/ds8k/alert_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\n\nimport six\nfrom oslo_log import log\n\nfrom delfin import exception\nfrom delfin.common import alert_util\nfrom delfin.common import constants\n\nLOG = log.getLogger(__name__)\n\n\nclass AlertHandler(object):\n\n    TIME_PATTERN = \"%Y-%m-%dT%H:%M:%S%z\"\n\n    ALERT_LEVEL_MAP = {'error': constants.Severity.CRITICAL,\n                       'warning': constants.Severity.WARNING,\n                       'info': constants.Severity.INFORMATIONAL\n                       }\n    SECONDS_TO_MS = 1000\n\n    def parse_queried_alerts(self, alert_model_list, alert_list, query_para):\n        alerts = alert_list.get('data', {}).get('events')\n        if alerts:\n            for alert in alerts:\n                try:\n                    occur_time = int(time.mktime(time.strptime(\n                        alert.get('time'),\n                        self.TIME_PATTERN))) * AlertHandler.SECONDS_TO_MS\n                    if not alert_util.is_alert_in_time_range(\n                            query_para, occur_time):\n                        continue\n\n                    alert_model = {}\n                    alert_model['alert_id'] = alert.get('type')\n                    alert_model['alert_name'] = alert.get('description')\n                    alert_model['severity'] = self.ALERT_LEVEL_MAP.get(\n                        alert.get('severity'),\n                        constants.Severity.INFORMATIONAL)\n                    alert_model['description'] = alert.get('description')\n                    alert_model['category'] = constants.Category.FAULT\n                    alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n                    alert_model['sequence_number'] = alert.get('id')\n                    alert_model['occur_time'] = occur_time\n                    alert_model['resource_type'] = \\\n                        constants.DEFAULT_RESOURCE_TYPE\n                    alert_model_list.append(alert_model)\n                except Exception as e:\n                    LOG.error(e)\n                    err_msg = \"Failed to build alert model as some\" \\\n                              \" attributes missing in queried alerts: %s\"\\\n                              % (six.text_type(e))\n                    raise exception.InvalidResults(err_msg)\n"
  },
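The occur_time conversion in `parse_queried_alerts` is worth seeing in isolation: a DS8K event time string is parsed with `TIME_PATTERN` and converted to epoch milliseconds. A standalone sketch with a hypothetical timestamp; note that `time.mktime()` interprets the parsed struct as local time, mirroring the handler above:

```python
import time

TIME_PATTERN = "%Y-%m-%dT%H:%M:%S%z"
SECONDS_TO_MS = 1000

event_time = "2021-03-01T09:30:00+0000"  # hypothetical DS8K event time
occur_time = int(time.mktime(time.strptime(
    event_time, TIME_PATTERN))) * SECONDS_TO_MS
print(occur_time)  # epoch ms, comparable with query_para time ranges
```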
  {
    "path": "delfin/drivers/ibm/ds8k/consts.py",
    "content": "HOST_PORT_URL = '/api/v1/host_ports'\nHOST_URL = '/api/v1/hosts'\n"
  },
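These endpoint constants are expanded by the driver at call time; for example, `list_masking_views()` below derives a per-host mappings URL from `HOST_URL`. A small sketch with a hypothetical host name:

```python
HOST_URL = '/api/v1/hosts'

host_name = 'esx-host-01'  # hypothetical host
view_url = '%s/%s/mappings' % (HOST_URL, host_name)
print(view_url)  # /api/v1/hosts/esx-host-01/mappings
```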
  {
    "path": "delfin/drivers/ibm/ds8k/ds8k.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport six\nfrom oslo_log import log\nfrom oslo_utils import units\n\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom delfin.drivers.ibm.ds8k import rest_handler, alert_handler, consts\n\nLOG = log.getLogger(__name__)\n\n\nclass DS8KDriver(driver.StorageDriver):\n\n    PORT_TYPE_MAP = {'FC-AL': constants.PortType.FC,\n                     'SCSI-FCP': constants.PortType.FC,\n                     'FICON': constants.PortType.FICON\n                     }\n    PORT_STATUS_MAP = {\n        'online': constants.PortHealthStatus.NORMAL,\n        'offline': constants.PortHealthStatus.ABNORMAL,\n        'fenced': constants.PortHealthStatus.UNKNOWN,\n        'quiescing': constants.PortHealthStatus.UNKNOWN\n    }\n    INITIATOR_STATUS_MAP = {'logged in': constants.InitiatorStatus.ONLINE,\n                            'logged out': constants.InitiatorStatus.OFFLINE,\n                            'unconfigured': constants.InitiatorStatus.UNKNOWN\n                            }\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.rest_handler = rest_handler.RestHandler(**kwargs)\n        self.rest_handler.login()\n\n    def reset_connection(self, context, **kwargs):\n        self.rest_handler.logout()\n        self.rest_handler.verify = kwargs.get('verify', False)\n        self.rest_handler.login()\n\n    def close_connection(self):\n        self.rest_handler.logout()\n\n    def get_storage(self, context):\n        try:\n            result = None\n            system_info = self.rest_handler.get_rest_info('/api/v1/systems')\n            if system_info:\n                system_data = system_info.get('data', {}).get('systems', [])\n                if system_data:\n                    for system in system_data:\n                        name = system.get('name')\n                        model = system.get('MTM')\n                        serial_number = system.get('sn')\n                        version = system.get('release')\n                        status = constants.StorageStatus.NORMAL\n                        if system.get('state') != 'online':\n                            status = constants.StorageStatus.ABNORMAL\n                        total = 0\n                        free = 0\n                        used = 0\n                        raw = 0\n                        if system.get('cap') != '' and \\\n                                system.get('cap') is not None:\n                            total = int(system.get('cap'))\n                        if system.get('capraw') != '' and \\\n                                system.get('capraw') is not None:\n                            raw = int(system.get('capraw'))\n                        if system.get('capalloc') != '' and \\\n                                system.get('capalloc') is not None:\n                            used = int(system.get('capalloc'))\n        
                if system.get('capavail') != '' and \\\n                                system.get('capavail') is not None:\n                            free = int(system.get('capavail'))\n                        result = {\n                            'name': name,\n                            'vendor': 'IBM',\n                            'model': model,\n                            'status': status,\n                            'serial_number': serial_number,\n                            'firmware_version': version,\n                            'location': '',\n                            'total_capacity': total,\n                            'raw_capacity': raw,\n                            'used_capacity': used,\n                            'free_capacity': free\n                        }\n                        break\n                else:\n                    raise exception.StorageBackendException(\n                        \"ds8k storage system info is None\")\n            else:\n                raise exception.StorageBackendException(\n                    \"ds8k storage system info is None\")\n            return result\n        except Exception as err:\n            err_msg = \"Failed to get storage attributes from ds8k: %s\" % \\\n                      (six.text_type(err))\n            raise exception.InvalidResults(err_msg)\n\n    def list_storage_pools(self, context):\n        pool_info = self.rest_handler.get_rest_info('/api/v1/pools')\n        pool_list = []\n        status = constants.StoragePoolStatus.NORMAL\n        if pool_info is not None:\n            pool_data = pool_info.get('data', {}).get('pools', [])\n            for pool in pool_data:\n                if pool.get('stgtype') == 'fb':\n                    pool_type = constants.StorageType.BLOCK\n                else:\n                    pool_type = constants.StorageType.FILE\n                if (int(pool.get('capalloc')) / int(pool.get('cap'))) * 100 > \\\n                        int(pool.get('threshold')):\n                    status = constants.StoragePoolStatus.ABNORMAL\n                pool_name = '%s_%s' % (pool.get('name'), pool.get('node'))\n                pool_result = {\n                    'name': pool_name,\n                    'storage_id': self.storage_id,\n                    'native_storage_pool_id': str(pool.get('id')),\n                    'status': status,\n                    'storage_type': pool_type,\n                    'total_capacity': int(pool.get('cap')),\n                    'used_capacity': int(pool.get('capalloc')),\n                    'free_capacity': int(pool.get('capavail'))\n                }\n                pool_list.append(pool_result)\n        return pool_list\n\n    def list_volumes(self, context):\n        volume_list = []\n        pool_list = self.rest_handler.get_rest_info('/api/v1/pools')\n        if pool_list is not None:\n            pool_data = pool_list.get('data', {}).get('pools', [])\n            for pool in pool_data:\n                url = '/api/v1/pools/%s/volumes' % pool.get('id')\n                volumes = self.rest_handler.get_rest_info(url)\n                if volumes is not None:\n                    vol_entries = volumes.get('data', {}).get('volumes', [])\n                    for volume in vol_entries:\n                        total = volume.get('cap')\n                        used = volume.get('capalloc')\n                        vol_type = constants.VolumeType.THICK if \\\n                            volume.get('stgtype') == 'fb' else \\\n                   
         constants.VolumeType.THIN\n                        status = constants.StorageStatus.NORMAL if \\\n                            volume.get('state') == 'normal' else\\\n                            constants.StorageStatus.ABNORMAL\n                        vol_name = '%s_%s' % (volume.get('name'),\n                                              volume.get('id'))\n                        vol = {\n                            'name': vol_name,\n                            'storage_id': self.storage_id,\n                            'description': '',\n                            'status': status,\n                            'native_volume_id': str(volume.get('id')),\n                            'native_storage_pool_id':\n                                volume.get('pool').get('id'),\n                            'wwn': '',\n                            'type': vol_type,\n                            'total_capacity': int(total),\n                            'used_capacity': int(used),\n                            'free_capacity': int(total) - int(used)\n                        }\n                        volume_list.append(vol)\n        return volume_list\n\n    def list_alerts(self, context, query_para=None):\n        alert_model_list = []\n        alert_list = self.rest_handler.get_rest_info(\n            '/api/v1/events?severity=warning,error')\n        alert_handler.AlertHandler() \\\n            .parse_queried_alerts(alert_model_list, alert_list, query_para)\n        return alert_model_list\n\n    @staticmethod\n    def division_port_wwn(original_wwn):\n        result_wwn = None\n        if not original_wwn:\n            return result_wwn\n        is_first = True\n        for i in range(0, len(original_wwn), 2):\n            if is_first is True:\n                result_wwn = '%s' % (original_wwn[i:i + 2])\n                is_first = False\n            else:\n                result_wwn = '%s:%s' % (result_wwn, original_wwn[i:i + 2])\n        return result_wwn\n\n    def list_ports(self, context):\n        port_list = []\n        port_info = self.rest_handler.get_rest_info('/api/v1/ioports')\n        if port_info:\n            port_data = port_info.get('data', {}).get('ioports', [])\n            for port in port_data:\n                status = DS8KDriver.PORT_STATUS_MAP.get(\n                    port.get('state'), constants.PortHealthStatus.UNKNOWN)\n                speed = None\n                connection_status = constants.PortConnectionStatus.CONNECTED\\\n                    if status == constants.PortHealthStatus.NORMAL \\\n                    else constants.PortConnectionStatus.DISCONNECTED\n                if port.get('speed'):\n                    speed = int(port.get('speed').split(' ')[0]) * units.G\n                port_result = {\n                    'name': port.get('loc'),\n                    'storage_id': self.storage_id,\n                    'native_port_id': port.get('id'),\n                    'location': port.get('loc'),\n                    'connection_status': connection_status,\n                    'health_status': status,\n                    'type': DS8KDriver.PORT_TYPE_MAP.get(\n                        port.get('protocol'), constants.PortType.OTHER),\n                    'logical_type': '',\n                    'speed': speed,\n                    'max_speed': speed,\n                    'wwn': DS8KDriver.division_port_wwn(port.get('wwpn'))\n                }\n                port_list.append(port_result)\n        return port_list\n\n    def list_controllers(self, 
context):\n        controller_list = []\n        controller_info = self.rest_handler.get_rest_info('/api/v1/nodes')\n        if controller_info:\n            contrl_data = controller_info.get('data', {}).get('nodes', [])\n            for contrl in contrl_data:\n                status = constants.ControllerStatus.NORMAL if \\\n                    contrl.get('state') == 'online' else \\\n                    constants.ControllerStatus.UNKNOWN\n                controller_result = {\n                    'name': contrl.get('id'),\n                    'storage_id': self.storage_id,\n                    'native_controller_id': contrl.get('id'),\n                    'status': status\n                }\n                controller_list.append(controller_result)\n        return controller_list\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        pass\n\n    def clear_alert(self, context, alert):\n        pass\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}:{port}'\n\n    def list_storage_hosts(self, context):\n        try:\n            host_list = []\n            hosts = self.rest_handler.get_rest_info(consts.HOST_URL)\n            if not hosts:\n                return host_list\n            host_entries = hosts.get('data', {}).get('hosts', [])\n            for host in host_entries:\n                status = constants.HostStatus.NORMAL if \\\n                    host.get('state') == 'online' else \\\n                    constants.HostStatus.OFFLINE\n                os_type = constants.HostOSTypes.VMWARE_ESX if \\\n                    host.get('hosttype') == 'VMware' else \\\n                    constants.HostOSTypes.UNKNOWN\n                host_result = {\n                    \"name\": host.get('name'),\n                    \"storage_id\": self.storage_id,\n                    \"native_storage_host_id\": host.get('name'),\n                    \"os_type\": os_type,\n                    \"status\": status\n                }\n                host_list.append(host_result)\n            return host_list\n        except Exception as e:\n            LOG.error(\"Failed to get hosts from ds8k\")\n            raise e\n\n    def list_masking_views(self, context):\n        try:\n            view_list = []\n            hosts = self.rest_handler.get_rest_info(consts.HOST_URL)\n            if not hosts:\n                return view_list\n            host_entries = hosts.get('data', {}).get('hosts', [])\n            for host in host_entries:\n                view_url = '%s/%s/mappings' % (consts.HOST_URL,\n                                               host.get('name'))\n                views = self.rest_handler.get_rest_info(view_url)\n                if not views:\n                    continue\n                view_entries = views.get('data', {}).get('mappings', [])\n                for view in view_entries:\n                    view_id = '%s_%s' % (view.get('lunid'), host.get('name'))\n                    view_result = {\n                        \"name\": view_id,\n                        \"native_storage_host_id\": host.get('name'),\n                        \"storage_id\": self.storage_id,\n                        \"native_volume_id\": view.get('volume', {}).get('id'),\n                        \"native_masking_view_id\": view_id,\n                    }\n                    view_list.append(view_result)\n            return 
view_list\n        except Exception as e:\n            LOG.error(\"Failed to get views from ds8k\")\n            raise e\n\n    def list_storage_host_initiators(self, context):\n        try:\n            initiator_list = []\n            host_ports = self.rest_handler.get_rest_info(consts.HOST_PORT_URL)\n            if not host_ports:\n                return initiator_list\n            port_entries = host_ports.get('data', {}).get('host_ports', [])\n            for port in port_entries:\n                status = DS8KDriver.INITIATOR_STATUS_MAP.get(port.get('state'))\n                init_result = {\n                    \"name\": port.get('wwpn'),\n                    \"storage_id\": self.storage_id,\n                    \"native_storage_host_initiator_id\": port.get('wwpn'),\n                    \"wwn\": port.get('wwpn'),\n                    \"status\": status,\n                    \"type\": constants.InitiatorType.UNKNOWN,\n                    \"native_storage_host_id\": port.get('host', {}).get('name')\n                }\n                initiator_list.append(init_result)\n            return initiator_list\n        except Exception as e:\n            LOG.error(\"Failed to get initiators from ds8k\")\n            raise e\n"
  },
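`DS8KDriver.division_port_wwn()` reformats a raw 16-hex-digit WWPN into colon-separated byte pairs. A compact equivalent, shown only for illustration with a hypothetical WWPN:

```python
def division_port_wwn(original_wwn):
    # Split the raw WWPN into two-character chunks and join with ':'.
    if not original_wwn:
        return None
    return ':'.join(original_wwn[i:i + 2]
                    for i in range(0, len(original_wwn), 2))

print(division_port_wwn('500507630AFFC29F'))  # 50:05:07:63:0A:FF:C2:9F
```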
  {
    "path": "delfin/drivers/ibm/ds8k/rest_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport threading\n\nimport six\nfrom oslo_log import log as logging\n\nfrom delfin import cryptor\nfrom delfin import exception\nfrom delfin.drivers.utils.rest_client import RestClient\n\nLOG = logging.getLogger(__name__)\n\n\nclass RestHandler(RestClient):\n    REST_TOKEN_URL = '/api/v1/tokens'\n\n    def __init__(self, **kwargs):\n        self.session_lock = threading.Lock()\n        super(RestHandler, self).__init__(**kwargs)\n\n    def call_with_token(self, url, data, method):\n        auth_key = None\n        if self.session:\n            auth_key = self.session.headers.get('X-Auth-Token', None)\n            if auth_key:\n                self.session.headers['X-Auth-Token'] \\\n                    = cryptor.decode(auth_key)\n        res = self.do_call(url, data, method)\n        if auth_key:\n            self.session.headers['X-Auth-Token'] = auth_key\n        return res\n\n    def call(self, url, data=None, method=None):\n        try:\n            res = self.call_with_token(url, data, method)\n            if res.status_code == 401:\n                LOG.error(\"Failed to get token,status_code:%s,error_mesg:%s\" %\n                          (res.status_code, res.text))\n                self.login()\n                res = self.call_with_token(url, data, method)\n            elif res.status_code == 503:\n                raise exception.InvalidResults(res.text)\n            return res\n        except Exception as e:\n            LOG.error(\"Method:%s,url:%s failed: %s\" % (method, url,\n                                                       six.text_type(e)))\n            raise e\n\n    def get_rest_info(self, url, data=None, method='GET'):\n        result_json = None\n        res = self.call(url, data, method)\n        if res.status_code == 200:\n            result_json = res.json()\n        return result_json\n\n    def login(self):\n        try:\n            data = {\n                'request': {\n                    'params': {\n                        \"username\": self.rest_username,\n                        \"password\": cryptor.decode(self.rest_password)\n                    }\n                }\n            }\n            with self.session_lock:\n                if self.session is None:\n                    self.init_http_head()\n                res = self.call_with_token(\n                    RestHandler.REST_TOKEN_URL, data, 'POST')\n                if res.status_code == 200:\n                    result = res.json()\n                    self.session.headers['X-Auth-Token'] = \\\n                        cryptor.encode(result.get('token').get('token'))\n                else:\n                    LOG.error(\"Login error. 
URL: %(url)s，Reason: %(reason)s.\",\n                              {\"url\": RestHandler.REST_TOKEN_URL,\n                               \"reason\": res.text})\n                    if 'Authentication has failed' in res.text:\n                        raise exception.InvalidUsernameOrPassword()\n                    else:\n                        raise exception.StorageBackendException(res.text)\n        except Exception as e:\n            LOG.error(\"Login error: %s\", six.text_type(e))\n            raise e\n        finally:\n            data = None\n\n    def logout(self):\n        try:\n            if self.session:\n                self.session.close()\n        except Exception as e:\n            err_msg = \"Logout error: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n"
  },
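The control flow of `RestHandler.call()` above is a common re-authenticate-and-retry pattern: on HTTP 401 the handler logs in again (refreshing the `X-Auth-Token` header) and replays the request once. A minimal sketch of the pattern, not the delfin `RestClient` itself; `session` and `login` are hypothetical stand-ins:

```python
import requests

def call(session, url, login, method='GET'):
    # First attempt with whatever token the session currently holds.
    res = session.request(method, url)
    if res.status_code == 401:
        # Token expired or invalid: re-authenticate, then retry once.
        login(session)  # expected to refresh session.headers['X-Auth-Token']
        res = session.request(method, url)
    return res
```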
  {
    "path": "delfin/drivers/ibm/storwize_svc/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/ibm/storwize_svc/consts.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2016 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nLOCAL_FILE_PATH = '/delfin/drivers/utils/performance_file/svc/'\nREMOTE_FILE_PATH = '/dumps/iostats/'\n\nIOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Input/output operations per second\"\n}\nREAD_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Read input/output operations per second\"\n}\nWRITE_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Write input/output operations per second\"\n}\nTHROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data is \"\n                   \"successfully transferred in MB/s\"\n}\nREAD_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data read is \"\n                   \"successfully transferred in MB/s\"\n}\nWRITE_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data write is \"\n                   \"successfully transferred in MB/s\"\n}\nRESPONSE_TIME_DESCRIPTION = {\n    \"unit\": \"ms\",\n    \"description\": \"Average time taken for an IO \"\n                   \"operation in ms\"\n}\nCACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of io that are cache hits\"\n}\nREAD_CACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of read ops that are cache hits\"\n}\nWRITE_CACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of write ops that are cache hits\"\n}\nIO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of IO requests in KB\"\n}\nREAD_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of read IO requests in KB\"\n}\nWRITE_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of write IO requests in KB\"\n}\nCPU_USAGE_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of CPU usage\"\n}\nMEMORY_USAGE_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of DISK memory usage in percentage\"\n}\nSERVICE_TIME = {\n    \"unit\": 'ms',\n    \"description\": \"Service time of the resource in ms\"\n}\nVOLUME_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n    \"cacheHitRatio\": CACHE_HIT_RATIO_DESCRIPTION,\n    \"readCacheHitRatio\": READ_CACHE_HIT_RATIO_DESCRIPTION,\n    \"writeCacheHitRatio\": WRITE_CACHE_HIT_RATIO_DESCRIPTION,\n    \"ioSize\": IO_SIZE_DESCRIPTION,\n    \"readIoSize\": READ_IO_SIZE_DESCRIPTION,\n    
\"writeIoSize\": WRITE_IO_SIZE_DESCRIPTION\n}\nPORT_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION\n}\nDISK_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\nCONTROLLER_CAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\n"
  },
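Each capability table above maps a metric name to its unit and description; `packege_data()` in ssh_handler.py looks units up this way when labelling samples. A sketch of the lookup, with the dict literal abbreviated from the constants:

```python
VOLUME_CAP = {
    "iops": {"unit": "IOPS",
             "description": "Input/output operations per second"},
    "responseTime": {"unit": "ms",
                     "description": "Average time taken for an IO "
                                    "operation in ms"},
}

target = "responseTime"
print(VOLUME_CAP[target]["unit"])  # ms
```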
  {
    "path": "delfin/drivers/ibm/storwize_svc/ssh_handler.py",
    "content": "# Copyright 2020 The SODA Authors.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport os\nimport re\nimport time\nfrom itertools import islice\n\nimport paramiko\nimport six\nfrom oslo_log import log as logging\nfrom oslo_utils import units\n\nfrom delfin import exception, utils\nfrom delfin.common import constants, alert_util\nfrom delfin.drivers.ibm.storwize_svc import consts\nfrom delfin.drivers.utils.ssh_client import SSHPool\nfrom delfin.drivers.utils.tools import Tools\n\nLOG = logging.getLogger(__name__)\n\n\nclass SSHHandler(object):\n    OID_ERR_ID = '1.3.6.1.4.1.2.6.190.4.3'\n    OID_SEQ_NUMBER = '1.3.6.1.4.1.2.6.190.4.9'\n    OID_LAST_TIME = '1.3.6.1.4.1.2.6.190.4.10'\n    OID_OBJ_TYPE = '1.3.6.1.4.1.2.6.190.4.11'\n    OID_OBJ_NAME = '1.3.6.1.4.1.2.6.190.4.17'\n    OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'\n\n    TRAP_SEVERITY_MAP = {\n        '1.3.6.1.4.1.2.6.190.1': constants.Severity.CRITICAL,\n        '1.3.6.1.4.1.2.6.190.2': constants.Severity.WARNING,\n        '1.3.6.1.4.1.2.6.190.3': constants.Severity.INFORMATIONAL,\n    }\n\n    SEVERITY_MAP = {\"warning\": \"Warning\",\n                    \"informational\": \"Informational\",\n                    \"error\": \"Major\"\n                    }\n    CONTRL_STATUS_MAP = {\"online\": constants.ControllerStatus.NORMAL,\n                         \"offline\": constants.ControllerStatus.OFFLINE,\n                         \"service\": constants.ControllerStatus.NORMAL,\n                         \"flushing\": constants.ControllerStatus.UNKNOWN,\n                         \"pending\": constants.ControllerStatus.UNKNOWN,\n                         \"adding\": constants.ControllerStatus.UNKNOWN,\n                         \"deleting\": constants.ControllerStatus.UNKNOWN\n                         }\n\n    DISK_PHYSICAL_TYPE = {\n        'fc': constants.DiskPhysicalType.FC,\n        'sas_direct': constants.DiskPhysicalType.SAS\n    }\n    DISK_STATUS_MAP = {\n        'online': constants.DiskStatus.NORMAL,\n        'offline': constants.DiskStatus.OFFLINE,\n        'excluded': constants.DiskStatus.ABNORMAL,\n        'degraded_paths': constants.DiskStatus.DEGRADED,\n        'degraded_ports': constants.DiskStatus.DEGRADED,\n        'degraded': constants.DiskStatus.DEGRADED\n    }\n    VOLUME_PERF_METRICS = {\n        'readIops': 'ro',\n        'writeIops': 'wo',\n        'readThroughput': 'rb',\n        'writeThroughput': 'wb',\n        'readIoSize': 'rb',\n        'writeIoSize': 'wb',\n        'responseTime': 'res_time',\n        'throughput': 'tb',\n        'iops': 'to',\n        'ioSize': 'tb',\n        'cacheHitRatio': 'hrt',\n        'readCacheHitRatio': 'rhr',\n        'writeCacheHitRatio': 'whr'\n    }\n    DISK_PERF_METRICS = {\n        'readIops': 'ro',\n        'writeIops': 'wo',\n        'readThroughput': 'rb',\n        'writeThroughput': 'wb',\n        'responseTime': 'res_time',\n        'throughput': 'tb',\n        'iops': 'to'\n    }\n    
CONTROLLER_PERF_METRICS = {\n        'readIops': 'ro',\n        'writeIops': 'wo',\n        'readThroughput': 'rb',\n        'writeThroughput': 'wb',\n        'responseTime': 'res_time',\n        'throughput': 'tb',\n        'iops': 'to'\n    }\n    PORT_PERF_METRICS = {\n        'readIops': 'ro',\n        'writeIops': 'wo',\n        'readThroughput': 'rb',\n        'writeThroughput': 'wb',\n        'throughput': 'tb',\n        'responseTime': 'res_time',\n        'iops': 'to'\n    }\n    TARGET_RESOURCE_RELATION = {\n        constants.ResourceType.DISK: 'mdsk',\n        constants.ResourceType.VOLUME: 'vdsk',\n        constants.ResourceType.PORT: 'port',\n        constants.ResourceType.CONTROLLER: 'node'\n    }\n    RESOURCE_PERF_MAP = {\n        constants.ResourceType.DISK: DISK_PERF_METRICS,\n        constants.ResourceType.VOLUME: VOLUME_PERF_METRICS,\n        constants.ResourceType.PORT: PORT_PERF_METRICS,\n        constants.ResourceType.CONTROLLER: CONTROLLER_PERF_METRICS\n    }\n    SECONDS_TO_MS = 1000\n    ALERT_NOT_FOUND_CODE = 'CMMVC8275E'\n    BLOCK_SIZE = 512\n    BYTES_TO_BIT = 8\n    OS_TYPE_MAP = {'generic': constants.HostOSTypes.UNKNOWN,\n                   'hpux': constants.HostOSTypes.HP_UX,\n                   'openvms': constants.HostOSTypes.OPEN_VMS,\n                   'tpgs': constants.HostOSTypes.UNKNOWN,\n                   'vvol': constants.HostOSTypes.UNKNOWN\n                   }\n    INITIATOR_STATUS_MAP = {'active': constants.InitiatorStatus.ONLINE,\n                            'offline': constants.InitiatorStatus.OFFLINE,\n                            'inactive': constants.InitiatorStatus.ONLINE\n                            }\n    HOST_STATUS_MAP = {'online': constants.HostStatus.NORMAL,\n                       'offline': constants.HostStatus.OFFLINE,\n                       'degraded': constants.HostStatus.DEGRADED,\n                       'mask': constants.HostStatus.NORMAL,\n                       }\n\n    def __init__(self, **kwargs):\n        self.ssh_pool = SSHPool(**kwargs)\n\n    @staticmethod\n    def handle_split(split_str, split_char, arr_number):\n        split_value = ''\n        if split_str is not None and split_str != '':\n            tmp_value = split_str.split(split_char, 1)\n            if arr_number == 1 and len(tmp_value) > 1:\n                split_value = tmp_value[arr_number].strip()\n            elif arr_number == 0:\n                split_value = tmp_value[arr_number].strip()\n        return split_value\n\n    @staticmethod\n    def parse_alert(alert):\n        try:\n            alert_model = dict()\n            alert_name = SSHHandler.handle_split(alert.get(\n                SSHHandler.OID_ERR_ID), ':', 1)\n            error_info = SSHHandler.handle_split(alert.get(\n                SSHHandler.OID_ERR_ID), ':', 0)\n            alert_id = SSHHandler.handle_split(error_info, '=', 1)\n            severity = SSHHandler.TRAP_SEVERITY_MAP.get(\n                alert.get(SSHHandler.OID_SEVERITY),\n                constants.Severity.INFORMATIONAL\n            )\n            alert_model['alert_id'] = str(alert_id)\n            alert_model['alert_name'] = alert_name\n            alert_model['severity'] = severity\n            alert_model['category'] = constants.Category.FAULT\n            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n            alert_model['sequence_number'] = SSHHandler. \\\n                handle_split(alert.get(SSHHandler.OID_SEQ_NUMBER), '=', 1)\n            timestamp = SSHHandler. 
\\\n                handle_split(alert.get(SSHHandler.OID_LAST_TIME), '=', 1)\n            time_type = '%a %b %d %H:%M:%S %Y'\n            occur_time = int(time.mktime(time.strptime(\n                timestamp,\n                time_type)))\n            alert_model['occur_time'] = int(occur_time * SSHHandler.\n                                            SECONDS_TO_MS)\n            alert_model['description'] = alert_name\n            alert_model['resource_type'] = SSHHandler.handle_split(\n                alert.get(SSHHandler.OID_OBJ_TYPE), '=', 1)\n            alert_model['location'] = SSHHandler.handle_split(alert.get(\n                SSHHandler.OID_OBJ_NAME), '=', 1)\n            return alert_model\n        except Exception as e:\n            LOG.error(e)\n            msg = (\"Failed to build alert model as some attributes missing \"\n                   \"in alert message:%s.\") % (six.text_type(e))\n            raise exception.InvalidResults(msg)\n\n    def login(self):\n        try:\n            with self.ssh_pool.item() as ssh:\n                result = SSHHandler.do_exec('lssystem', ssh)\n                if 'is not a recognized command' in result:\n                    raise exception.InvalidIpOrPort()\n        except Exception as e:\n            LOG.error(\"Failed to login ibm storwize_svc %s\" %\n                      (six.text_type(e)))\n            raise e\n\n    @staticmethod\n    def do_exec(command_str, ssh):\n        \"\"\"Execute command\"\"\"\n        try:\n            utils.check_ssh_injection(command_str.split())\n            if command_str is not None and ssh is not None:\n                stdin, stdout, stderr = ssh.exec_command(command_str)\n                res, err = stdout.read(), stderr.read()\n                re = res if res else err\n                result = re.decode()\n        except paramiko.AuthenticationException as ae:\n            LOG.error('doexec Authentication error:{}'.format(ae))\n            raise exception.InvalidUsernameOrPassword()\n        except Exception as e:\n            err = six.text_type(e)\n            LOG.error('doexec InvalidUsernameOrPassword error')\n            if 'timed out' in err:\n                raise exception.SSHConnectTimeout()\n            elif 'No authentication methods available' in err \\\n                    or 'Authentication failed' in err:\n                raise exception.InvalidUsernameOrPassword()\n            elif 'not a valid RSA private key file' in err:\n                raise exception.InvalidPrivateKey()\n            else:\n                raise exception.SSHException(err)\n        return result\n\n    def exec_ssh_command(self, command):\n        try:\n            with self.ssh_pool.item() as ssh:\n                ssh_info = SSHHandler.do_exec(command, ssh)\n            return ssh_info\n        except Exception as e:\n            msg = \"Failed to ssh ibm storwize_svc %s: %s\" % \\\n                  (command, six.text_type(e))\n            raise exception.SSHException(msg)\n\n    def change_capacity_to_bytes(self, unit):\n        unit = unit.upper()\n        if unit == 'TB':\n            result = units.Ti\n        elif unit == 'GB':\n            result = units.Gi\n        elif unit == 'MB':\n            result = units.Mi\n        elif unit == 'KB':\n            result = units.Ki\n        else:\n            result = 1\n        return int(result)\n\n    def parse_string(self, value):\n        capacity = 0\n        if value:\n            if value.isdigit():\n                capacity = float(value)\n            
else:\n                unit = value[-2:]\n                capacity = float(value[:-2]) * int(\n                    self.change_capacity_to_bytes(unit))\n        return capacity\n\n    def get_storage(self):\n        try:\n            system_info = self.exec_ssh_command('lssystem')\n            storage_map = {}\n            self.handle_detail(system_info, storage_map, split=' ')\n            serial_number = storage_map.get('id')\n            status = 'normal' if storage_map.get('statistics_status') == 'on' \\\n                else 'offline'\n            location = storage_map.get('location')\n            free_capacity = self.parse_string(storage_map.get(\n                'total_free_space'))\n            used_capacity = self.parse_string(storage_map.get(\n                'total_used_capacity'))\n            raw_capacity = self.parse_string(storage_map.get(\n                'total_mdisk_capacity'))\n            subscribed_capacity = self.parse_string(storage_map.get(\n                'virtual_capacity'))\n            total_capacity = int(free_capacity + used_capacity)\n            if total_capacity > raw_capacity:\n                raw_capacity = total_capacity\n            firmware_version = ''\n            if storage_map.get('code_level') is not None:\n                firmware_version = storage_map.get('code_level').split(' ')[0]\n            s = {\n                'name': storage_map.get('name'),\n                'vendor': 'IBM',\n                'model': storage_map.get('product_name'),\n                'status': status,\n                'serial_number': serial_number,\n                'firmware_version': firmware_version,\n                'location': location,\n                'total_capacity': total_capacity,\n                'raw_capacity': int(raw_capacity),\n                'subscribed_capacity': int(subscribed_capacity),\n                'used_capacity': int(used_capacity),\n                'free_capacity': int(free_capacity)\n            }\n            return s\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage: %s\" % (six.text_type(e.msg))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def handle_detail(self, deltail_info, detail_map, split):\n        detail_arr = deltail_info.split('\\n')\n        for detail in detail_arr:\n            if detail is not None and detail != '':\n                strinfo = detail.split(split, 1)\n                key = strinfo[0]\n                value = ''\n                if len(strinfo) > 1:\n                    value = strinfo[1]\n                detail_map[key] = value\n\n    def list_storage_pools(self, storage_id):\n        try:\n            pool_list = []\n            pool_info = self.exec_ssh_command('lsmdiskgrp')\n            pool_res = pool_info.split('\\n')\n            for i in range(1, len(pool_res)):\n                if pool_res[i] is None or pool_res[i] == '':\n                    continue\n\n                pool_str = ' '.join(pool_res[i].split())\n                strinfo = pool_str.split(' ')\n                detail_command = 'lsmdiskgrp %s' % strinfo[0]\n                deltail_info = self.exec_ssh_command(detail_command)\n                pool_map = {}\n                self.handle_detail(deltail_info, pool_map, split=' ')\n                status = 'normal' if pool_map.get('status') == 
'online' \\\n                    else 'offline'\n                total_cap = self.parse_string(pool_map.get('capacity'))\n                free_cap = self.parse_string(pool_map.get('free_capacity'))\n                used_cap = self.parse_string(pool_map.get('used_capacity'))\n                subscribed_capacity = self.parse_string(pool_map.get(\n                    'virtual_capacity'))\n                p = {\n                    'name': pool_map.get('name'),\n                    'storage_id': storage_id,\n                    'native_storage_pool_id': pool_map.get('id'),\n                    'description': '',\n                    'status': status,\n                    'storage_type': constants.StorageType.BLOCK,\n                    'subscribed_capacity': int(subscribed_capacity),\n                    'total_capacity': int(total_cap),\n                    'used_capacity': int(used_cap),\n                    'free_capacity': int(free_cap)\n                }\n                pool_list.append(p)\n\n            return pool_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage pool: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage pool: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_volumes(self, storage_id):\n        try:\n            volume_list = []\n            volume_info = self.exec_ssh_command('lsvdisk')\n            volume_res = volume_info.split('\\n')\n            for i in range(1, len(volume_res)):\n                if volume_res[i] is None or volume_res[i] == '':\n                    continue\n                volume_str = ' '.join(volume_res[i].split())\n                strinfo = volume_str.split(' ')\n                volume_id = strinfo[0]\n                detail_command = 'lsvdisk -delim : %s' % volume_id\n                deltail_info = self.exec_ssh_command(detail_command)\n                volume_map = {}\n                self.handle_detail(deltail_info, volume_map, split=':')\n                status = 'normal' if volume_map.get('status') == 'online' \\\n                    else 'offline'\n                volume_type = 'thin' if volume_map.get('se_copy') == 'yes' \\\n                    else 'thick'\n                total_capacity = self.parse_string(volume_map.get('capacity'))\n                free_capacity = self.parse_string(volume_map.\n                                                  get('free_capacity'))\n                used_capacity = self.parse_string(volume_map.\n                                                  get('used_capacity'))\n                compressed = True\n                deduplicated = True\n                if volume_map.get('compressed_copy') == 'no':\n                    compressed = False\n                if volume_map.get('deduplicated_copy') == 'no':\n                    deduplicated = False\n\n                v = {\n                    'name': volume_map.get('name'),\n                    'storage_id': storage_id,\n                    'description': '',\n                    'status': status,\n                    'native_volume_id': str(volume_map.get('id')),\n                    'native_storage_pool_id': volume_map.get('mdisk_grp_id'),\n                    'wwn': str(volume_map.get('vdisk_UID')),\n                    'type': volume_type,\n                    'total_capacity': int(total_capacity),\n                    
'used_capacity': int(used_capacity),\n                    'free_capacity': int(free_capacity),\n                    'compressed': compressed,\n                    'deduplicated': deduplicated\n                }\n                volume_list.append(v)\n            return volume_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage volume: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage volume: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_alerts(self, query_para):\n        try:\n            alert_list = []\n            alert_info = self.exec_ssh_command('lseventlog -monitoring yes '\n                                               '-message no')\n            alert_res = alert_info.split('\\n')\n            for i in range(1, len(alert_res)):\n                if alert_res[i] is None or alert_res[i] == '':\n                    continue\n                alert_str = ' '.join(alert_res[i].split())\n                strinfo = alert_str.split(' ', 1)\n                detail_command = 'lseventlog %s' % strinfo[0]\n                deltail_info = self.exec_ssh_command(detail_command)\n                alert_map = {}\n                self.handle_detail(deltail_info, alert_map, split=' ')\n                occur_time = int(alert_map.get('last_timestamp_epoch')) * \\\n                    self.SECONDS_TO_MS\n                if not alert_util.is_alert_in_time_range(query_para,\n                                                         occur_time):\n                    continue\n                alert_name = alert_map.get('event_id_text', '')\n                event_id = alert_map.get('event_id')\n                location = alert_map.get('object_name', '')\n                resource_type = alert_map.get('object_type', '')\n                severity = self.SEVERITY_MAP.get(alert_map.\n                                                 get('notification_type'))\n                if severity == 'Informational' or severity is None:\n                    continue\n                alert_model = {\n                    'alert_id': event_id,\n                    'alert_name': alert_name,\n                    'severity': severity,\n                    'category': constants.Category.FAULT,\n                    'type': 'EquipmentAlarm',\n                    'sequence_number': alert_map.get('sequence_number'),\n                    'occur_time': occur_time,\n                    'description': alert_name,\n                    'resource_type': resource_type,\n                    'location': location\n                }\n                alert_list.append(alert_model)\n\n            return alert_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage alert: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage alert: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def fix_alert(self, alert):\n        command_line = 'cheventlog -fix %s' % alert\n        result = self.exec_ssh_command(command_line)\n        if result:\n            if self.ALERT_NOT_FOUND_CODE not in result:\n                raise exception.InvalidResults(six.text_type(result))\n            LOG.warning(\"Alert %s doesn't 
exist.\", alert)\n\n    def list_controllers(self, storage_id):\n        try:\n            controller_list = []\n            controller_cmd = 'lsnode'\n            control_info = self.exec_ssh_command(controller_cmd)\n            if 'command not found' in control_info:\n                controller_cmd = 'lsnodecanister'\n                control_info = self.exec_ssh_command(controller_cmd)\n            control_res = control_info.split('\\n')\n            for i in range(1, len(control_res)):\n                if control_res[i] is None or control_res[i] == '':\n                    continue\n                control_str = ' '.join(control_res[i].split())\n                str_info = control_str.split(' ')\n                control_id = str_info[0]\n                detail_command = '%s %s' % (controller_cmd, control_id)\n                deltail_info = self.exec_ssh_command(detail_command)\n                control_map = {}\n                self.handle_detail(deltail_info, control_map, split=' ')\n                cpu_map = {}\n                cpu_cmd = 'lsnodehw -delim , %s' % control_id\n                cpu_info = self.exec_ssh_command(cpu_cmd)\n                if 'command not found' in cpu_info:\n                    cpu_cmd = 'lsnodecanisterhw -delim , %s' % control_id\n                    cpu_info = self.exec_ssh_command(cpu_cmd)\n                self.handle_detail(cpu_info, cpu_map, split=',')\n                cpu_actual = cpu_map.get('cpu_actual')\n                cpu_count = cpu_map.get('cpu_count')\n                status = SSHHandler.CONTRL_STATUS_MAP.get(\n                    control_map.get('status'),\n                    constants.ControllerStatus.UNKNOWN)\n                controller_result = {\n                    'name': control_map.get('name'),\n                    'storage_id': storage_id,\n                    'native_controller_id': control_map.get('id'),\n                    'status': status,\n                    'soft_version':\n                        control_map.get('code_level', '').split(' ')[0],\n                    'location': control_map.get('name'),\n                    'cpu_info': cpu_actual,\n                    'cpu_count': int(cpu_count)\n                }\n                controller_list.append(controller_result)\n            return controller_list\n        except Exception as err:\n            err_msg = \"Failed to get controller attributes from Storwize: %s\"\\\n                      % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_disks(self, storage_id):\n        try:\n            disk_list = []\n            disk_info = self.exec_ssh_command('lsmdisk')\n            disk_res = disk_info.split('\\n')\n            for i in range(1, len(disk_res)):\n                if disk_res[i] is None or disk_res[i] == '':\n                    continue\n                control_str = ' '.join(disk_res[i].split())\n                str_info = control_str.split(' ')\n                disk_id = str_info[0]\n                detail_command = 'lsmdisk %s' % disk_id\n                deltail_info = self.exec_ssh_command(detail_command)\n                disk_map = {}\n                self.handle_detail(deltail_info, disk_map, split=' ')\n                status = SSHHandler.DISK_STATUS_MAP.get(\n                    disk_map.get('status'), constants.DiskStatus.ABNORMAL)\n                physical_type = SSHHandler.DISK_PHYSICAL_TYPE.get(\n                    disk_map.get('fabric_type'),\n                    
constants.DiskPhysicalType.UNKNOWN)\n                location = '%s_%s' % (disk_map.get('controller_name'),\n                                      disk_map.get('name'))\n                disk_result = {\n                    'name': disk_map.get('name'),\n                    'storage_id': storage_id,\n                    'native_disk_id': disk_map.get('id'),\n                    'capacity': int(self.parse_string(\n                        disk_map.get('capacity'))),\n                    'status': status,\n                    'physical_type': physical_type,\n                    'native_disk_group_id': disk_map.get('mdisk_grp_name'),\n                    'location': location\n                }\n                disk_list.append(disk_result)\n            return disk_list\n        except Exception as err:\n            err_msg = \"Failed to get disk attributes from Storwize: %s\" % \\\n                      (six.text_type(err))\n            raise exception.InvalidResults(err_msg)\n\n    def get_fc_port(self, storage_id):\n        port_list = []\n        fc_info = self.exec_ssh_command('lsportfc')\n        fc_res = fc_info.split('\\n')\n        for i in range(1, len(fc_res)):\n            if fc_res[i] is None or fc_res[i] == '':\n                continue\n            control_str = ' '.join(fc_res[i].split())\n            str_info = control_str.split(' ')\n            port_id = str_info[0]\n            detail_command = 'lsportfc %s' % port_id\n            deltail_info = self.exec_ssh_command(detail_command)\n            port_map = {}\n            self.handle_detail(deltail_info, port_map, split=' ')\n            status = constants.PortHealthStatus.NORMAL\n            conn_status = constants.PortConnectionStatus.CONNECTED\n            if port_map.get('status') != 'active':\n                status = constants.PortHealthStatus.ABNORMAL\n                conn_status = constants.PortConnectionStatus.DISCONNECTED\n            port_type = constants.PortType.FC\n            if port_map.get('type') == 'ethernet':\n                port_type = constants.PortType.ETH\n            location = '%s_%s' % (port_map.get('node_name'),\n                                  port_map.get('id'))\n            speed = None\n            if port_map.get('port_speed')[:-2].isdigit():\n                speed = int(self.handle_port_bps(\n                    port_map.get('port_speed'), 'fc'))\n            port_result = {\n                'name': location,\n                'storage_id': storage_id,\n                'native_port_id': port_map.get('id'),\n                'location': location,\n                'connection_status': conn_status,\n                'health_status': status,\n                'type': port_type,\n                'speed': speed,\n                'native_parent_id': port_map.get('node_name'),\n                'wwn': port_map.get('WWPN')\n            }\n            port_list.append(port_result)\n        return port_list\n\n    def get_iscsi_port(self, storage_id):\n        port_list = []\n        for i in range(1, 3):\n            port_array = []\n            port_command = 'lsportip %s' % i\n            port_info = self.exec_ssh_command(port_command)\n            port_arr = port_info.split('\\n')\n            port_map = {}\n            for detail in port_arr:\n                if detail is not None and detail != '':\n                    strinfo = detail.split(' ', 1)\n                    key = strinfo[0]\n                    value = ''\n                    if len(strinfo) > 1:\n                        value = 
strinfo[1]\n                    port_map[key] = value\n                else:\n                    if len(port_map) > 1:\n                        port_array.append(port_map)\n                        port_map = {}\n                        continue\n            for port in port_array:\n                if port.get('failover') == 'yes':\n                    continue\n                status = constants.PortHealthStatus.ABNORMAL\n                if port.get('state') == 'online':\n                    status = constants.PortHealthStatus.NORMAL\n                conn_status = constants.PortConnectionStatus.DISCONNECTED\n                if port.get('link_state') == 'active':\n                    conn_status = constants.PortConnectionStatus.CONNECTED\n                port_type = constants.PortType.ETH\n                location = '%s_%s' % (port.get('node_name'),\n                                      port.get('id'))\n                port_result = {\n                    'name': location,\n                    'storage_id': storage_id,\n                    'native_port_id': location,\n                    'location': location,\n                    'connection_status': conn_status,\n                    'health_status': status,\n                    'type': port_type,\n                    'speed': int(self.handle_port_bps(\n                        port.get('speed'), 'eth')),\n                    'native_parent_id': port.get('node_name'),\n                    'mac_address': port.get('MAC'),\n                    'ipv4': port.get('IP_address'),\n                    'ipv4_mask': port.get('mask'),\n                    'ipv6': port.get('IP_address_6')\n                }\n                port_list.append(port_result)\n        return port_list\n\n    @staticmethod\n    def change_speed_to_bytes(unit):\n        unit = unit.upper()\n        if unit == 'TB':\n            result = units.T\n        elif unit == 'GB':\n            result = units.G\n        elif unit == 'MB':\n            result = units.M\n        elif unit == 'KB':\n            result = units.k\n        else:\n            result = 1\n        return int(result)\n\n    def handle_port_bps(self, value, port_type):\n        speed = 0\n        if value:\n            if value.isdigit():\n                speed = float(value)\n            else:\n                if port_type == 'fc':\n                    unit = value[-2:]\n                    speed = float(value[:-2]) * int(\n                        self.change_speed_to_bytes(unit))\n                else:\n                    unit = value[-4:-2]\n                    speed = float(value[:-4]) * int(\n                        self.change_speed_to_bytes(unit))\n        return speed\n\n    def list_ports(self, storage_id):\n        try:\n            port_list = []\n            port_list.extend(self.get_fc_port(storage_id))\n            port_list.extend(self.get_iscsi_port(storage_id))\n            return port_list\n        except Exception as err:\n            err_msg = \"Failed to get ports attributes from Storwize: %s\" % \\\n                      (six.text_type(err))\n            raise exception.InvalidResults(err_msg)\n\n    @staticmethod\n    def handle_stats_filename(file_name, file_map):\n        name_arr = file_name.split('_')\n        file_type = '%s_%s_%s' % (name_arr[0], name_arr[1], name_arr[2])\n        file_time = '20%s%s' % (name_arr[3], name_arr[4])\n        time_pattern = '%Y%m%d%H%M%S'\n        tools = Tools()\n        occur_time = tools.time_str_to_timestamp(file_time, time_pattern)\n        if 
file_map.get(file_type):\n            file_map[file_type][occur_time] = file_name\n        else:\n            file_map[file_type] = {occur_time: file_name}\n\n    def get_stats_filelist(self, file_map):\n        stats_file_command = 'lsdumps -prefix /dumps/iostats'\n        file_list = self.exec_ssh_command(stats_file_command)\n        file_line = file_list.split('\\n')\n        for file in islice(file_line, 1, None):\n            if file:\n                file_arr = ' '.join(file.split()).split(' ')\n                if len(file_arr) > 1:\n                    file_name = file_arr[1]\n                    SSHHandler.handle_stats_filename(file_name, file_map)\n        for file_stats in file_map:\n            file_map[file_stats] = sorted(file_map.get(file_stats).items(),\n                                          key=lambda x: x[0], reverse=False)\n\n    def packege_data(self, storage_id, resource_type, metrics, metric_map):\n        resource_id = None\n        resource_name = None\n        unit = None\n        for resource_info in metric_map:\n            if resource_type == constants.ResourceType.PORT:\n                port_info = self.get_fc_port(storage_id)\n                if port_info:\n                    for fc_port in port_info:\n                        if resource_info.strip('0x').upper() == fc_port.get(\n                                'wwn').upper():\n                            resource_id = fc_port.get('native_port_id')\n                            resource_name = fc_port.get('name')\n                            break\n            else:\n                resource_arr = resource_info.split('_')\n                resource_id = resource_arr[0]\n                resource_name = resource_arr[1]\n            for target in metric_map.get(resource_info):\n                if resource_type == constants.ResourceType.PORT:\n                    unit = consts.PORT_CAP[target]['unit']\n                elif resource_type == constants.ResourceType.VOLUME:\n                    unit = consts.VOLUME_CAP[target]['unit']\n                elif resource_type == constants.ResourceType.DISK:\n                    unit = consts.DISK_CAP[target]['unit']\n                elif resource_type == constants.ResourceType.CONTROLLER:\n                    unit = consts.CONTROLLER_CAP[target]['unit']\n                if 'responseTime' == target:\n                    for res_time in metric_map.get(resource_info).get(target):\n                        for iops_time in metric_map.get(resource_info).get(\n                                'iops'):\n                            if res_time == iops_time:\n                                res_value = metric_map.get(resource_info).get(\n                                    target).get(res_time)\n                                iops_value = metric_map.get(\n                                    resource_info).get('iops').get(iops_time)\n                                res_value = \\\n                                    res_value / iops_value if iops_value else 0\n                                res_value = round(res_value, 3)\n                                metric_map[resource_info][target][res_time] = \\\n                                    res_value\n                                break\n                labels = {\n                    'storage_id': storage_id,\n                    'resource_type': resource_type,\n                    'resource_id': resource_id,\n                    'resource_name': resource_name,\n                    'type': 'RAW',\n                    'unit': unit\n 
                }\n                metric_value = constants.metric_struct(name=target,\n                                                       labels=labels,\n                                                       values=metric_map.get(\n                                                           resource_info).get(\n                                                           target))\n                metrics.append(metric_value)\n\n    @staticmethod\n    def count_metric_data(last_data, now_data, interval, target, metric_type,\n                          metric_map, res_id):\n        if not target:\n            return\n        if 'CACHEHITRATIO' not in metric_type.upper():\n            value = SSHHandler.count_difference(now_data.get(target),\n                                                last_data.get(target))\n        else:\n            value = now_data.get(\n                SSHHandler.VOLUME_PERF_METRICS.get(metric_type))\n        if 'THROUGHPUT' in metric_type.upper():\n            value = value / interval / units.Mi\n        elif 'IOSIZE' in metric_type.upper():\n            value = value / units.Ki\n        elif 'IOPS' in metric_type.upper():\n            value = int(value / interval)\n        elif 'RESPONSETIME' in metric_type.upper():\n            value = value / interval\n        value = round(value, 3)\n        if metric_map.get(res_id):\n            if metric_map.get(res_id).get(metric_type):\n                if metric_map.get(res_id).get(metric_type).get(\n                        now_data.get('time')):\n                    metric_map[res_id][metric_type][now_data.get('time')] \\\n                        += value\n                else:\n                    metric_map[res_id][metric_type][now_data.get('time')] \\\n                        = value\n            else:\n                metric_map[res_id][metric_type] = {now_data.get('time'): value}\n        else:\n            metric_map[res_id] = {metric_type: {now_data.get('time'): value}}\n\n    @staticmethod\n    def count_difference(now_value, last_value):\n        return now_value if now_value < last_value else now_value - last_value\n\n    @staticmethod\n    def handle_volume_cache_hit(now_data, last_data):\n        rh = SSHHandler.count_difference(now_data.get('rh'),\n                                         last_data.get('rh'))\n        wh = SSHHandler.count_difference(now_data.get('wh'),\n                                         last_data.get('wh'))\n        rht = SSHHandler.count_difference(now_data.get('rht'),\n                                          last_data.get('rht'))\n        wht = SSHHandler.count_difference(now_data.get('wht'),\n                                          last_data.get('wht'))\n        rhr = rh * 100 / rht if rht > 0 else 0\n        whr = wh * 100 / wht if wht > 0 else 0\n        hrt = rhr + whr\n        now_data['rhr'] = rhr\n        now_data['whr'] = whr\n        now_data['hrt'] = hrt\n\n    def get_data_from_each_file(self, file, metric_map, target_list,\n                                resource_type, last_data):\n        with self.ssh_pool.item() as ssh:\n            local_path = '%s/%s' % (\n                os.path.abspath(os.path.join(os.getcwd())),\n                consts.LOCAL_FILE_PATH)\n            file_xml = Tools.get_remote_file_to_xml(\n                ssh, file[1], local_path,\n                consts.REMOTE_FILE_PATH)\n            if not file_xml:\n                return\n            for data in file_xml:\n                if re.sub(u\"\\\\{.*?}\", \"\", data.tag) == \\\n
                        SSHHandler.TARGET_RESOURCE_RELATION.get(\n                            resource_type):\n                    if resource_type == constants.ResourceType.PORT:\n                        if data.attrib.get('fc_wwpn'):\n                            resource_info = data.attrib.get('fc_wwpn')\n                        else:\n                            continue\n                    elif resource_type == constants. \\\n                            ResourceType.CONTROLLER:\n                        resource_info = '%s_%s' % (\n                            int(data.attrib.get('node_id'), 16),\n                            data.attrib.get('id'))\n                    else:\n                        resource_info = '%s_%s' % (data.attrib.get('idx'),\n                                                   data.attrib.get('id'))\n                    now_data = SSHHandler.package_xml_data(data.attrib,\n                                                           file[0],\n                                                           resource_type)\n                    if last_data.get(resource_info):\n                        interval = (int(file[0]) - last_data.get(\n                            resource_info).get('time')) / units.k\n                        if interval <= 0:\n                            break\n                        if resource_type == constants.ResourceType.VOLUME:\n                            SSHHandler.handle_volume_cache_hit(\n                                now_data, last_data.get(resource_info))\n                        for target in target_list:\n                            device_target = SSHHandler. \\\n                                RESOURCE_PERF_MAP.get(resource_type)\n                            SSHHandler.count_metric_data(\n                                last_data.get(resource_info),\n                                now_data, interval,\n                                device_target.get(target),\n                                target, metric_map, resource_info)\n                        last_data[resource_info] = now_data\n                    else:\n                        last_data[resource_info] = now_data\n\n    def get_stats_from_file(self, file_list, metric_map, target_list,\n                            resource_type, start_time, end_time):\n        if not file_list:\n            return\n        find_first_file = False\n        recent_file = None\n        last_data = {}\n        for file in file_list:\n            if file[0] >= start_time and file[0] <= end_time:\n                if find_first_file is False:\n                    if recent_file:\n                        self.get_data_from_each_file(recent_file, metric_map,\n                                                     target_list,\n                                                     resource_type,\n                                                     last_data)\n                    self.get_data_from_each_file(file, metric_map, target_list,\n                                                 resource_type, last_data)\n                    find_first_file = True\n                else:\n                    self.get_data_from_each_file(file, metric_map, target_list,\n                                                 resource_type, last_data)\n            recent_file = file\n\n    @staticmethod\n    def package_xml_data(file_data, file_time, resource_type):\n        rb = 0\n        wb = 0\n        res_time = 0\n        rh = 0\n        wh = 0\n        rht = 0\n        wht = 0\n        if resource_type ==
 constants.ResourceType.PORT:\n            rb = (int(file_data.get('cbr')) + int(file_data.get('hbr')) + int(\n                file_data.get('lnbr')) + int(\n                file_data.get('rmbr'))) * SSHHandler.BYTES_TO_BIT\n            wb = (int(file_data.get('cbt')) + int(file_data.get('hbt')) + int(\n                file_data.get('lnbt')) + int(\n                file_data.get('rmbt'))) * SSHHandler.BYTES_TO_BIT\n            ro = int(file_data.get('cer')) + int(file_data.get('her')) + int(\n                file_data.get('lner')) + int(file_data.get('rmer'))\n            wo = int(file_data.get('cet')) + int(file_data.get('het')) + int(\n                file_data.get('lnet')) + int(file_data.get('rmet'))\n            res_time = int(file_data.get('dtdt', 0)) / units.Ki\n        else:\n            if resource_type == constants.ResourceType.VOLUME:\n                rb = int(file_data.get('rb')) * SSHHandler.BLOCK_SIZE\n                wb = int(file_data.get('wb')) * SSHHandler.BLOCK_SIZE\n                rh = int(file_data.get('ctrhs'))\n                wh = int(file_data.get('ctwhs'))\n                rht = int(file_data.get('ctrs'))\n                wht = int(file_data.get('ctws'))\n                res_time = int(file_data.get('xl'))\n            elif resource_type == constants.ResourceType.DISK:\n                rb = int(file_data.get('rb')) * SSHHandler.BLOCK_SIZE\n                wb = int(file_data.get('wb')) * SSHHandler.BLOCK_SIZE\n                res_time = int(file_data.get('rq')) + int(file_data.get('wq'))\n            elif resource_type == constants.ResourceType.CONTROLLER:\n                rb = int(file_data.get('rb')) * SSHHandler.BYTES_TO_BIT\n                wb = int(file_data.get('wb')) * SSHHandler.BYTES_TO_BIT\n                res_time = int(file_data.get('rq')) + int(file_data.get('wq'))\n            ro = int(file_data.get('ro'))\n            wo = int(file_data.get('wo'))\n        now_data = {\n            'rb': rb,\n            'wb': wb,\n            'ro': ro,\n            'wo': wo,\n            'tb': rb + wb,\n            'to': ro + wo,\n            'rh': rh,\n            'wh': wh,\n            'rht': rht,\n            'wht': wht,\n            'res_time': res_time,\n            'time': int(file_time)\n        }\n        return now_data\n\n    def get_stats_file_data(self, file_map, res_type, metrics, storage_id,\n                            target_list, start_time, end_time):\n        metric_map = {}\n        for file_type in file_map:\n            file_list = file_map.get(file_type)\n            if 'Nv' in file_type and res_type == constants.ResourceType.VOLUME:\n                self.get_stats_from_file(file_list, metric_map, target_list,\n                                         constants.ResourceType.VOLUME,\n                                         start_time, end_time)\n            elif 'Nm' in file_type and res_type == constants.ResourceType.DISK:\n                self.get_stats_from_file(file_list, metric_map, target_list,\n                                         constants.ResourceType.DISK,\n                                         start_time, end_time)\n            elif 'Nn' in file_type and res_type == constants.ResourceType.PORT:\n                self.get_stats_from_file(file_list, metric_map, target_list,\n                                         constants.ResourceType.PORT,\n                                         start_time, end_time)\n            elif 'Nn' in file_type and res_type == \\\n                    constants.ResourceType.CONTROLLER:\n
                self.get_stats_from_file(file_list, metric_map, target_list,\n                                         constants.ResourceType.CONTROLLER,\n                                         start_time, end_time)\n        self.package_data(storage_id, res_type, metrics, metric_map)\n\n    def collect_perf_metrics(self, storage_id, resource_metrics,\n                             start_time, end_time):\n        metrics = []\n        file_map = {}\n        try:\n            self.get_stats_filelist(file_map)\n            if resource_metrics.get(constants.ResourceType.VOLUME):\n                self.get_stats_file_data(\n                    file_map,\n                    constants.ResourceType.VOLUME,\n                    metrics,\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.VOLUME),\n                    start_time, end_time)\n            if resource_metrics.get(constants.ResourceType.DISK):\n                self.get_stats_file_data(\n                    file_map,\n                    constants.ResourceType.DISK,\n                    metrics,\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.DISK),\n                    start_time, end_time)\n            if resource_metrics.get(constants.ResourceType.PORT):\n                self.get_stats_file_data(\n                    file_map,\n                    constants.ResourceType.PORT,\n                    metrics,\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.PORT),\n                    start_time, end_time)\n            if resource_metrics.get(constants.ResourceType.CONTROLLER):\n                self.get_stats_file_data(\n                    file_map,\n                    constants.ResourceType.CONTROLLER,\n                    metrics,\n                    storage_id,\n                    resource_metrics.get(constants.ResourceType.CONTROLLER),\n                    start_time, end_time)\n        except Exception as err:\n            err_msg = \"Failed to collect metrics from svc: %s\" % \\\n                      (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n        return metrics\n\n    def get_latest_perf_timestamp(self):\n        latest_time = 0\n        stats_file_command = 'lsdumps -prefix /dumps/iostats'\n        file_list = self.exec_ssh_command(stats_file_command)\n        file_line = file_list.split('\\n')\n        for file in islice(file_line, 1, None):\n            if file:\n                file_arr = ' '.join(file.split()).split(' ')\n                if len(file_arr) > 1:\n                    file_name = file_arr[1]\n                    name_arr = file_name.split('_')\n                    file_time = '20%s%s' % (name_arr[3], name_arr[4])\n                    time_pattern = '%Y%m%d%H%M%S'\n                    tools = Tools()\n                    occur_time = tools.time_str_to_timestamp(\n                        file_time, time_pattern)\n                    if latest_time < occur_time:\n                        latest_time = occur_time\n        return latest_time\n\n    def list_storage_hosts(self, storage_id):\n        try:\n            host_list = []\n            hosts = self.exec_ssh_command('lshost')\n            host_res = hosts.split('\\n')\n            for i in range(1, len(host_res)):\n                if host_res[i] is None or host_res[i] == '':\n                    continue\n                control_str = ' '.join(host_res[i].split())\n
                str_info = control_str.split(' ')\n                host_id = str_info[0]\n                detail_command = 'lshost %s' % host_id\n                detail_info = self.exec_ssh_command(detail_command)\n                host_map = {}\n                self.handle_detail(detail_info, host_map, split=' ')\n                status = SSHHandler.HOST_STATUS_MAP.get(host_map.get('status'))\n                host_result = {\n                    \"name\": host_map.get('name'),\n                    \"storage_id\": storage_id,\n                    \"native_storage_host_id\": host_map.get('id'),\n                    \"os_type\": SSHHandler.OS_TYPE_MAP.get(\n                        host_map.get('type', '').lower()),\n                    \"status\": status\n                }\n                host_list.append(host_result)\n            return host_list\n        except Exception as e:\n            LOG.error(\"Failed to get hosts from svc\")\n            raise e\n\n    def list_masking_views(self, storage_id):\n        try:\n            view_list = []\n            hosts = self.exec_ssh_command('lshostvdiskmap')\n            host_res = hosts.split('\\n')\n            for i in range(1, len(host_res)):\n                if host_res[i] is None or host_res[i] == '':\n                    continue\n                control_str = ' '.join(host_res[i].split())\n                str_info = control_str.split(' ')\n                if len(str_info) > 3:\n                    host_id = str_info[0]\n                    vdisk_id = str_info[3]\n                    view_id = '%s_%s' % (str_info[0], str_info[3])\n                    view_result = {\n                        \"name\": view_id,\n                        \"native_storage_host_id\": host_id,\n                        \"storage_id\": storage_id,\n                        \"native_volume_id\": vdisk_id,\n                        \"native_masking_view_id\": view_id,\n                    }\n                    view_list.append(view_result)\n            return view_list\n        except Exception as e:\n            LOG.error(\"Failed to get masking views from svc\")\n            raise e\n\n    def list_storage_host_initiators(self, storage_id):\n        try:\n            initiator_list = []\n            hosts = self.exec_ssh_command('lshost')\n            host_res = hosts.split('\\n')\n            for i in range(1, len(host_res)):\n                if host_res[i] is None or host_res[i] == '':\n                    continue\n                control_str = ' '.join(host_res[i].split())\n                str_info = control_str.split(' ')\n                host_id = str_info[0]\n                detail_command = 'lshost %s' % host_id\n                detail_info = self.exec_ssh_command(detail_command)\n                init_name = None\n                init_type = None\n                host_id = None\n                for host in detail_info.split('\\n'):\n                    if host:\n                        strinfo = host.split(' ', 1)\n                        key = strinfo[0]\n                        value = None\n                        if len(strinfo) > 1:\n                            value = strinfo[1]\n                        if key == 'WWPN':\n                            init_name = value\n                            init_type = 'fc'\n                        elif key == 'iscsi_name':\n                            init_name = value\n                            init_type = 'iscsi'\n                        elif key == 'id':\n                            host_id = value\n
                        elif key == 'state' and init_name:\n                            status = SSHHandler.INITIATOR_STATUS_MAP.get(value)\n                            init_result = {\n                                \"name\": init_name,\n                                \"storage_id\": storage_id,\n                                \"native_storage_host_initiator_id\": init_name,\n                                \"wwn\": init_name,\n                                \"status\": status,\n                                \"type\": init_type,\n                                \"native_storage_host_id\": host_id\n                            }\n                            initiator_list.append(init_result)\n                            init_name = None\n                            init_type = None\n            return initiator_list\n        except Exception as e:\n            LOG.error(\"Failed to get initiators from svc\")\n            raise e\n"
  },
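  {
    "path": "delfin/drivers/ibm/storwize_svc/ssh_handler_sketch.py",
    "content": "# Illustrative sketch only -- this file is hypothetical and not part of\n# the driver. It restates the counter arithmetic that\n# SSHHandler.count_metric_data/count_difference apply to Storwize iostats\n# dumps: the XML files carry cumulative counters, so a rate is\n# (now - last) / interval, with a fallback when a counter reset (wrapped)\n# between two samples. All sample numbers below are made up.\n\n\ndef count_difference(now_value, last_value):\n    # Delta of two cumulative counters; when the counter wrapped\n    # (now < last), the raw current value is used as the best estimate.\n    return now_value if now_value < last_value else now_value - last_value\n\n\ndef iops(now_ops, last_ops, interval_seconds):\n    # Operations per second between two samples, the same shape of\n    # calculation the driver applies for its 'iops' metric.\n    return int(count_difference(now_ops, last_ops) / interval_seconds)\n\n\nif __name__ == '__main__':\n    # Two samples taken 300 seconds apart.\n    print(iops(150000, 120000, 300))  # -> 100\n    # Counter reset between samples: the raw value becomes the delta.\n    print(iops(900, 1200000, 300))    # -> 3\n"
  },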
  {
    "path": "delfin/drivers/ibm/storwize_svc/storwize_svc.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom delfin.drivers.ibm.storwize_svc import ssh_handler, consts\nfrom delfin.drivers.ibm.storwize_svc.ssh_handler import SSHHandler\n\n\nclass StorwizeSVCDriver(driver.StorageDriver):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.ssh_hanlder = ssh_handler.SSHHandler(**kwargs)\n        self.ssh_hanlder.login()\n\n    def reset_connection(self, context, **kwargs):\n        self.ssh_hanlder.login()\n\n    def get_storage(self, context):\n        return self.ssh_hanlder.get_storage()\n\n    def list_storage_pools(self, context):\n        return self.ssh_hanlder.list_storage_pools(self.storage_id)\n\n    def list_volumes(self, context):\n        return self.ssh_hanlder.list_volumes(self.storage_id)\n\n    def list_controllers(self, context):\n        return self.ssh_hanlder.list_controllers(self.storage_id)\n\n    def list_ports(self, context):\n        return self.ssh_hanlder.list_ports(self.storage_id)\n\n    def list_disks(self, context):\n        return self.ssh_hanlder.list_disks(self.storage_id)\n\n    def list_alerts(self, context, query_para=None):\n        return self.ssh_hanlder.list_alerts(query_para)\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return SSHHandler.parse_alert(alert)\n\n    def clear_alert(self, context, alert):\n        return self.ssh_hanlder.fix_alert(alert)\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}'\n\n    def collect_perf_metrics(self, context, storage_id,\n                             resource_metrics, start_time,\n                             end_time):\n        return self.ssh_hanlder.collect_perf_metrics(\n            storage_id, resource_metrics, start_time, end_time)\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        \"\"\"Get capability of supported driver\"\"\"\n        return {\n            'is_historic': True,\n            'resource_metrics': {\n                constants.ResourceType.VOLUME: consts.VOLUME_CAP,\n                constants.ResourceType.PORT: consts.PORT_CAP,\n                constants.ResourceType.DISK: consts.DISK_CAP,\n                constants.ResourceType.CONTROLLER: consts.CONTROLLER_CAP\n            }\n        }\n\n    def get_latest_perf_timestamp(self, context):\n        return self.ssh_hanlder.get_latest_perf_timestamp()\n\n    def list_storage_hosts(self, context):\n        return self.ssh_hanlder.list_storage_hosts(self.storage_id)\n\n    def list_masking_views(self, context):\n        return self.ssh_hanlder.list_masking_views(self.storage_id)\n\n    def list_storage_host_initiators(self, context):\n        return self.ssh_hanlder.list_storage_host_initiators(self.storage_id)\n"
  },
  {
    "path": "delfin/drivers/inspur/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/inspur/as5500/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/inspur/as5500/as5500.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.drivers.ibm.storwize_svc.storwize_svc import StorwizeSVCDriver\n\n\nclass As5500Driver(StorwizeSVCDriver):\n    def get_storage(self, context):\n        storage = super().get_storage(context)\n        storage['vendor'] = 'Inspur'\n        return storage\n"
  },
  {
    "path": "delfin/drivers/macro_san/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/macro_san/ms/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/macro_san/ms/consts.py",
    "content": "# Copyright 2022 The SODA Authors.\n# Copyright (c) 2022 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nfrom delfin.common import constants\n\n# Command\nODSP_SH = '/odsp/scripts/odsp_sh.sh'\nSYSTEM_QUERY = 'system mgt query'\nSYSTEM_VERSION = 'system mgt getversion'\nSYSTEM_CPU = 'system mgt getcpuinfo'\nPOOL_LIST = 'pool mgt getlist'\nRAID_LIST = 'raid mgt getlist -p {}'\nLUN_LIST = 'lun mgt getlist -p {}'\nLUN_QUERY = 'lun mgt query -n {}'\nDSU_LIST = 'dsu mgt getlist'\nDISK_LIST = 'disk mgt getlist -d {}'\nDISK_QUERY = 'disk mgt query -d {}'\nHA_STATUS = 'ha mgt getstatus'\nCLIENT_INITIATOR_GETLIST = 'client initiator getlist -t all'\nCLIENT_LIST = 'client mgt getclientlist'\nCLIENT_HOST = 'client host gethostlist'\nHOST_GROUP = 'client hostgroup gethglist'\nHOST_GROUP_N = 'client hostgroup gethostlist -n {}'\nVOLUME_GROUP = 'client lungroup getlglist'\nVOLUME_GROUP_N = 'client lungroup getlunlist -n {}'\nSHARE_LUN_LIST = 'client mgt getsharelunlist -n {}'\nMAPVIEW = 'client mapview getlist'\nTARGET_QUERY_PORT_LIST = 'client target queryportlist'\nSAS_PORT_LIST = 'system sas getportlist -c {}:{}'\n\n# character\nSUCCESSFUL_TAG = 'Command completed successfully.'\nFAILED_TAG = 'Command failed.'\nUNKNOWN_COMMAND_TAG = 'Unknown command.'\nPORT_SUCCESSFUL_TAG = 'Commandcompletedsuccessfully.'\nCOLON = ':'\nLEFT_HALF_BRACKET = '['\nAFTER_HALF_BRACKET = 'Version]'\nCPU_INFORMATION_BRACKET = 'CPU Information]'\nSP = 'SP'\nODSP_MSC_VERSION_KEY = 'ODSP_MSCVersion'\nODSP_DRIVER_VERSION_KEY = 'ODSP_DriverVersion'\nPROCESSOR_VENDOR_KEY = 'Processor0Vendor_id'\nPROCESSOR_FREQUENCY_KEY = 'Processor0CPUFrequency'\nSTORAGE_VENDOR = 'MacroSAN'\nFIELDS_NAME = 'Name:'\nFIELDS_ENABLE = 'enable'\nFIELDS_INITIATOR_ALIAS = 'InitiatorAlias:'\nFIELDS_INITIATOR_HOST = 'N/A'\nFIELDS_HOST_NAME = 'Host Name:'\nFIELDS_HOST_NAME_TWO = 'HostName:'\nFIELDS_HOST_GROUP_NAME = 'Host Group Name:'\nFIELDS_VOLUME_GROUP_NAME = 'LUN Group Name:'\nFIELDS_LUN_NAME = 'LUNName:'\nFIELDS_MAPVIEW_NAME = 'Mapview Name:'\nFIELDS_LINK_STATUS = 'Link Status'\nDSU = 'DSU-'\nDISK = 'Disk-'\nHA_RUNNING_STATUS = 'HARunningStatus'\nPORT = 'port'\nGBPS = 'Gbps'\nMBPS = 'Mbps'\nKBPS = 'KBPS'\nTIME_PATTERN = '%Y-%m-%d %H:%M:%S'\n\n# regular expression\nSYSTEM_CPU_SP_REGULAR = '^\\\\[SP\\\\d.* CPU.*]'\nSYSTEM_VERSION_SP_REGULAR = '\\\\[SP\\\\d.* Version\\\\]'\nTARGET_PORT_REGULAR = 'port\\\\-\\\\d\\\\:\\\\d\\\\:\\\\d$'\n\n# The time limit\nTIME_LIMIT = 8\n\n# model\nMODEL_PATH = '{}/delfin/drivers/macro_san/ms/file/{}{}'\nSTORAGE_INFO_REGULAR = '^storage_info.*\\\\.xls$'\nSTORAGE_INFO_MODEL_REGULAR = '^MS'\nFTP_PATH_TMP = '/tmp'\nFTP_PATH_FILE = '/tmp/{}'\n\n# alert\nMACRO_SAN_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'\nOS_PATH = '{}/delfin/drivers/macro_san/ms/file/alert{}'\nALERT_FILE_NAME = 'alarm_history_query.csv.sp'\nFTP_ALERT_PATH = '/odsp/log/remote'\nYES_FIELDS = '是'\nSEVERITY_MAP = {\n    'fatal': constants.Severity.FATAL,\n    '紧急': 
constants.Severity.FATAL,\n    'critical': constants.Severity.CRITICAL,\n    '重要': constants.Severity.MAJOR,\n    'major': constants.Severity.MAJOR,\n    'minor': constants.Severity.MINOR,\n    'warning': constants.Severity.WARNING,\n    '警告': constants.Severity.WARNING,\n    'informational': constants.Severity.INFORMATIONAL,\n    'NotSpecified': constants.Severity.NOT_SPECIFIED\n}\n\n\nclass digital_constant(object):\n    ZERO_INT = 0\n    ONE_INT = 1\n    MINUS_ONE_INT = -1\n    TWO_INT = 2\n    THREE_INT = 3\n    FOUR_INT = 4\n    FIVE_INT = 5\n    SIX_INT = 6\n    SEVEN_INT = 7\n    TWELVE_INT = 12\n    SIXTEEN_INT = 16\n    THIRTY_SIX = 36\n    SIXTY = 60\n\n\nSTORAGE_STATUS_MAP = {\n    'normal': constants.StorageStatus.NORMAL,\n    'offline': constants.StorageStatus.OFFLINE,\n    'abnormal': constants.StorageStatus.ABNORMAL,\n    'takeover': constants.StorageStatus.NORMAL,\n    'degraded': constants.StorageStatus.DEGRADED,\n    'unknown': constants.StorageStatus.UNKNOWN,\n}\n\nLIST_VOLUMES_STATUS_MAP = {\n    'normal': constants.StorageStatus.NORMAL,\n    'offline': constants.StorageStatus.OFFLINE,\n    'abnormal': constants.StorageStatus.ABNORMAL,\n    'error': constants.StorageStatus.ABNORMAL,\n    'fault': constants.StorageStatus.ABNORMAL,\n    'faulty': constants.StorageStatus.ABNORMAL,\n    'degraded': constants.StorageStatus.DEGRADED,\n    'unknown': constants.StorageStatus.UNKNOWN\n}\nVOLUME_TYPE_MAP = {\n    'disable': constants.VolumeType.THICK,\n    'enable': constants.VolumeType.THIN\n}\n\n\nclass POOL_STATUS_ABNORMAL(object):\n    FAULTY = 'faulty'\n    FAULT = 'fault'\n    ERROR = 'error'\n    ABNORMAL = 'abnormal'\n    ALL = (FAULTY, FAULT, ERROR, ABNORMAL)\n\n\nclass POOL_STATUS_NORMAL(object):\n    OFFLINE = 'offline'\n    NORMAL = 'normal'\n    ALL = (OFFLINE, NORMAL)\n\n\nPOOLS_STATUS_MAP = {\n    'normal': constants.StoragePoolStatus.NORMAL,\n    'offline': constants.StoragePoolStatus.OFFLINE,\n    'abnormal': constants.StoragePoolStatus.ABNORMAL,\n    'error': constants.StoragePoolStatus.ABNORMAL,\n    'fault': constants.StoragePoolStatus.ABNORMAL,\n    'faulty': constants.StoragePoolStatus.ABNORMAL,\n    'unknown': constants.StoragePoolStatus.UNKNOWN,\n    'degraded': constants.StoragePoolStatus.DEGRADED\n}\n\nDISK_PHYSICAL_TYPE_MAP = {\n    'ssd': constants.DiskPhysicalType.SSD,\n    'sata': constants.DiskPhysicalType.SATA,\n    'sas': constants.DiskPhysicalType.SAS,\n    'nl-ssd': constants.DiskPhysicalType.NL_SSD,\n    'fc': constants.DiskPhysicalType.FC,\n    'lun': constants.DiskPhysicalType.LUN,\n    'ata': constants.DiskPhysicalType.ATA,\n    'flash': constants.DiskPhysicalType.FLASH,\n    'vmdisk': constants.DiskPhysicalType.VMDISK,\n    'nl-sas': constants.DiskPhysicalType.NL_SAS,\n    'ssd-card': constants.DiskPhysicalType.SSD_CARD,\n    'sas-flash-vp': constants.DiskPhysicalType.SAS_FLASH_VP,\n    'hdd': constants.DiskPhysicalType.HDD,\n    'unknown': constants.DiskPhysicalType.UNKNOWN\n}\n\nDISK_LOGICAL_TYPE_MAP = {\n    'free': constants.DiskLogicalType.FREE,\n    'member': constants.DiskLogicalType.MEMBER,\n    'hotspare': constants.DiskLogicalType.HOTSPARE,\n    'cache': constants.DiskLogicalType.CACHE,\n    'aggregate': constants.DiskLogicalType.AGGREGATE,\n    'broken': constants.DiskLogicalType.BROKEN,\n    'foreign': constants.DiskLogicalType.FOREIGN,\n    'labelmaint': constants.DiskLogicalType.LABELMAINT,\n    'maintenance': constants.DiskLogicalType.MAINTENANCE,\n    'shared': constants.DiskLogicalType.SHARED,\n    'spare': 
constants.DiskLogicalType.SPARE,\n    'unassigned': constants.DiskLogicalType.UNASSIGNED,\n    'unsupported': constants.DiskLogicalType.UNSUPPORTED,\n    'remote': constants.DiskLogicalType.REMOTE,\n    'mediator': constants.DiskLogicalType.MEDIATOR,\n    'data': constants.DiskLogicalType.DATA,\n    'datadisk': constants.DiskLogicalType.DATA,\n    'unknown': constants.DiskLogicalType.UNKNOWN\n}\n\nDISK_STATUS_MAP = {\n    'normal': constants.DiskStatus.NORMAL,\n    'abnormal': constants.DiskStatus.ABNORMAL,\n    'fault': constants.DiskStatus.ABNORMAL,\n    'faulty': constants.DiskStatus.ABNORMAL,\n    'degraded': constants.DiskStatus.DEGRADED,\n    'offline': constants.DiskStatus.OFFLINE\n}\n\nCONTROLLERS_STATUS_MAP = {\n    'normal': constants.ControllerStatus.NORMAL,\n    'dual--single': constants.ControllerStatus.NORMAL,\n    'single-single': constants.ControllerStatus.NORMAL,\n    'single': constants.ControllerStatus.NORMAL,\n    'offline': constants.ControllerStatus.OFFLINE,\n    'absent--poweroff': constants.ControllerStatus.OFFLINE,\n    'poweroff': constants.ControllerStatus.OFFLINE,\n    'fault': constants.ControllerStatus.FAULT,\n    'error': constants.ControllerStatus.FAULT,\n    'abnormal': constants.ControllerStatus.FAULT,\n    'degraded': constants.ControllerStatus.DEGRADED,\n    'double-idle': constants.ControllerStatus.NORMAL,\n    'double': constants.ControllerStatus.NORMAL,\n    'triple': constants.ControllerStatus.NORMAL,\n    'quadruple': constants.ControllerStatus.NORMAL,\n    'unknown': constants.ControllerStatus.UNKNOWN\n}\n\nPORT_CONNECTION_STATUS_MAP = {\n    '1': constants.PortConnectionStatus.CONNECTED,\n    '2': constants.PortConnectionStatus.DISCONNECTED,\n    'Full-Linkup': constants.PortConnectionStatus.CONNECTED,\n    'Linkdown': constants.PortConnectionStatus.DISCONNECTED\n}\n\nINITIATOR_TYPE_MAP = {\n    'fc': constants.InitiatorType.FC,\n    'iscsi': constants.InitiatorType.ISCSI,\n    'roce': constants.InitiatorType.NVME_OVER_ROCE,\n    'sas': constants.InitiatorType.SAS,\n    'nvme-of': constants.InitiatorType.NVME_OVER_FABRIC,\n    'unknown': constants.InitiatorType.UNKNOWN\n}\n\nINITIATOR_STATUS_MAP = {\n    'offline': constants.InitiatorStatus.OFFLINE,\n    'online': constants.InitiatorStatus.ONLINE,\n    'normal': constants.InitiatorStatus.ONLINE,\n    'n/a': constants.InitiatorStatus.UNKNOWN\n}\n\nHOST_OS_TYPES_MAP = {\n    'linux': constants.HostOSTypes.LINUX,\n    'windows': constants.HostOSTypes.WINDOWS,\n    'windows2008': constants.HostOSTypes.WINDOWS,\n    'solaris': constants.HostOSTypes.SOLARIS,\n    'hp-ux': constants.HostOSTypes.HP_UX,\n    'hp_unix': constants.HostOSTypes.HP_UX,\n    'aix': constants.HostOSTypes.AIX,\n    'xenserver': constants.HostOSTypes.XEN_SERVER,\n    'vmware esx': constants.HostOSTypes.VMWARE_ESX,\n    'esxi': constants.HostOSTypes.VMWARE_ESX,\n    'linux_vis': constants.HostOSTypes.LINUX_VIS,\n    'windows server 2012': constants.HostOSTypes.WINDOWS_SERVER_2012,\n    'windows2012': constants.HostOSTypes.WINDOWS_SERVER_2012,\n    'oracle vm': constants.HostOSTypes.ORACLE_VM,\n    'open vms': constants.HostOSTypes.OPEN_VMS,\n    'mac os': constants.HostOSTypes.MAC_OS,\n    'svc': constants.HostOSTypes.UNKNOWN,\n    'other': constants.HostOSTypes.UNKNOWN,\n    'suse': constants.HostOSTypes.UNKNOWN,\n    'unknown': constants.HostOSTypes.UNKNOWN\n}\n\nPARSE_ALERT_ALERT_ID = '1.3.6.1.2.1.1.3.0'\nPARSE_ALERT_TIME = '1.3.6.1.2.1.25.1.2'\nPARSE_ALERT_STORAGE = '1.3.6.1.4.1.35904.1.2.1.1'\nPARSE_ALERT_NAME = 
'1.3.6.1.4.1.35904.1.2.1.4.1'\nPARSE_ALERT_LOCATION = '1.3.6.1.4.1.35904.1.2.1.4.2'\nPARSE_ALERT_DESCRIPTION = '1.3.6.1.4.1.35904.1.2.1.4.3'\nPARSE_ALERT_SEVERITY = '1.3.6.1.4.1.35904.1.2.1.4.4'\n\nALERT_NAME_CONFIG = {\n    'power_supply_failed': '设备供电异常',\n    'power_supply_failed_reissue': '设备供电异常重发',\n    'power_supply_normal': '设备供电恢复正常',\n    'power_supply_abnormal': '设备供电异常',\n    'power_supply_abnormal_reissue': '设备供电异常重发',\n    'power_supply_absent': '电源模块不在位',\n    'power_supply_absent_reissue': '电源模块不在位重发',\n    'fan_normal': '风扇模块恢复正常',\n    'fan_failed': '风扇模块故障',\n    'fan_failed_reissue': '风扇模块故障重发',\n    'fan_absent': '风扇模块不在位',\n    'fan_absent_reissue': '风扇模块不在位重发',\n    'battery_normal': '电池模块恢复正常',\n    'battery_failed': '电池模块故障',\n    'battery_failed_reissue': '电池模块故障重发',\n    'battery_absent': '电池模块不在位',\n    'battery_absent_reissue': '电池模块不在位重发',\n    'battery_charging': '电池模块正在充电',\n    'battery_will_expire': '电池模块即将超期',\n    'battery_expired': '电池模块超期',\n    'battery_expired_reissue': '电池模块超期重发',\n    'battery_model_inconsistent': '电池模块型号不一致',\n    'temperature_normal': '温度恢复正常',\n    'temperature_warning': '温度一般告警',\n    'temperature_warning_reissue': '温度一般告警重发',\n    'temperature_critical': '温度严重告警',\n    'temperature_critical_reissue': '温度严重告警重发',\n    'Voltage_normal': '电压恢复正常',\n    'Voltage_warning': '电压一般告警',\n    'Voltage_warning_reissue': '电压一般告警重发',\n    'Voltage_critical': '电压严重告警',\n    'Voltage_critical_reissue': '电压严重告警重发',\n    'sp_power_on': 'SP开机',\n    'sp_power_off': 'SP关机',\n    'sp_absent': 'SP不在位',\n    'sp_memory_shrink': 'SP内存变小',\n    'sp_reboot_for_memory_insufficient': 'SP内存不足自动重启',\n    'sp_hardware_abnormally': 'SP硬件异常',\n    'sp_boot_disk_warning': 'SP系统盘告警',\n    'ha_auto_recover_disabled': 'HA自动恢复选项被禁用',\n    'ha_heartbeat_lost': 'HA心跳丢失',\n    'ha_self_detect_failure': 'HA自检发现故障',\n    'ha_takeover': 'SP被接管',\n    'ha_takeover_abnormally': 'SP接管异常',\n    'ha_recover_successfully': 'SP恢复成功',\n    'ha_recover_abnormally': 'SP恢复异常',\n    'ha_peer_sp_abnormally': '对端SP异常',\n    'cpu_utilization_normal': 'CPU利用率恢复正常',\n    'cpu_utilization_warning': 'CPU利用率一般告警',\n    'cpu_utilization_serious': 'CPU利用率重要告警',\n    'cpu_utilization_critical': 'CPU利用率严重告警',\n    'memory_utilization_normal': '内存利用率恢复正常',\n    'memory_utilization_warning': '内存利用率告警',\n    'sp_average_responsetime_normal': 'SP平均延时恢复正常',\n    'sp_average_responsetime_warning': 'SP平均延时告警',\n    'host_average_responsetime_normal': '主机平均延时恢复正常',\n    'host_average_responsetime_warning': '主机平均延时告警',\n    'iscsi_port_average_responsetime_normal': 'iSCSI端口平均延时恢复正常',\n    'iscsi_port_average_responsetime_warning': 'iSCSI端口平均延时告警',\n    'fc_port_average_responsetime_normal': 'FC端口平均延时恢复正常',\n    'fc_port_average_responsetime_warning': 'FC端口平均延时告警',\n    'nvmf_port_average_responsetime_normal': 'NVMf端口平均延时恢复正常',\n    'nvmf_port_average_responsetime_warning': 'NVMf端口平均延时告警',\n    'lun_average_responsetime_normal': 'LUN平均延时恢复正常',\n    'lun_average_responsetime_warning': 'LUN平均延时告警',\n    'device_busy': '设备管理通道忙',\n    'sys_lun_cache_capacity_insufficient': 'SYS-LUN-Cache空间不足',\n    'sys_lun_log_capacity_insufficient': 'SYS-LUN-Log空间不足',\n    'global_write_cache_disabled_manually': '全局写缓存被手动禁用',\n    'global_write_cache_disabled_automatically': '全局写缓存被自动禁用',\n    'cache_vault_has_data': 'Cache Vault中有脏数据',\n    'software_version_inconsistent': '软件版本不一致',\n    'license_expired': 'License超期',\n    'system_failure_reboot': '系统异常重启',\n    'io_card_safe_remove': 'IO卡安全下电',\n    
'io_card_pullout_forcibly': 'IO卡暴力拔出',\n    'io_card_abnormal': 'IO卡异常',\n    'port_linkup': '端口已连接',\n    'port_linkdown': '端口断开连接',\n    'port_link_recovery': '端口链路恢复',\n    'port_link_unstable': '端口链路不稳定',\n    'port_abnormal': '端口异常',\n    'port_closed': '端口被关闭',\n    'port_speed_nonoptimal': '端口非最大速率运行',\n    'port_optical_transceiver_mismatch': '端口光模块不匹配',\n    'sas_phy_disabled': 'SAS PHY被禁用',\n    'sas_phy_inconsistent': 'SAS_PHY速率不一致',\n    'sas_port_inconsistent': 'SAS端口连接状态不一致',\n    'i_t_connection_recovery': 'I_T连接恢复',\n    'i_t_connection_unstable': 'I_T连接不稳定',\n    'i_t_connected': 'I_T建立连接',\n    'i_t_unconnected': 'I_T未建立连接',\n    'i_t_l_insufficient': 'I_T_L低于预期',\n    'initiator_has_unestablished_connection': 'Initiator存在未建立的连接',\n    'nvmf_subsystem_full_connected': 'NVMf Subsystem完全连接',\n    'nvmf_subsystem_partial_connected': 'NVMf Subsystem部分连接',\n    'nvmf_subsystem_unconnected': 'NVMf Subsystem未连接',\n    'ep_online': 'EP上线',\n    'ep_offline': 'EP离线',\n    'ep_install_unproperly': 'EP未安装到位',\n    'ep_disordered_link': 'EP拓扑乱序',\n    'dsu_inconsistent_link': 'DSU拓扑不一致',\n    'disk_online': '磁盘上线',\n    'disk_offline': '磁盘异常离线',\n    'disk_safe_remove': '磁盘安全下电',\n    'disk_pullout_forcibly': '磁盘暴力拔出',\n    'disk_warning': '磁盘告警',\n    'disk_failed': '磁盘故障',\n    'disk_path_missing': '磁盘路径丢失',\n    'disk_poweron_time_warning': '磁盘上电时间告警',\n    'disk_poweron_time_warning_reissue': '磁盘上电时间告警重发',\n    'ssd_life_remaining_warning': 'SSD预计剩余寿命预警',\n    'ssd_life_remaining_critical': 'SSD预计剩余寿命严重告警',\n    'ssd_time_remaining_warning': 'SSD预计可用时间预警',\n    'ssd_time_remaining_critical': 'SSD预计可用时间严重告警',\n    'ssd_interface_unknown': 'SSD接口类型未知',\n    'raid_normal': 'RAID恢复正常',\n    'raid_degraded': 'RAID降级',\n    'raid_faulty': 'RAID错误',\n    'raid_failed': 'RAID故障',\n    'raid_rebuild_start': 'RAID开始重建',\n    'raid_rebuild_successfully': 'RAID完成重建',\n    'raid_cannot_rebuild': 'RAID重建等待热备盘',\n    'raid_rebuild_paused_abnormally': 'RAID重建失败',\n    'raid_spare_capacity_warning': 'RAID热备空间告警',\n    'raid_sync_successfully': 'RAID完成同步',\n    'raid_sync_failed': 'RAID同步失败',\n    'raid_disk_type_inconsistent': 'RAID成员磁盘类型不一致',\n    'lun_normal': 'LUN恢复正常',\n    'lun_faulty': 'LUN错误',\n    'lun_write_zero_failed': 'LUN自动置零功能失效',\n    'lun_write_cache_frozen': 'LUN写缓存被冻结',\n    'thinlun_expand_failed': 'Thin-LUN自动扩容失败',\n    'thinlun_physical_capacity_will_useup': 'Thin-LUN物理空间即将用光',\n    'thinlun_physical_capacity_has_usedup': 'Thin-LUN物理空间已经用光',\n    'thinlun_metadata_abnormal': 'Thin-LUN元数据异常',\n    'pool_capacity_normal': '存储池空间使用率恢复正常',\n    'pool_capacity_warning': '存储池空间使用率一般告警',\n    'pool_capacity_serious': '存储池空间使用率重要告警',\n    'pool_capacity_critical': '存储池空间使用率严重告警',\n    'pool_capacity_has_usedup': '存储池空间已经用光',\n    'pool_capacity_over_quota': '存储池已分配容量超出配额',\n    'pool_user_capacity_over_quota': '存储池用户容量超出配额',\n    'pool_data_protection_capacity_over_quota': '存储池数据保护容量超出配额',\n    'volume_online': '卷上线',\n    'volume_offline': '卷离线',\n    'volume_path_recovery': '卷路径恢复',\n    'volume_path_missing': '卷路径丢失',\n    'volume_attached': '卷联机',\n    'volume_detached': '卷脱机',\n    'volume_io_error': '卷IO错误',\n    'volume_average_responsetime_normal': '卷平均延时恢复正常',\n    'volume_average_responsetime_warning': '卷平均延时告警',\n    'snapshot_resource_full': '快照资源空间即将用光',\n    'snapshot_resource_invalid': '快照资源数据无效',\n    'snapshot_resource_expand_successfully': '快照资源自动扩容成功',\n    'snapshot_resource_expand_failed': '快照资源自动扩容失败',\n    'snapshot_point_delete_automatically': '自动删除快照时间点',\n   
 'snapshot_point_create_failed': '自动创建快照时间点失败',\n    'snapshot_rollback_successfully': '快照回滚成功',\n    'snapshot_rollback_failed': '快照回滚失败',\n    'replication_start': '开始复制',\n    'replication_successfully': '复制成功',\n    'replication_failed': '复制失败',\n    'replication_scan_failed': '扫描失败',\n    'replication_replica_faulty': '副本资源复制状态异常',\n    'xan_link_unreachable': 'XAN链路不可达',\n    'xan_link_reachable': 'XAN链路恢复',\n    'sdas_link_unreachable': 'SDAS链路不可达',\n    'sdas_link_reachable': 'SDAS链路恢复',\n    'arbiter_unreachable': '节点不能访问仲裁者',\n    'arbiter_reachable': '节点可以访问仲裁者',\n    'mirror_auto_swap_successfully': '镜像对自动反转成功',\n    'mirror_auto_swap_failed': '镜像对自动反转失败',\n    'mirror_unsynchronized': '镜像对未同步',\n    'mirror_synchronized': '镜像对恢复已同步',\n    'mirror_negotiating': '镜像对是协商状态',\n    'clone_sync_start': '开始克隆同步',\n    'clone_sync_successfully': '克隆同步成功',\n    'clone_sync_failed': '克隆同步失败',\n    'migrate_start': '开始迁移',\n    'migrate_successfully': '迁移成功',\n    'migrate_failed': '迁移失败',\n    'migrate_negotiating': '迁移对是协商状态',\n    'migrate_auto_disable_failed': '迁移自动禁用失败',\n    'migrate_itl_remaining': '迁移残留ITL',\n    'dedup_data_exceed_spec': '重删数据量超过规格',\n    'dedup_discard_some_fingerprints': '重删丢弃部分指纹',\n    'sp_temperature_normal': 'SP温度恢复正常',\n    'sp_temperature_warning': 'SP温度一般告警',\n    'sp_temperature_warning_reissue': 'SP温度一般告警重发',\n    'sp_temperature_critical': 'SP温度严重告警',\n    'sp_temperature_critical_reissue': 'SP温度严重告警重发',\n    'sp_voltage_normal': 'SP电压恢复正常',\n    'sp_voltage_warning': 'SP电压一般告警',\n    'sp_voltage_warning_reissue': 'SP电压一般告警重发',\n    'sp_voltage_critical': 'SP电压严重告警',\n    'sp_voltage_critical_reissue': 'SP电压严重告警重发',\n    'ep_temperature_normal': 'EP温度恢复正常',\n    'ep_temperature_warning': 'EP温度一般告警',\n    'ep_temperature_warning_reissue': 'EP温度一般告警重发',\n    'ep_temperature_critical': 'EP温度严重告警',\n    'ep_temperature_critical_reissue': 'EP温度严重告警重发',\n    'spu_bat_normal': 'SPU电池模块恢复正常',\n    'spu_bat_failed': 'SPU电池模块变为故障',\n    'spu_bat_failed_reissue': 'SPU电池模块故障重发',\n    'spu_bat_absent': 'SPU电池模块不在位',\n    'spu_bat_absent_reissue': 'SPU电池模块不在位重发',\n    'spu_bat_will_expire': 'SPU电池模块即将超期',\n    'spu_bat_expired': 'SPU电池模块超期',\n    'spu_bat_expired_reissue': 'SPU电池模块超期重发',\n    'cmos_bat_normal': 'CMOS电池恢复正常',\n    'cmos_bat_failed': 'CMOS电池电力不足',\n    'cmos_bat_failed_reissue': 'CMOS电池电力不足重发',\n    'fc_link_error': 'FC链路错误',\n    'sp_unexpected_power_down': 'SP异常掉电',\n    'ha_takeover_successfully': 'HA接管成功',\n    'ha_takeover_failed': 'HA接管失败',\n    'write_cache_frozen': '写缓存被冻结',\n    'write_cache_disabled': '写缓存被自动禁用',\n    'sas_phy_speed_warning': 'SAS_PHY速率告警',\n    'disk_pullout_electrified': '磁盘带电拔出',\n    'sys_raid_warning': 'SYS_RAID告警',\n    'thinlun_physical_capacity_usedup': 'Thin-LUN物理空间已经用光',\n    'pool_capacity_will_useup': '存储池空间即将用光',\n    'sdas_link_recovery': 'SDAS链路恢复',\n    'sdas_auto_swap_successfully': 'SDAS自动反转成功',\n    'sdas_auto_swap_failed': 'SDAS自动反转失败',\n}\n\nPARSE_ALERT_SEVERITY_MAP = {\n    '0': constants.Severity.NOT_SPECIFIED,\n    '1': constants.Severity.FATAL,\n    '2': constants.Severity.MAJOR,\n    '3': constants.Severity.WARNING,\n    '4': constants.Severity.INFORMATIONAL,\n}\n\n\nSTORAGE_CAP = {\n    constants.StorageMetric.IOPS.name: {\n        \"unit\": constants.StorageMetric.IOPS.unit,\n        \"description\": constants.StorageMetric.IOPS.description\n    },\n    constants.StorageMetric.READ_IOPS.name: {\n        \"unit\": constants.StorageMetric.READ_IOPS.unit,\n        \"description\": 
constants.StorageMetric.READ_IOPS.description\n    },\n    constants.StorageMetric.WRITE_IOPS.name: {\n        \"unit\": constants.StorageMetric.WRITE_IOPS.unit,\n        \"description\": constants.StorageMetric.WRITE_IOPS.description\n    },\n    constants.StorageMetric.THROUGHPUT.name: {\n        \"unit\": constants.StorageMetric.THROUGHPUT.unit,\n        \"description\": constants.StorageMetric.THROUGHPUT.description\n    },\n    constants.StorageMetric.READ_THROUGHPUT.name: {\n        \"unit\": constants.StorageMetric.READ_THROUGHPUT.unit,\n        \"description\": constants.StorageMetric.READ_THROUGHPUT.description\n    },\n    constants.StorageMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": constants.StorageMetric.WRITE_THROUGHPUT.unit,\n        \"description\": constants.StorageMetric.WRITE_THROUGHPUT.description\n    },\n    constants.StorageMetric.RESPONSE_TIME.name: {\n        \"unit\": constants.StorageMetric.RESPONSE_TIME.unit,\n        \"description\": constants.StorageMetric.RESPONSE_TIME.description\n    },\n    constants.StorageMetric.READ_RESPONSE_TIME.name: {\n        \"unit\": constants.StorageMetric.READ_RESPONSE_TIME.unit,\n        \"description\": constants.StorageMetric.READ_RESPONSE_TIME.description\n    },\n    constants.StorageMetric.WRITE_RESPONSE_TIME.name: {\n        \"unit\": constants.StorageMetric.WRITE_RESPONSE_TIME.unit,\n        \"description\": constants.StorageMetric.WRITE_RESPONSE_TIME.description\n    },\n    constants.StorageMetric.CACHE_HIT_RATIO.name: {\n        \"unit\": constants.StorageMetric.CACHE_HIT_RATIO.unit,\n        \"description\": constants.StorageMetric.CACHE_HIT_RATIO.description\n    },\n    constants.StorageMetric.READ_CACHE_HIT_RATIO.name: {\n        \"unit\": constants.StorageMetric.READ_CACHE_HIT_RATIO.unit,\n        \"description\": constants.StorageMetric.READ_CACHE_HIT_RATIO.description\n    },\n    constants.StorageMetric.WRITE_CACHE_HIT_RATIO.name: {\n        \"unit\": constants.StorageMetric.WRITE_CACHE_HIT_RATIO.unit,\n        \"description\":\n            constants.StorageMetric.WRITE_CACHE_HIT_RATIO.description\n    }\n}\n\nVOLUME_CAP = {\n    constants.VolumeMetric.IOPS.name: {\n        \"unit\": constants.VolumeMetric.IOPS.unit,\n        \"description\": constants.VolumeMetric.IOPS.description\n    },\n    constants.VolumeMetric.READ_IOPS.name: {\n        \"unit\": constants.VolumeMetric.READ_IOPS.unit,\n        \"description\": constants.VolumeMetric.READ_IOPS.description\n    },\n    constants.VolumeMetric.WRITE_IOPS.name: {\n        \"unit\": constants.VolumeMetric.WRITE_IOPS.unit,\n        \"description\": constants.VolumeMetric.WRITE_IOPS.description\n    },\n    constants.VolumeMetric.THROUGHPUT.name: {\n        \"unit\": constants.VolumeMetric.THROUGHPUT.unit,\n        \"description\": constants.VolumeMetric.THROUGHPUT.description\n    },\n    constants.VolumeMetric.READ_THROUGHPUT.name: {\n        \"unit\": constants.VolumeMetric.READ_THROUGHPUT.unit,\n        \"description\": constants.VolumeMetric.READ_THROUGHPUT.description\n    },\n    constants.VolumeMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": constants.VolumeMetric.WRITE_THROUGHPUT.unit,\n        \"description\": constants.VolumeMetric.WRITE_THROUGHPUT.description\n    },\n    constants.VolumeMetric.RESPONSE_TIME.name: {\n        \"unit\": constants.VolumeMetric.RESPONSE_TIME.unit,\n        \"description\": constants.VolumeMetric.RESPONSE_TIME.description\n    },\n    constants.VolumeMetric.READ_RESPONSE_TIME.name: {\n        \"unit\": 
constants.VolumeMetric.READ_RESPONSE_TIME.unit,\n        \"description\": constants.VolumeMetric.READ_RESPONSE_TIME.description\n    },\n    constants.VolumeMetric.WRITE_RESPONSE_TIME.name: {\n        \"unit\": constants.VolumeMetric.WRITE_RESPONSE_TIME.unit,\n        \"description\": constants.VolumeMetric.WRITE_RESPONSE_TIME.description\n    },\n    constants.VolumeMetric.CACHE_HIT_RATIO.name: {\n        \"unit\": constants.VolumeMetric.CACHE_HIT_RATIO.unit,\n        \"description\": constants.VolumeMetric.CACHE_HIT_RATIO.description\n    },\n    constants.VolumeMetric.READ_CACHE_HIT_RATIO.name: {\n        \"unit\": constants.VolumeMetric.READ_CACHE_HIT_RATIO.unit,\n        \"description\": constants.VolumeMetric.READ_CACHE_HIT_RATIO.description\n    },\n    constants.VolumeMetric.WRITE_CACHE_HIT_RATIO.name: {\n        \"unit\": constants.VolumeMetric.WRITE_CACHE_HIT_RATIO.unit,\n        \"description\": constants.VolumeMetric.WRITE_CACHE_HIT_RATIO.description\n    }\n}\n\nDISK_CAP = {\n    constants.DiskMetric.IOPS.name: {\n        \"unit\": constants.DiskMetric.IOPS.unit,\n        \"description\": constants.DiskMetric.IOPS.description\n    },\n    constants.DiskMetric.READ_IOPS.name: {\n        \"unit\": constants.DiskMetric.READ_IOPS.unit,\n        \"description\": constants.DiskMetric.READ_IOPS.description\n    },\n    constants.DiskMetric.WRITE_IOPS.name: {\n        \"unit\": constants.DiskMetric.WRITE_IOPS.unit,\n        \"description\": constants.DiskMetric.WRITE_IOPS.description\n    },\n    constants.DiskMetric.THROUGHPUT.name: {\n        \"unit\": constants.DiskMetric.THROUGHPUT.unit,\n        \"description\": constants.DiskMetric.THROUGHPUT.description\n    },\n    constants.DiskMetric.READ_THROUGHPUT.name: {\n        \"unit\": constants.DiskMetric.READ_THROUGHPUT.unit,\n        \"description\": constants.DiskMetric.READ_THROUGHPUT.description\n    },\n    constants.DiskMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": constants.DiskMetric.WRITE_THROUGHPUT.unit,\n        \"description\": constants.DiskMetric.WRITE_THROUGHPUT.description\n    },\n    constants.DiskMetric.RESPONSE_TIME.name: {\n        \"unit\": constants.DiskMetric.RESPONSE_TIME.unit,\n        \"description\": constants.DiskMetric.RESPONSE_TIME.description\n    },\n    constants.DiskMetric.READ_RESPONSE_TIME.name: {\n        \"unit\": constants.DiskMetric.READ_RESPONSE_TIME.unit,\n        \"description\": constants.DiskMetric.READ_RESPONSE_TIME.description\n    },\n    constants.DiskMetric.WRITE_RESPONSE_TIME.name: {\n        \"unit\": constants.DiskMetric.WRITE_RESPONSE_TIME.unit,\n        \"description\": constants.DiskMetric.WRITE_RESPONSE_TIME.description\n    },\n    constants.DiskMetric.CACHE_HIT_RATIO.name: {\n        \"unit\": constants.DiskMetric.CACHE_HIT_RATIO.unit,\n        \"description\": constants.DiskMetric.CACHE_HIT_RATIO.description\n    },\n    constants.DiskMetric.READ_CACHE_HIT_RATIO.name: {\n        \"unit\": constants.DiskMetric.READ_CACHE_HIT_RATIO.unit,\n        \"description\": constants.DiskMetric.READ_CACHE_HIT_RATIO.description\n    },\n    constants.DiskMetric.WRITE_CACHE_HIT_RATIO.name: {\n        \"unit\": constants.DiskMetric.WRITE_CACHE_HIT_RATIO.unit,\n        \"description\": constants.DiskMetric.WRITE_CACHE_HIT_RATIO.description\n    }\n}\n\nPORT_CAP = {\n    constants.PortMetric.IOPS.name: {\n        \"unit\": constants.PortMetric.IOPS.unit,\n        \"description\": constants.PortMetric.IOPS.description\n    },\n    constants.PortMetric.READ_IOPS.name: {\n        
\"unit\": constants.PortMetric.READ_IOPS.unit,\n        \"description\": constants.PortMetric.READ_IOPS.description\n    },\n    constants.PortMetric.WRITE_IOPS.name: {\n        \"unit\": constants.PortMetric.WRITE_IOPS.unit,\n        \"description\": constants.PortMetric.WRITE_IOPS.description\n    },\n    constants.PortMetric.THROUGHPUT.name: {\n        \"unit\": constants.PortMetric.THROUGHPUT.unit,\n        \"description\": constants.PortMetric.THROUGHPUT.description\n    },\n    constants.PortMetric.READ_THROUGHPUT.name: {\n        \"unit\": constants.PortMetric.READ_THROUGHPUT.unit,\n        \"description\": constants.PortMetric.READ_THROUGHPUT.description\n    },\n    constants.PortMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": constants.PortMetric.WRITE_THROUGHPUT.unit,\n        \"description\": constants.PortMetric.WRITE_THROUGHPUT.description\n    },\n    constants.PortMetric.RESPONSE_TIME.name: {\n        \"unit\": constants.PortMetric.RESPONSE_TIME.unit,\n        \"description\": constants.PortMetric.RESPONSE_TIME.description\n    },\n    constants.PortMetric.READ_RESPONSE_TIME.name: {\n        \"unit\": constants.PortMetric.READ_RESPONSE_TIME.unit,\n        \"description\": constants.PortMetric.READ_RESPONSE_TIME.description\n    },\n    constants.PortMetric.WRITE_RESPONSE_TIME.name: {\n        \"unit\": constants.PortMetric.WRITE_RESPONSE_TIME.unit,\n        \"description\": constants.PortMetric.WRITE_RESPONSE_TIME.description\n    },\n    constants.PortMetric.CACHE_HIT_RATIO.name: {\n        \"unit\": constants.PortMetric.CACHE_HIT_RATIO.unit,\n        \"description\": constants.PortMetric.CACHE_HIT_RATIO.description\n    },\n    constants.PortMetric.READ_CACHE_HIT_RATIO.name: {\n        \"unit\": constants.PortMetric.READ_CACHE_HIT_RATIO.unit,\n        \"description\": constants.PortMetric.READ_CACHE_HIT_RATIO.description\n    },\n    constants.PortMetric.WRITE_CACHE_HIT_RATIO.name: {\n        \"unit\": constants.PortMetric.WRITE_CACHE_HIT_RATIO.unit,\n        \"description\": constants.PortMetric.WRITE_CACHE_HIT_RATIO.description\n    }\n}\nFTP_PERF_PATH = '/odsp/log/local/perf'\nSTRAGE_REGULAR = '^perf_device'\nLUN_REGULAR = '^perf_lun'\nSASPORT_REGULAR = '^perf_sasport'\nISCSIPORT_REGULAR = '^perf_iscsiport'\nFCPORT_REGULAR = '^perf_fciport'\nDISK_REGULAR = '^perf_disk'\nSYSTEM_PERFORMANCE_FILE = 'system performance getfilelist'\nVERSION_SHOW = 'versionshow'\nCSV = '.csv'\nSIXTY = 60\nADD_FOLDER = '{}/delfin/drivers/utils/performance_file/macro_san/{}{}{}'\nPERF_LUN = 'perf_lun_'\nPERF_SP = '_SP'\nPERF_SAS_PORT = 'perf_sasport_'\nPERF_ISCSI_PORT = 'perf_iscsiport_'\nGET_DATE = 'date +%s'\nSPECIAL_VERSION = 'Version:'\nSAS_PORT = 'sasport'\nISCSI_PORT = 'iscsiport'\nFC_PORT = 'fcport'\n"
  },
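  {
    "path": "delfin/drivers/macro_san/ms/consts_usage_sketch.py",
    "content": "# Illustrative sketch only -- this file is hypothetical and not the\n# driver's parse_alert implementation. It shows how the PARSE_ALERT_*\n# OID constants and PARSE_ALERT_SEVERITY_MAP defined in\n# delfin/drivers/macro_san/ms/consts.py are meant to be read out of an\n# SNMP trap payload (a dict mapping OID strings to values). The trap\n# values in __main__ are made up.\n\nfrom delfin.common import constants\nfrom delfin.drivers.macro_san.ms import consts\n\n\ndef sketch_parse_alert(alert):\n    # Pick the alert fields out of the trap by their OIDs and translate\n    # the numeric severity code with PARSE_ALERT_SEVERITY_MAP.\n    return {\n        'alert_name': alert.get(consts.PARSE_ALERT_NAME),\n        'location': alert.get(consts.PARSE_ALERT_LOCATION),\n        'description': alert.get(consts.PARSE_ALERT_DESCRIPTION),\n        'severity': consts.PARSE_ALERT_SEVERITY_MAP.get(\n            alert.get(consts.PARSE_ALERT_SEVERITY),\n            constants.Severity.NOT_SPECIFIED),\n    }\n\n\nif __name__ == '__main__':\n    fake_trap = {\n        consts.PARSE_ALERT_NAME: 'fan_failed',\n        consts.PARSE_ALERT_LOCATION: 'SP1',\n        consts.PARSE_ALERT_DESCRIPTION: 'fan module fault',\n        consts.PARSE_ALERT_SEVERITY: '2',\n    }\n    # '2' maps to constants.Severity.MAJOR via PARSE_ALERT_SEVERITY_MAP.\n    print(sketch_parse_alert(fake_trap))\n"
  },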
  {
    "path": "delfin/drivers/macro_san/ms/file/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/macro_san/ms/macro_ssh_client.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2011 OpenStack LLC\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport time\n\nimport paramiko\nimport six\nfrom oslo_log import log as logging\n\nfrom delfin import cryptor\nfrom delfin import exception, utils\nfrom delfin.drivers.utils.ssh_client import SSHPool\n\nLOG = logging.getLogger(__name__)\n\n\nclass MacroSanSSHPool(SSHPool):\n    def create(self):\n        ssh = paramiko.SSHClient()\n        try:\n            if self.ssh_pub_key is None:\n                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n            else:\n                host_key = '%s %s %s' % \\\n                           (self.ssh_host, self.ssh_pub_key_type,\n                            self.ssh_pub_key)\n                self.set_host_key(host_key, ssh)\n\n            ssh.connect(hostname=self.ssh_host, port=self.ssh_port,\n                        username=self.ssh_username,\n                        password=cryptor.decode(self.ssh_password),\n                        timeout=self.ssh_conn_timeout,\n                        banner_timeout=self.ssh_conn_timeout)\n            transport = ssh.get_transport()\n            transport.set_keepalive(self.ssh_conn_timeout)\n            return ssh\n        except Exception as e:\n            err = six.text_type(e)\n            LOG.error(err)\n            if 'timed out' in err:\n                raise exception.InvalidIpOrPort()\n            elif 'No authentication methods available' in err \\\n                    or 'Authentication failed' in err:\n                raise exception.InvalidUsernameOrPassword()\n            elif 'not a valid RSA private key file' in err:\n                raise exception.InvalidPrivateKey()\n            elif 'not found in known_hosts' in err:\n                raise exception.SSHNotFoundKnownHosts(self.ssh_host)\n            else:\n                raise exception.SSHException(err)\n\n    def do_exec_shell(self, command_list, sleep_time=0.5):\n        result = ''\n        try:\n            with self.item() as ssh:\n                if command_list and ssh:\n                    channel = ssh.invoke_shell()\n                    for command in command_list:\n                        utils.check_ssh_injection(command)\n                        channel.send(command + '\\n')\n                        time.sleep(sleep_time)\n                    channel.send(\"exit\" + \"\\n\")\n                    channel.close()\n                    while True:\n                        resp = channel.recv(9999).decode('utf8')\n                        if not resp:\n                            break\n                        result += resp\n            if 'is not a recognized command' in result:\n                raise exception.InvalidIpOrPort()\n        except paramiko.AuthenticationException as ae:\n            LOG.error('doexec Authentication error:{}'.format(ae))\n            raise exception.InvalidUsernameOrPassword()\n        except 
Exception as e:\n            err = six.text_type(e)\n            LOG.error(err)\n            if 'timed out' in err \\\n                    or 'SSH connect timeout' in err:\n                raise exception.SSHConnectTimeout()\n            elif 'No authentication methods available' in err \\\n                    or 'Authentication failed' in err \\\n                    or 'Invalid username or password' in err:\n                raise exception.InvalidUsernameOrPassword()\n            elif 'not a valid RSA private key file' in err \\\n                    or 'not a valid RSA private key' in err:\n                raise exception.InvalidPrivateKey()\n            elif 'Unable to connect to port' in err \\\n                    or 'Invalid ip or port' in err:\n                raise exception.InvalidIpOrPort()\n            else:\n                raise exception.SSHException(err)\n        return result\n"
  },
  {
    "path": "delfin/drivers/macro_san/ms/ms_handler.py",
    "content": "# Copyright 2022 The SODA Authors.\n# Copyright (c) 2022 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport codecs\nimport csv\nimport datetime\nimport hashlib\nimport os\nimport re\nimport shutil\nimport tarfile\nimport time\n\nimport six\nimport xlrd\nfrom oslo_log import log\nfrom oslo_utils import units\n\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.drivers.macro_san.ms import consts\nfrom delfin.drivers.macro_san.ms.consts import digital_constant\nfrom delfin.drivers.macro_san.ms.macro_ssh_client import MacroSanSSHPool\nfrom delfin.drivers.utils.tools import Tools\n\nLOG = log.getLogger(__name__)\n\n\nclass MsHandler(object):\n\n    def __init__(self, **kwargs):\n        self.ssh_pool = MacroSanSSHPool(**kwargs)\n        ssh_access = kwargs.get('ssh')\n        self.ssh_host = ssh_access.get('host')\n        self.down_lock = True\n\n    def login(self):\n        res = ''\n        try:\n            res = self.ssh_pool.do_exec_shell([consts.ODSP_SH])\n        except Exception as e:\n            LOG.error('Failed to ssh login macro_san %s' % (\n                six.text_type(e)))\n        if consts.UNKNOWN_COMMAND_TAG in res:\n            try:\n                self.ssh_pool.do_exec_shell([consts.SYSTEM_QUERY])\n                self.down_lock = False\n            except Exception as e:\n                LOG.error('Failed to cli login macro_san %s' % (\n                    six.text_type(e)))\n                raise e\n\n    def get_storage(self, storage_id):\n        storage_data_map = self.get_data_query(consts.SYSTEM_QUERY)\n        if not storage_data_map:\n            raise exception.SSHException('The command returns empty data')\n        device_uuid = storage_data_map.get('DeviceUUID')\n        serial_number = '{}:{}'.format(self.ssh_host, device_uuid)\n        storage_name = storage_data_map.get('DeviceName')\n        firmware_version = self.get_firmware_version()\n        pools = self.list_storage_pools(storage_id)\n        total_capacity = digital_constant.ZERO_INT\n        used_capacity = digital_constant.ZERO_INT\n        for pool in pools:\n            total_capacity += pool.get('total_capacity')\n            used_capacity += pool.get('used_capacity')\n        disks = self.list_disks(storage_id)\n        raw_capacity = digital_constant.ZERO_INT\n        for disk in disks:\n            raw_capacity += disk.get('capacity')\n        storage_status = self.get_storage_status(storage_id)\n        model = self.get_storage_model(storage_id)\n        storage = {\n            'name': storage_name if storage_name else device_uuid,\n            'vendor': consts.STORAGE_VENDOR,\n            'status': storage_status,\n            'model': model,\n            'serial_number': serial_number,\n            'firmware_version': firmware_version,\n            'raw_capacity': raw_capacity,\n            'total_capacity': total_capacity,\n            'used_capacity': 
used_capacity,\n            'free_capacity': total_capacity - used_capacity\n        }\n        return storage\n\n    def get_storage_model(self, storage_id):\n        storage_model = ''\n        if not self.down_lock:\n            return storage_model\n        local_path = self.download_model_file(storage_id)\n        if local_path:\n            try:\n                storage_model = self.analysis_model_file(local_path,\n                                                         storage_model)\n            finally:\n                shutil.rmtree(local_path)\n        return storage_model\n\n    @staticmethod\n    def analysis_model_file(local_path, storage_model):\n        list_dir = os.listdir(local_path)\n        for dir_name in list_dir:\n            excel = xlrd.open_workbook('{}/{}'.format(local_path, dir_name))\n            sheet = excel.sheet_by_index(consts.digital_constant.ZERO_INT)\n            rows_data_list = sheet.row_values(consts.digital_constant.ONE_INT)\n            for rows_data in rows_data_list:\n                title_pattern = re.compile(consts.STORAGE_INFO_MODEL_REGULAR)\n                title_search_obj = title_pattern.search(rows_data)\n                if title_search_obj:\n                    storage_model = rows_data\n                    break\n        return storage_model\n\n    def download_model_file(self, storage_id):\n        sftp = None\n        local_path = ''\n        try:\n            ssh = self.ssh_pool.create()\n            sftp = ssh.open_sftp()\n            file_name_list = sftp.listdir(consts.FTP_PATH_TMP)\n            for file_name in file_name_list:\n                title_pattern = re.compile(consts.STORAGE_INFO_REGULAR)\n                title_search_obj = title_pattern.search(file_name)\n                if title_search_obj:\n                    os_path = os.getcwd()\n                    localtime = int(time.mktime(time.localtime())) * units.k\n                    local_path = consts.MODEL_PATH.format(\n                        os_path, storage_id, localtime)\n                    os.mkdir(local_path)\n                    local_path_file = '{}/{}'.format(local_path, file_name)\n                    sftp.get(consts.FTP_PATH_FILE.format(file_name),\n                             local_path_file)\n                    break\n        except Exception as e:\n            LOG.error('Failed to download storage model file macro_san %s' %\n                      (six.text_type(e)))\n        if sftp:\n            sftp.close()\n        return local_path\n\n    def get_firmware_version(self):\n        firmware_version = None\n        version_map = self.get_storage_version()\n        for sp_num in range(\n                consts.digital_constant.ONE_INT,\n                len(version_map) + consts.digital_constant.ONE_INT):\n            sp_key = '{}{}'.format(consts.SP, sp_num)\n            firmware_version = \\\n                version_map.get(sp_key, {}).get('{}{}'.format(\n                    sp_key, consts.ODSP_MSC_VERSION_KEY))\n            if consts.FIELDS_INITIATOR_HOST != firmware_version:\n                break\n        return firmware_version\n\n    def get_storage_status(self, storage_id):\n        storage_status = constants.StorageStatus.NORMAL\n        ha_status_map = self.get_data_query(consts.HA_STATUS)\n        ha_status = ha_status_map.get('SystemHAStatus')\n        if ha_status:\n            storage_status = consts.STORAGE_STATUS_MAP.get(\n                ha_status.lower(), constants.StorageStatus.UNKNOWN)\n        else:\n            controllers_list = 
self.list_controllers(storage_id)\n            for controllers in controllers_list:\n                controllers_status = controllers.get('status')\n                if controllers_status == constants.ControllerStatus.FAULT:\n                    storage_status = constants.StorageStatus.ABNORMAL\n        return storage_status\n\n    def list_storage_pools(self, storage_id):\n        pool_list = []\n        pools = self.get_data_list(consts.POOL_LIST, consts.FIELDS_NAME)\n        for pool in pools:\n            pool_name = pool.get('Name')\n            health_status = self.get_pool_status(pool_name)\n            total_capacity = Tools.get_capacity_size(pool.get('AllCapacity'))\n            used_capacity = Tools.get_capacity_size(pool.get('UsedCapacity'))\n            pool_model = {\n                'name': pool_name,\n                'storage_id': storage_id,\n                'native_storage_pool_id': pool_name,\n                'status': health_status,\n                'storage_type': constants.StorageType.BLOCK,\n                'total_capacity': total_capacity,\n                'used_capacity': used_capacity,\n                'free_capacity': total_capacity - used_capacity\n            }\n            pool_list.append(pool_model)\n        return pool_list\n\n    def get_pool_status(self, pool_name):\n        raids = self.get_data_list(consts.RAID_LIST.format(pool_name),\n                                   consts.FIELDS_NAME)\n        pool_status = constants.StoragePoolStatus.UNKNOWN\n        if raids:\n            pool_status = constants.StoragePoolStatus.NORMAL\n        for raid in raids:\n            health_status = raid.get('HealthStatus').lower() \\\n                if raid.get('HealthStatus') else None\n            if health_status in consts.POOL_STATUS_ABNORMAL.ALL:\n                pool_status = constants.StoragePoolStatus.ABNORMAL\n                break\n            if health_status == constants.StoragePoolStatus.DEGRADED:\n                pool_status = constants.StoragePoolStatus.DEGRADED\n                break\n            if health_status not in consts.POOL_STATUS_NORMAL.ALL:\n                pool_status = constants.StoragePoolStatus.UNKNOWN\n        return pool_status\n\n    def list_volumes(self, storage_id):\n        volume_list = []\n        pool_volumes = self.get_volumes(storage_id)\n        for volume in pool_volumes:\n            status = volume.get('HealthStatus').lower() \\\n                if volume.get('HealthStatus') else None\n            total_capacity = self.get_total_capacity(volume)\n            thin_provisioning = volume.get('Thin-Provisioning').lower() \\\n                if volume.get('Thin-Provisioning') else None\n            used_capacity = self.get_used_capacity(thin_provisioning,\n                                                   total_capacity, volume)\n            volume_model = {\n                'name': volume.get('Name'),\n                'storage_id': storage_id,\n                'status': consts.LIST_VOLUMES_STATUS_MAP.get(\n                    status, constants.StorageStatus.UNKNOWN),\n                'native_volume_id': volume.get('Name'),\n                'native_storage_pool_id': volume.get('Owner(Pool)'),\n                'type': consts.VOLUME_TYPE_MAP.get(\n                    thin_provisioning, constants.VolumeType.THICK),\n                'wwn': volume.get('DeviceID') if\n                volume.get('DeviceID') else volume.get('WWN'),\n                'total_capacity': total_capacity,\n                'used_capacity': used_capacity,\n      
          'free_capacity': total_capacity - used_capacity\n            }\n            volume_list.append(volume_model)\n        return volume_list\n\n    @staticmethod\n    def get_used_capacity(thin_provisioning, total_capacity, volume):\n        if consts.FIELDS_ENABLE == thin_provisioning:\n            used_capacity_str = volume.get('Thin-LUNUsedCapacity')\n            number_b = used_capacity_str.index('B')\n            used_capacity = \\\n                used_capacity_str[:number_b + consts.digital_constant.ONE_INT]\n            used_capacity = Tools.get_capacity_size(used_capacity)\n        else:\n            used_capacity = total_capacity\n        return used_capacity\n\n    @staticmethod\n    def get_total_capacity(volume):\n        total_size = volume.get('TotalSize')\n        if not total_size:\n            physical_size = volume.get('TotalPhysicalSize')\n            number_b = physical_size.index('B')\n            total_size = \\\n                physical_size[:number_b + consts.digital_constant.ONE_INT]\n        total_capacity = Tools.get_capacity_size(total_size)\n        return total_capacity\n\n    def list_controllers(self, storage_id):\n        controllers_list = []\n        sp_map = self.get_storage_version()\n        cpu_map = self.get_cpu_information()\n        ha_status_map = self.get_data_query(consts.HA_STATUS)\n        for sp_name in sp_map.keys():\n            status_key = '{}{}'.format(sp_name, consts.HA_RUNNING_STATUS)\n            status = ha_status_map.get(status_key).lower() \\\n                if ha_status_map.get(status_key) else None\n            soft_version = sp_map.get(sp_name, {}).get(\n                '{}{}'.format(sp_name, consts.ODSP_MSC_VERSION_KEY))\n            cpu_vendor_id = cpu_map.get(sp_name, {}).get(\n                '{}{}'.format(sp_name, consts.PROCESSOR_VENDOR_KEY))\n            cpu_frequency = cpu_map.get(sp_name, {}).get(\n                '{}{}'.format(sp_name, consts.PROCESSOR_FREQUENCY_KEY))\n            cpu_info = ''\n            if cpu_vendor_id and cpu_frequency:\n                cpu_info = '{}@{}'.format(cpu_vendor_id, cpu_frequency)\n            controller_model = {\n                'name': sp_name,\n                'storage_id': storage_id,\n                'native_controller_id': sp_name,\n                'status': consts.CONTROLLERS_STATUS_MAP.get(\n                    status, constants.ControllerStatus.UNKNOWN),\n                'location': sp_name,\n                'soft_version': soft_version,\n                'cpu_info': cpu_info\n            }\n            if cpu_info:\n                controller_model['cpu_count'] = consts.digital_constant.ONE_INT\n            controllers_list.append(controller_model)\n        return controllers_list\n\n    def get_cpu_information(self):\n        cpu_res = self.do_exec(consts.SYSTEM_CPU)\n        sp_map = {}\n        if cpu_res:\n            cpu_res_list = cpu_res.strip(). 
\\\n                replace('\\r', '').split('\\n')\n            sp_cpu_map = {}\n            sp = None\n            bag = True\n            for row_cpu in (cpu_res_list or []):\n                row_pattern = re.compile(consts.SYSTEM_CPU_SP_REGULAR)\n                row_search = row_pattern.search(row_cpu)\n                if row_search:\n                    bag = False\n                    sp = row_cpu.replace(\n                        consts.LEFT_HALF_BRACKET, '').replace(\n                        consts.CPU_INFORMATION_BRACKET, '').replace(' ', '')\n                if bag:\n                    continue\n                if consts.COLON in row_cpu:\n                    row_version_list = row_cpu.replace(' ', '').split(\n                        consts.COLON, digital_constant.ONE_INT)\n                    key = row_version_list[digital_constant.ZERO_INT]\n                    sp_cpu_map[key] = row_version_list[\n                        digital_constant.ONE_INT]\n                if not row_cpu:\n                    sp_map[sp] = sp_cpu_map\n                    sp_cpu_map = {}\n                    sp = None\n        return sp_map\n\n    def list_disks(self, storage_id):\n        disk_list = []\n        disks = self.get_disks()\n        for disk in disks:\n            disk_name = disk.get('Name')\n            physical = disk.get('Type').lower() if disk.get('Type') else None\n            logical = disk.get('Role').lower() if disk.get('Role') else None\n            status = disk.get('HealthStatus').lower() if \\\n                disk.get('HealthStatus') else None\n            disk_model = {\n                'name': disk_name,\n                'storage_id': storage_id,\n                'native_disk_id': disk_name,\n                'serial_number': disk.get('SerialNumber'),\n                'manufacturer': disk.get('Vendor'),\n                'model': disk.get('Model'),\n                'firmware': disk.get('FWVersion'),\n                'location': disk_name,\n                'speed': int(disk.get('RPMs')) if disk.get('RPMs') else '',\n                'capacity': Tools.get_capacity_size(disk.get('Capacity')),\n                'status': consts.DISK_STATUS_MAP.get(\n                    status, constants.DiskStatus.NORMAL),\n                'physical_type': consts.DISK_PHYSICAL_TYPE_MAP.get(\n                    physical, constants.DiskPhysicalType.UNKNOWN),\n                'logical_type': consts.DISK_LOGICAL_TYPE_MAP.get(\n                    logical, constants.DiskLogicalType.UNKNOWN)\n            }\n            disk_list.append(disk_model)\n        return disk_list\n\n    def list_ports(self, storage_id):\n        ports = self.get_fc_port_encapsulation(storage_id)\n        ports.extend(self.get_sas_port_data(storage_id))\n        return ports\n\n    def get_fc_port_encapsulation(self, storage_id):\n        ports = []\n        fc_port_map = self.get_fc_port()\n        for fc_port_id in fc_port_map.keys():\n            fc_port_id_upper = fc_port_id.upper()\n            port_type = self.get_port_type(fc_port_id.lower())\n            fc_ports = fc_port_map.get(fc_port_id)\n            status_int = fc_ports.get('onlinestate')\n            native_parent_id = '{}{}'.format(\n                consts.SP, self.numbers_character(fc_port_id))\n            fc_port_m = {\n                'native_port_id': fc_port_id_upper,\n                'name': fc_port_id_upper,\n                'type': port_type,\n                'logical_type': constants.PortLogicalType.PHYSICAL,\n                'connection_status': 
consts.PORT_CONNECTION_STATUS_MAP.get(\n                    status_int, constants.PortConnectionStatus.UNKNOWN),\n                'health_status': constants.PortHealthStatus.UNKNOWN,\n                'location': fc_port_id_upper,\n                'storage_id': storage_id,\n                'native_parent_id': native_parent_id,\n                'speed': Tools.get_capacity_size(fc_ports.get('actualspeed')),\n                'wwn': fc_ports.get('wwn')\n            }\n            ports.append(fc_port_m)\n        return ports\n\n    @staticmethod\n    def parse_alert(alert):\n        try:\n            if consts.PARSE_ALERT_DESCRIPTION in alert.keys():\n                alert_name = alert.get(consts.PARSE_ALERT_NAME)\n                alert_name_e = alert_name.lower()\n                alert_name_c = consts.ALERT_NAME_CONFIG.get(\n                    alert_name_e, alert_name)\n                alert_model = dict()\n                description = alert.get(consts.PARSE_ALERT_DESCRIPTION)\\\n                    .encode('iso-8859-1').decode('gbk')\n                alert_model['alert_id'] = alert.get(\n                    consts.PARSE_ALERT_ALERT_ID)\n                alert_model['severity'] = consts.PARSE_ALERT_SEVERITY_MAP.get(\n                    alert.get(consts.PARSE_ALERT_SEVERITY),\n                    constants.Severity.NOT_SPECIFIED)\n                alert_model['category'] = constants.Category.FAULT\n                alert_model['occur_time'] = Tools().time_str_to_timestamp(\n                    alert.get(consts.PARSE_ALERT_TIME), consts.TIME_PATTERN)\n                alert_model['description'] = description\n                alert_model['location'] = '{}:{}'.format(alert.get(\n                    consts.PARSE_ALERT_STORAGE),\n                    alert.get(consts.PARSE_ALERT_LOCATION))\n                alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n                alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n                alert_model['alert_name'] = alert_name_c\n                match_key = '{}{}'.format(alert_name_c, description)\n                alert_model['match_key'] = hashlib.md5(\n                    match_key.encode()).hexdigest()\n                return alert_model\n        except Exception as e:\n            err_msg = \"Failed to parse alert from \" \\\n                      \"macro_san ms: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_storage_host_initiators(self, storage_id):\n        initiators_list = []\n        initiators = self.get_data_list(\n            consts.CLIENT_INITIATOR_GETLIST, consts.FIELDS_INITIATOR_ALIAS)\n        for initiator in initiators:\n            host_name = initiator.get('MappedClient') \\\n                if initiator.get('MappedClient') else initiator.get(\n                'MappedHost')\n            wwn = initiator.get('InitiatorWWN')\n            online_status = initiator.get('OnlineStatus').lower() \\\n                if initiator.get('OnlineStatus') else None\n            initiator_type = initiator.get('Type').lower() \\\n                if initiator.get('Type') else None\n            initiator_d = {\n                'native_storage_host_initiator_id': wwn,\n                'name': wwn,\n                'alias': initiator.get('InitiatorAlias'),\n                'type': consts.INITIATOR_TYPE_MAP.get(\n                    initiator_type, constants.InitiatorType.UNKNOWN),\n                'status': consts.INITIATOR_STATUS_MAP.get(\n              
      online_status, constants.InitiatorStatus.UNKNOWN),\n                'wwn': wwn,\n                'storage_id': storage_id\n            }\n            if consts.FIELDS_INITIATOR_HOST != host_name:\n                initiator_d['native_storage_host_id'] = host_name\n            initiators_list.append(initiator_d)\n        return initiators_list\n\n    def list_storage_hosts_old(self, storage_id):\n        host_list = []\n        initiators_host_relation = self.get_initiators_host_relation()\n        hosts = self.get_data_list(consts.CLIENT_LIST, consts.FIELDS_NAME, '')\n        for host in hosts:\n            host_name = host.get('Name')\n            initiators = initiators_host_relation.get(host_name)\n            os_type = constants.HostOSTypes.UNKNOWN\n            if initiators:\n                os_str = initiators.get('OS').lower() \\\n                    if initiators.get('OS') else None\n                os_type = consts.HOST_OS_TYPES_MAP.get(\n                    os_str, constants.HostOSTypes.UNKNOWN)\n            host_d = {\n                'name': host_name,\n                'storage_id': storage_id,\n                'native_storage_host_id': host_name,\n                'os_type': os_type,\n                'status': constants.HostStatus.NORMAL,\n                'description': host.get('Description')\n            }\n            host_list.append(host_d)\n        return host_list\n\n    def list_storage_hosts_new(self, storage_id):\n        hosts_new = self.get_data_list(consts.CLIENT_HOST,\n                                       consts.FIELDS_HOST_NAME, '')\n        host_list = []\n        for host in hosts_new:\n            host_name = host.get('Host Name')\n            os = host.get('OS').lower() if host.get('OS') else None\n            host_d = {\n                'name': host_name,\n                'storage_id': storage_id,\n                'native_storage_host_id': host_name,\n                'os_type': consts.HOST_OS_TYPES_MAP.get(\n                    os, constants.HostOSTypes.UNKNOWN),\n                'status': constants.HostStatus.NORMAL,\n                'description': host.get('Description')\n            }\n            if consts.FIELDS_INITIATOR_HOST != host.get('IP Address'):\n                host_d['ip_address'] = host.get('IP Address')\n            host_list.append(host_d)\n        return host_list\n\n    def list_storage_host_groups(self, storage_id):\n        host_groups = self.get_data_list(consts.HOST_GROUP,\n                                         consts.FIELDS_HOST_GROUP_NAME, '')\n        storage_host_groups = []\n        host_grp_relation_list = []\n        for host_group in host_groups:\n            host_group_name = host_group.get('Host Group Name')\n            host_g = {\n                'name': host_group_name,\n                'storage_id': storage_id,\n                'native_storage_host_group_id': host_group_name,\n                'description': host_group.get('Description')\n            }\n            storage_host_groups.append(host_g)\n            hosts = self.get_data_list(\n                consts.HOST_GROUP_N.format(host_group_name),\n                consts.FIELDS_HOST_NAME_TWO)\n            for host in hosts:\n                host_name = host.get('HostName')\n                host_group_relation = {\n                    'storage_id': storage_id,\n                    'native_storage_host_group_id': host_group_name,\n                    'native_storage_host_id': host_name\n                }\n                
host_grp_relation_list.append(host_group_relation)\n        result = {\n            'storage_host_groups': storage_host_groups,\n            'storage_host_grp_host_rels': host_grp_relation_list\n        }\n        return result\n\n    def list_volume_groups(self, storage_id):\n        volume_groups = self.get_data_list(consts.VOLUME_GROUP,\n                                           consts.FIELDS_VOLUME_GROUP_NAME, '')\n        volume_group_list = []\n        volume_grp_relation_list = []\n        for volume_group in volume_groups:\n            volume_group_name = volume_group.get('LUN Group Name')\n            volume_g = {\n                'name': volume_group_name,\n                'storage_id': storage_id,\n                'native_volume_group_id': volume_group_name,\n                'description': volume_group.get('Description')\n            }\n            volume_group_list.append(volume_g)\n            volumes = self.get_data_list(\n                consts.VOLUME_GROUP_N.format(volume_group_name),\n                consts.FIELDS_LUN_NAME)\n            for volume in volumes:\n                volume_name = volume.get('LUNName')\n                volume_group_relation = {\n                    'storage_id': storage_id,\n                    'native_volume_group_id': volume_group_name,\n                    'native_volume_id': volume_name\n                }\n                volume_grp_relation_list.append(volume_group_relation)\n        result = {\n            'volume_groups': volume_group_list,\n            'vol_grp_vol_rels': volume_grp_relation_list\n        }\n        return result\n\n    def list_masking_views_old(self, storage_id):\n        views = []\n        hosts = self.get_data_list(consts.CLIENT_LIST, consts.FIELDS_NAME)\n        for host in hosts:\n            host_name = host.get('Name')\n            masking_list = self.get_data_list(\n                consts.SHARE_LUN_LIST.format(host_name),\n                consts.FIELDS_LUN_NAME)\n            for masking_object in masking_list:\n                volume_id = masking_object.get('LUNID')\n                native_masking_view_id = '{}{}'.format(host_name, volume_id)\n                view = {\n                    'native_masking_view_id': native_masking_view_id,\n                    'name': native_masking_view_id,\n                    'native_storage_host_id': host_name,\n                    'native_volume_id': volume_id,\n                    'storage_id': storage_id\n                }\n                views.append(view)\n        return views\n\n    def list_masking_views_new(self, storage_id):\n        views = self.get_data_list(consts.MAPVIEW, consts.FIELDS_MAPVIEW_NAME,\n                                   '')\n        views_list = []\n        for view in views:\n            mapview_name = view.get('Mapview Name')\n            view_d = {\n                'native_masking_view_id': mapview_name,\n                'name': mapview_name,\n                'native_storage_host_group_id': view.get('Host Group Name'),\n                'native_volume_group_id': view.get('LUN Group Name'),\n                'description': view.get('Description'),\n                'storage_id': storage_id\n            }\n            views_list.append(view_d)\n        return views_list\n\n    def do_exec(self, command_str, sleep_time=0.5, mix_time=consts.TIME_LIMIT):\n        if self.down_lock:\n            try:\n                res = self.ssh_pool.do_exec_shell(\n                    [consts.ODSP_SH, command_str], sleep_time)\n            except Exception as e:\n    
            LOG.error('ssh Command(%s) execution error: %s' % (\n                    command_str, six.text_type(e)))\n                raise e\n        else:\n            try:\n                res = self.ssh_pool.do_exec_shell([command_str], sleep_time)\n            except Exception as e:\n                LOG.error('cli Command(%s) execution error: %s' % (\n                    command_str, six.text_type(e)))\n                raise e\n        if consts.FAILED_TAG in res or consts.UNKNOWN_COMMAND_TAG in res:\n            return None\n        if consts.SUCCESSFUL_TAG not in res:\n            LOG.info('Command(%s) sleep(%s) return info: %s' %\n                     (command_str, sleep_time, res))\n            if sleep_time > mix_time:\n                return None\n            res = self.do_exec(command_str, sleep_time + 2, mix_time)\n        return res\n\n    def get_data_query(self, command):\n        data_map = {}\n        res = self.do_exec(command)\n        if res is not None:\n            row_res_list = res.strip().replace('\\r', '').split('\\n')\n            for row_res in (row_res_list or []):\n                if consts.COLON not in row_res:\n                    continue\n                row_data_list = row_res.replace(' ', '').split(\n                    consts.COLON, digital_constant.ONE_INT)\n                key = row_data_list[digital_constant.ZERO_INT]\n                data_map[key] = row_data_list[digital_constant.ONE_INT]\n        return data_map\n\n    def get_storage_version(self):\n        version_res = self.do_exec(consts.SYSTEM_VERSION)\n        sp_map = {}\n        if version_res:\n            version_res_list = version_res.strip(). \\\n                replace('\\r', '').split('\\n')\n            sp_version_map = {}\n            sp = None\n            bag = True\n            for row_version in (version_res_list or []):\n                row_pattern = re.compile(consts.SYSTEM_VERSION_SP_REGULAR)\n                row_search = row_pattern.search(row_version)\n                if row_search:\n                    bag = False\n                    sp = row_version.replace(\n                        consts.LEFT_HALF_BRACKET, '').replace(\n                        consts.AFTER_HALF_BRACKET, '').replace(' ', '')\n                if bag:\n                    continue\n                if consts.COLON in row_version:\n                    row_version_list = row_version.replace(' ', '').split(\n                        consts.COLON, digital_constant.ONE_INT)\n                    key = row_version_list[digital_constant.ZERO_INT]\n                    sp_version_map[key] = row_version_list[\n                        digital_constant.ONE_INT]\n                    if consts.ODSP_DRIVER_VERSION_KEY in key:\n                        sp_map[sp] = sp_version_map\n                        sp_version_map = {}\n        return sp_map\n\n    def get_data_list(self, command, contains_fields, space=' ',\n                      sleep_time=0.5, mix_time=consts.TIME_LIMIT):\n        data_list = []\n        res = self.do_exec(command, sleep_time, mix_time)\n        if res:\n            res_list = res.strip().replace('\\r', '').split('\\n\\n')\n            for object_str in (res_list or []):\n                object_str = object_str.replace(space, '')\n                if contains_fields not in object_str:\n                    continue\n                object_list = object_str.split('\\n')\n                data_map = {}\n                for row_str in (object_list or []):\n                    if consts.COLON not in 
row_str:\n                        continue\n                    row_list = row_str.split(\n                        consts.COLON, digital_constant.ONE_INT)\n                    key = row_list[digital_constant.ZERO_INT].strip()\n                    data_map[key] = row_list[digital_constant.ONE_INT].strip()\n                data_list.append(data_map)\n        return data_list\n\n    def get_volumes(self, storage_id):\n        pools = self.list_storage_pools(storage_id)\n        volumes = []\n        for pool in pools:\n            pool_name = pool.get('name')\n            lun_list = self.get_data_list(\n                consts.LUN_LIST.format(pool_name), consts.FIELDS_NAME)\n            for lun in lun_list:\n                lun_name = lun.get('Name')\n                lun_query = self.get_data_query(\n                    consts.LUN_QUERY.format(lun_name))\n                if lun_query:\n                    volumes.append(lun_query)\n        return volumes\n\n    def get_disks(self):\n        disk_list = []\n        dsu_list = self.get_data_list(consts.DSU_LIST, consts.FIELDS_NAME)\n        for dsu in dsu_list:\n            dsu_name = dsu.get('Name')\n            if not dsu_name:\n                continue\n            dsu_id = dsu_name.replace(consts.DSU, '')\n            disks = self.get_data_list(\n                consts.DISK_LIST.format(dsu_id), consts.FIELDS_NAME)\n            for disk in disks:\n                disk_name = disk.get('Name')\n                if not disk_name:\n                    continue\n                disk_id = disk_name.replace(consts.DISK, '')\n                disk_map = self.get_data_query(\n                    consts.DISK_QUERY.format(disk_id))\n                if disk_map:\n                    disk_list.append(disk_map)\n        return disk_list\n\n    def get_fc_port(self):\n        target_port_res = self.do_exec(consts.TARGET_QUERY_PORT_LIST)\n        fc_port = {}\n        if target_port_res:\n            bag = True\n            port_id = None\n            port_map = {}\n            target_port_list = target_port_res.replace('\\r', '').split('\\n')\n            for port_row_str in target_port_list:\n                port_row_str = port_row_str.replace(' ', '')\n                row_pattern = re.compile(consts.TARGET_PORT_REGULAR)\n                row_search = row_pattern.search(port_row_str)\n                if row_search:\n                    if port_map:\n                        fc_port[port_id] = port_map\n                        port_map = {}\n                    port_id = port_row_str.replace(consts.PORT, '')\n                    bag = False\n                    continue\n                if bag:\n                    continue\n                if consts.COLON in port_row_str:\n                    port_row_list = port_row_str.split(\n                        consts.COLON, digital_constant.ONE_INT)\n                    port_key = port_row_list[digital_constant.ZERO_INT]\n                    port_map[port_key] = port_row_list[\n                        digital_constant.ONE_INT]\n                if consts.PORT_SUCCESSFUL_TAG in port_row_str:\n                    fc_port[port_id] = port_map\n        return fc_port\n\n    def get_sas_port_data(self, storage_id):\n        sas_list = []\n        try:\n            ha_status_map = self.get_data_query(consts.HA_STATUS)\n            for ha_status_key in ha_status_map.keys():\n                if consts.SP not in ha_status_key:\n                    continue\n                sp_num = ha_status_key.replace(\n                    
consts.HA_RUNNING_STATUS, '').replace(consts.SP, '')\n                dsu_list = self.get_data_list(consts.DSU_LIST,\n                                              consts.FIELDS_NAME)\n                for dsu in dsu_list:\n                    dsu_num = self.numbers_character(dsu.get('Name'))\n                    sas_data_map = self.get_sas_data_list(\n                        consts.SAS_PORT_LIST.format(sp_num, dsu_num),\n                        consts.FIELDS_LINK_STATUS)\n                    self.get_sas_encapsulation_data(sas_data_map, sas_list,\n                                                    storage_id)\n        finally:\n            return sas_list\n\n    def get_sas_encapsulation_data(self, sas_data_map, sas_list, storage_id):\n        for sas_port_id in sas_data_map.keys():\n            sas_object_map = sas_data_map.get(sas_port_id)\n            status = sas_object_map.get(\n                '{} Link Status'.format(sas_port_id))\n            max_speed = sas_object_map.get(\n                '{} PHY Max Speed'.format(sas_port_id))\n            speed = sas_object_map.get(\n                '{} PHY1 Speed'.format(sas_port_id))\n            native_parent_id = '{}{}'.format(\n                consts.SP, self.numbers_character(sas_port_id))\n            sas_port_m = {\n                'native_port_id': sas_port_id,\n                'name': sas_port_id,\n                'type': constants.PortType.SAS,\n                'logical_type': constants.PortLogicalType.PHYSICAL,\n                'connection_status': consts.PORT_CONNECTION_STATUS_MAP.get(\n                    status, constants.PortConnectionStatus.UNKNOWN),\n                'health_status': constants.PortHealthStatus.UNKNOWN,\n                'location': sas_port_id,\n                'storage_id': storage_id,\n                'native_parent_id': native_parent_id,\n                'max_speed': self.capacity_conversion(max_speed),\n                'speed': self.capacity_conversion(speed)\n            }\n            sas_list.append(sas_port_m)\n\n    @staticmethod\n    def capacity_conversion(capacity_str):\n        # Strip the rate suffix (Gbps/Mbps/Kbps) and scale to a numeric speed\n        capacity_int = consts.digital_constant.ZERO_INT\n        if consts.GBPS in capacity_str:\n            capacity_int = int(capacity_str.replace(consts.GBPS, '')) * units.G\n        elif consts.MBPS in capacity_str:\n            capacity_int = int(capacity_str.replace(consts.MBPS, '')) * units.M\n        elif consts.KBPS in capacity_str:\n            capacity_int = int(capacity_str.replace(consts.KBPS, '')) * units.k\n        return capacity_int\n\n    def get_sas_data_list(self, command, contains_fields):\n        sas_data = {}\n        res = self.do_exec(command)\n        if res:\n            res_list = res.strip().replace('\\r', '').split('\\n\\n')\n            for object_str in (res_list or []):\n                if contains_fields not in object_str:\n                    continue\n                object_list = object_str.split('\\n')\n                sas_object = {}\n                sas_data_key = None\n                for row_str in (object_list or []):\n                    if consts.COLON not in row_str:\n                        continue\n                    object_num = row_str.rindex(consts.COLON)\n                    object_key = row_str[:object_num].strip()\n                    object_num_one = object_num + consts.digital_constant. 
\\\n                        ONE_INT\n                    sas_object[object_key] = row_str[object_num_one:].strip()\n                    if consts.FIELDS_LINK_STATUS in row_str:\n                        sas_data_num = row_str.index(' ')\n                        sas_data_key = row_str[:sas_data_num]\n                sas_data[sas_data_key] = sas_object\n        return sas_data\n\n    @staticmethod\n    def get_port_type(fc_port_id_lower):\n        if constants.PortType.FC in fc_port_id_lower:\n            port_type = constants.PortType.FC\n        elif constants.PortType.ISCSI in fc_port_id_lower:\n            port_type = constants.PortType.ISCSI\n        elif constants.PortType.SAS in fc_port_id_lower:\n            port_type = constants.PortType.SAS\n        elif constants.PortType.ETH in fc_port_id_lower:\n            port_type = constants.PortType.ETH\n        else:\n            port_type = constants.PortType.OTHER\n        return port_type\n\n    @staticmethod\n    def numbers_character(character_string):\n        for character in list(character_string):\n            if character.isdigit():\n                return character\n\n    def get_initiators_host_relation(self):\n        initiators_host = {}\n        initiators = self.get_data_list(\n            consts.CLIENT_INITIATOR_GETLIST, consts.FIELDS_INITIATOR_ALIAS)\n        for initiator in initiators:\n            host_id = initiator.get('MappedClient')\n            initiators_host[host_id] = initiator\n        return initiators_host\n\n    def collect_perf_metrics(self, storage_id, resource_metrics, start_time,\n                             end_time):\n        metrics = []\n        if not self.down_lock:\n            return metrics\n        LOG.info('The system(storage_id: %s) starts to collect macro_san'\n                 ' performance, start_time: %s, end_time: %s',\n                 storage_id, start_time, end_time)\n        resource_storage = resource_metrics.get(constants.ResourceType.STORAGE)\n        if resource_storage:\n            storage_metrics = self.get_storage_metrics(\n                end_time, resource_storage, start_time, storage_id)\n            metrics.extend(storage_metrics)\n            LOG.info('The system(storage_id: %s) finished collecting storage'\n                     ' performance, the length is: %s',\n                     storage_id, len(storage_metrics))\n        resource_volume = resource_metrics.get(constants.ResourceType.VOLUME)\n        if resource_volume:\n            volume_metrics = self.get_volume_metrics(\n                end_time, resource_volume, start_time, storage_id)\n            metrics.extend(volume_metrics)\n            LOG.info('The system(storage_id: %s) finished collecting volume'\n                     ' performance, the length is: %s',\n                     storage_id, len(volume_metrics))\n        file_name_map = self.get_identification()\n        resource_port = resource_metrics.get(constants.ResourceType.PORT)\n        if resource_port:\n            sas_port_metrics = self.get_port_metrics(\n                end_time, resource_port, start_time, storage_id,\n                consts.SAS_PORT, consts.SASPORT_REGULAR)\n            metrics.extend(sas_port_metrics)\n            LOG.info('The system(storage_id: %s) finished collecting sas port'\n                     ' performance, the length is: %s',\n                     storage_id, len(sas_port_metrics))\n            if file_name_map:\n                fc_port_metrics = self.get_fc_port_metrics(\n                    end_time, resource_port, start_time, 
storage_id,\n                    file_name_map)\n                metrics.extend(fc_port_metrics)\n                LOG.info('The system(storage_id: %s) finished collecting fc port'\n                         ' performance, the length is: %s', storage_id,\n                         len(fc_port_metrics))\n        resource_disk = resource_metrics.get(constants.ResourceType.DISK)\n        if resource_disk and file_name_map:\n            disk_metrics = self.get_disk_metrics(\n                end_time, resource_disk, start_time, storage_id, file_name_map)\n            metrics.extend(disk_metrics)\n            LOG.info('The system(storage_id: %s) finished collecting disk'\n                     ' performance, the length is: %s',\n                     storage_id, len(disk_metrics))\n        return metrics\n\n    def get_fc_port_metrics(self, end_time, resource_disk, start_time,\n                            storage_id, file_name_map):\n        local_path = self.down_perf_file(consts.FC_PORT, storage_id,\n                                         consts.FCPORT_REGULAR)\n        disk_metrics = []\n        if local_path:\n            metrics_data = None\n            try:\n                metrics_data = self.analysis_per_file(\n                    local_path, start_time, end_time,\n                    consts.FC_PORT, file_name_map)\n            except Exception as e:\n                LOG.error('Failed to analyze fc port perf file %s' % (\n                    six.text_type(e)))\n            finally:\n                shutil.rmtree(local_path)\n            if metrics_data:\n                disk_metrics = self.packaging_metrics(\n                    storage_id, metrics_data, resource_disk,\n                    constants.ResourceType.PORT)\n        return disk_metrics\n\n    def get_disk_metrics(self, end_time, resource_disk, start_time,\n                         storage_id, file_name_map):\n        local_path = self.down_perf_file(\n            constants.ResourceType.DISK, storage_id,\n            consts.DISK_REGULAR)\n        disk_metrics = []\n        if local_path:\n            metrics_data = None\n            try:\n                metrics_data = self.analysis_per_file(\n                    local_path, start_time, end_time,\n                    constants.ResourceType.DISK, file_name_map)\n            except Exception as e:\n                LOG.error('Failed to analyze disk perf file %s' % (\n                    six.text_type(e)))\n            finally:\n                shutil.rmtree(local_path)\n            if metrics_data:\n                disk_metrics = self.packaging_metrics(\n                    storage_id, metrics_data, resource_disk,\n                    constants.ResourceType.DISK)\n        return disk_metrics\n\n    def get_port_metrics(self, end_time, resource_port, start_time,\n                         storage_id, folder, pattern):\n        local_path = self.down_perf_file(folder, storage_id, pattern)\n        sas_port_metrics = []\n        if local_path:\n            metrics_data = None\n            try:\n                metrics_data = self.analysis_per_file(\n                    local_path, start_time, end_time, folder)\n            except Exception as e:\n                LOG.error('Failed to analyze sas port perf file %s' % (\n                    six.text_type(e)))\n            finally:\n                shutil.rmtree(local_path)\n            if metrics_data:\n                sas_port_metrics = self.packaging_metrics(\n                    storage_id, metrics_data, resource_port,\n                    
constants.ResourceType.PORT)\n        return sas_port_metrics\n\n    def get_volume_metrics(self, end_time, resource_volume, start_time,\n                           storage_id):\n        local_path = self.down_perf_file(\n            constants.ResourceType.VOLUME, storage_id, consts.LUN_REGULAR)\n        volume_metrics = []\n        if local_path:\n            metrics_data = None\n            try:\n                uuid_map = self.get_volume_uuid()\n                metrics_data = self.analysis_per_file(\n                    local_path, start_time, end_time,\n                    constants.ResourceType.VOLUME, uuid_map)\n            except Exception as e:\n                LOG.error('Failed to analyze volume perf file %s' % (\n                    six.text_type(e)))\n            finally:\n                shutil.rmtree(local_path)\n            if metrics_data:\n                volume_metrics = self.packaging_metrics(\n                    storage_id, metrics_data, resource_volume,\n                    constants.ResourceType.VOLUME)\n        return volume_metrics\n\n    def get_storage_metrics(self, end_time, resource_storage, start_time,\n                            storage_id):\n        local_path = self.down_perf_file(constants.ResourceType.STORAGE,\n                                         storage_id, consts.STRAGE_REGULAR)\n        storage_metrics = []\n        if local_path:\n            metrics_data = None\n            try:\n                metrics_data = self.analysis_per_file(\n                    local_path, start_time, end_time,\n                    constants.ResourceType.STORAGE)\n            except Exception as e:\n                LOG.error('Failed to analyze storage perf file %s' % (\n                    six.text_type(e)))\n            finally:\n                shutil.rmtree(local_path)\n            if metrics_data:\n                resource_id, resource_name = self.get_storages()\n                storage_metrics = self.storage_packaging_data(\n                    storage_id, metrics_data, resource_storage,\n                    resource_id, resource_name)\n        return storage_metrics\n\n    def get_storages(self):\n        storage_data_map = self.get_data_query(consts.SYSTEM_QUERY)\n        device_uuid = storage_data_map.get('DeviceUUID')\n        storage_name = storage_data_map.get('DeviceName')\n        resource_name = storage_name if storage_name else device_uuid\n        resource_id = '{}:{}'.format(self.ssh_host, device_uuid)\n        return resource_id, resource_name\n\n    def down_perf_file(self, folder, storage_id, pattern):\n        sftp = None\n        tar = None\n        local_path = ''\n        try:\n            ssh = self.ssh_pool.create()\n            sftp = ssh.open_sftp()\n            file_name_list = sftp.listdir(consts.FTP_PERF_PATH)\n            ms_path = os.getcwd()\n            localtime = int(round(time.time() * 1000))\n            local_path = consts.ADD_FOLDER.format(\n                ms_path, folder, storage_id, localtime)\n            os.mkdir(local_path)\n            for file_name in file_name_list:\n                title_pattern = re.compile(pattern)\n                title_search_obj = title_pattern.search(file_name)\n                if title_search_obj:\n                    local_path_file = '{}/{}'.format(local_path, file_name)\n                    ftp_path = '{}/{}'.format(consts.FTP_PERF_PATH, file_name)\n                    sftp.get(ftp_path, local_path_file)\n                    if consts.CSV in file_name:\n                        continue\n      
              tar = tarfile.open(local_path_file)\n                    tar.extractall(local_path)\n                    tar.close()\n        except Exception as e:\n            LOG.error('Failed to download perf file %s macro_san %s' %\n                      (folder, six.text_type(e)))\n        if sftp:\n            sftp.close()\n        if tar:\n            tar.close()\n        return local_path\n\n    def get_identification(self):\n        identification = {}\n        controller = self.get_controller()\n        if not controller:\n            return identification\n        files = self.get_data_list(\n            consts.SYSTEM_PERFORMANCE_FILE, consts.FIELDS_NAME,\n            sleep_time=consts.digital_constant.TWELVE_INT,\n            mix_time=consts.digital_constant.SIXTY)\n        for file in files:\n            sp = file.get('SPName')\n            file_name = file.get('FileName')\n            if controller != sp or not file_name:\n                continue\n            identification[file_name] = file.get('ObjectName')\n        return identification\n\n    def get_controller(self):\n        res = self.ssh_pool.do_exec_shell([consts.VERSION_SHOW],\n                                          consts.digital_constant.ONE_INT)\n        if res:\n            res_list = res.strip().replace('\\r', '').split('\\n')\n            for res in res_list:\n                if consts.SPECIAL_VERSION in res:\n                    controller = res.replace(' ', '').replace(\n                        consts.SPECIAL_VERSION, '')\n                    return controller\n\n    def get_volume_uuid(self):\n        uuid_map = {}\n        pools = self.get_data_list(consts.POOL_LIST, consts.FIELDS_NAME)\n        for pool in pools:\n            pool_name = pool.get('Name')\n            lun_list = self.get_data_list(\n                consts.LUN_LIST.format(pool_name), consts.FIELDS_NAME)\n            for lun in lun_list:\n                lun_name = lun.get('Name')\n                lun_query = self.get_data_query(\n                    consts.LUN_QUERY.format(lun_name))\n                uuid = lun_query.get('LUNUUID')\n                uuid_map[uuid] = lun_name\n        return uuid_map\n\n    def analysis_per_file(self, local_path, start_time, end_time,\n                          resource_type, uuid_map=None):\n        resource_key_data = {}\n        resource_key = None\n        if constants.ResourceType.STORAGE == resource_type:\n            resource_key = resource_type\n        list_dir = os.listdir(local_path)\n        data = {}\n        for dir_name in list_dir:\n            dir_name = dir_name.replace(' ', '')\n            if consts.CSV not in dir_name:\n                continue\n            resource_key = self.get_resource_key(dir_name, resource_key,\n                                                 resource_type, uuid_map)\n            resource_data = resource_key_data.get(resource_key)\n            if resource_data:\n                data = resource_data\n            with codecs.open('{}/{}'.format(local_path, dir_name),\n                             encoding='utf-8-sig') as f:\n                for row in csv.DictReader(\n                        line.replace('\\0', '') for line in f):\n                    time_str = row.get('')\n                    timestamp_s = self.get_timestamp_s(time_str)\n                    timestamp_ms = timestamp_s * units.k\n                    if timestamp_ms < start_time or timestamp_ms >= end_time:\n                        continue\n                    row_data, timestamp = self.get_perf_data(row, timestamp_s)\n               
     data[timestamp] = row_data\n            resource_key_data[resource_key] = data\n        return resource_key_data\n\n    @staticmethod\n    def get_resource_key(dir_name, resource_key, resource_type, uuid_map):\n        if constants.ResourceType.VOLUME == resource_type:\n            uuid_list = dir_name.replace(consts.PERF_LUN, '').split(\n                consts.PERF_SP)\n            uuid = uuid_list[consts.digital_constant.ZERO_INT]\n            resource_key = uuid_map.get(uuid)\n        if consts.SAS_PORT == resource_type:\n            uuid_list = dir_name.replace(consts.PERF_SAS_PORT, '').split(\n                consts.PERF_SP)\n            resource_key = uuid_list[consts.digital_constant.ZERO_INT] \\\n                .replace('_', ':')\n        if constants.ResourceType.DISK == resource_type or \\\n                consts.FC_PORT == resource_type:\n            resource_key = uuid_map.get(dir_name) if \\\n                uuid_map.get(dir_name) else \\\n                uuid_map.get(dir_name.replace('.csv', '.tgz'))\n        return resource_key\n\n    @staticmethod\n    def get_perf_data(row, timestamp_s):\n        timestamp = int(timestamp_s / consts.SIXTY) * consts.SIXTY * units.k\n        throughput = round(\n            (int(row.get('r&w/throughput(B)')) / units.Mi), 3)\n        r_throughput = round(\n            (int(row.get('r/throughput(B)')) / units.Mi), 3)\n        w_throughput = round(\n            (int(row.get('w/throughput(B)')) / units.Mi), 3)\n        response = round(\n            int(row.get('r&w/avg_rsp_time(us)')) / units.k, 3)\n        r_response = round(\n            int(row.get('r/avg_rsp_time(us)')) / units.k, 3)\n        w_response = round(\n            int(row.get('w/avg_rsp_time(us)')) / units.k, 3)\n        cache_hit_ratio = round(\n            int(row.get('r&w/cacherate(%*100)')), 3)\n        r_cache_hit_ratio = round(\n            int(row.get('r/cacherate(%*100)')), 3)\n        w_cache_hit_ratio = round(\n            int(row.get('w/cacherate(%*100)')), 3)\n        row_data = {\n            constants.StorageMetric.IOPS.name: round(\n                int(row.get('r&w/iops')), 3),\n            constants.StorageMetric.READ_IOPS.name: round(\n                int(row.get('r/iops')), 3),\n            constants.StorageMetric.WRITE_IOPS.name: round(\n                int(row.get('w/iops')), 3),\n            constants.StorageMetric.THROUGHPUT.name: throughput,\n            constants.StorageMetric.READ_THROUGHPUT.name: r_throughput,\n            constants.StorageMetric.WRITE_THROUGHPUT.name: w_throughput,\n            constants.StorageMetric.RESPONSE_TIME.name: response,\n            constants.StorageMetric.READ_RESPONSE_TIME.name: r_response,\n            constants.StorageMetric.WRITE_RESPONSE_TIME.name: w_response,\n            constants.StorageMetric.CACHE_HIT_RATIO.name: cache_hit_ratio,\n            constants.StorageMetric.READ_CACHE_HIT_RATIO.name:\n                r_cache_hit_ratio,\n            constants.StorageMetric.WRITE_CACHE_HIT_RATIO.name:\n                w_cache_hit_ratio\n        }\n        return row_data, timestamp\n\n    @staticmethod\n    def storage_packaging_data(storage_id, metrics_data, resource_metrics,\n                               resource_id, resource_name):\n        metrics = []\n        for resource_key in resource_metrics.keys():\n            labels = {\n                'storage_id': storage_id,\n                'resource_type': constants.ResourceType.STORAGE,\n                'resource_id': resource_id,\n                'resource_name': 
resource_name,\n                'type': 'RAW',\n                'unit': resource_metrics[resource_key]['unit']\n            }\n            resource_value = {}\n            time_key_data = metrics_data.get(constants.ResourceType.STORAGE)\n            for time_key in time_key_data.keys():\n                resource_key_data = time_key_data.get(time_key)\n                resource_data = resource_key_data.get(resource_key)\n                resource_value[time_key] = resource_data\n            metrics_res = constants.metric_struct(\n                name=resource_key, labels=labels, values=resource_value)\n            metrics.append(metrics_res)\n        return metrics\n\n    @staticmethod\n    def packaging_metrics(storage_id, metrics_data, resource_metrics,\n                          resource_type):\n        metrics = []\n        for resource_id in metrics_data.keys():\n            for resource_key in resource_metrics.keys():\n                labels = {\n                    'storage_id': storage_id,\n                    'resource_type': resource_type,\n                    'resource_id': resource_id,\n                    'resource_name': resource_id,\n                    'type': 'RAW',\n                    'unit': resource_metrics[resource_key]['unit']\n                }\n                resource_value = {}\n                resource_data = metrics_data.get(resource_id)\n                for time_key in resource_data.keys():\n                    resource_value[time_key] = \\\n                        resource_data.get(time_key, {}).get(resource_key)\n                if resource_value:\n                    metrics_res = constants.metric_struct(\n                        name=resource_key, labels=labels,\n                        values=resource_value)\n                    metrics.append(metrics_res)\n        return metrics\n\n    @staticmethod\n    def get_timestamp_s(time_str):\n        timestamp_s = \\\n            int(datetime.datetime.strptime(\n                time_str, consts.MACRO_SAN_TIME_FORMAT).timestamp())\n        return timestamp_s\n\n    def get_latest_perf_timestamp(self):\n        timestamp = None\n        if not self.down_lock:\n            return timestamp\n        res = self.ssh_pool.do_exec_shell([consts.GET_DATE])\n        if res:\n            res_list = res.strip().replace('\\r', '').split('\\n')\n            for row in res_list:\n                if row.isdigit():\n                    timestamp = int(\n                        int(row) / consts.SIXTY) * consts.SIXTY * units.k\n        return timestamp\n"
  },
  {
    "path": "delfin/drivers/macro_san/ms/ms_stor.py",
    "content": "# Copyright 2022 The SODA Authors.\n# Copyright (c) 2022 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nfrom oslo_log import log\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom delfin.drivers.macro_san.ms import ms_handler, consts\nfrom delfin.drivers.macro_san.ms.ms_handler import MsHandler\n\nLOG = log.getLogger(__name__)\n\n\nclass MacroSanDriver(driver.StorageDriver):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.ms_handler = ms_handler.MsHandler(**kwargs)\n        self.login = self.ms_handler.login()\n\n    def get_storage(self, context):\n        return self.ms_handler.get_storage(self.storage_id)\n\n    def list_storage_pools(self, context):\n        return self.ms_handler.list_storage_pools(self.storage_id)\n\n    def list_volumes(self, context):\n        return self.ms_handler.list_volumes(self.storage_id)\n\n    def list_controllers(self, context):\n        return self.ms_handler.list_controllers(self.storage_id)\n\n    def list_disks(self, context):\n        return self.ms_handler.list_disks(self.storage_id)\n\n    def list_ports(self, context):\n        return self.ms_handler.list_ports(self.storage_id)\n\n    def list_alerts(self, context, query_para=None):\n        raise NotImplementedError(\n            \"Macro_SAN Driver SSH list_alerts() is not Implemented\")\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return MsHandler.parse_alert(alert)\n\n    def clear_alert(self, context, alert):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def reset_connection(self, context, **kwargs):\n        pass\n\n    def collect_perf_metrics(self, context, storage_id,\n                             resource_metrics, start_time, end_time):\n        return self.ms_handler.collect_perf_metrics(\n            self.storage_id, resource_metrics, start_time, end_time)\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        return {\n            'is_historic': True,\n            'resource_metrics': {\n                constants.ResourceType.STORAGE: consts.STORAGE_CAP,\n                constants.ResourceType.VOLUME: consts.VOLUME_CAP,\n                constants.ResourceType.PORT: consts.PORT_CAP,\n                constants.ResourceType.DISK: consts.DISK_CAP\n            }\n        }\n\n    def get_latest_perf_timestamp(self, context):\n        return self.ms_handler.get_latest_perf_timestamp()\n\n    def list_storage_host_initiators(self, context):\n        return self.ms_handler.list_storage_host_initiators(self.storage_id)\n\n    def list_storage_hosts(self, context):\n        host_list = self.ms_handler.list_storage_hosts_new(self.storage_id)\n        if not host_list:\n            host_list = self.ms_handler.list_storage_hosts_old(self.storage_id)\n        return 
host_list\n\n    def list_storage_host_groups(self, context):\n        return self.ms_handler.list_storage_host_groups(self.storage_id)\n\n    def list_volume_groups(self, context):\n        return self.ms_handler.list_volume_groups(self.storage_id)\n\n    def list_masking_views(self, context):\n        views = self.ms_handler.list_masking_views_new(self.storage_id)\n        if not views:\n            views = self.ms_handler.list_masking_views_old(self.storage_id)\n        return views\n"
  },
  {
    "path": "delfin/drivers/manager.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport six\nimport stevedore\nimport threading\n\nfrom oslo_log import log\n\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import utils\nfrom delfin import ssl_utils\nfrom delfin.common import constants\n\nLOG = log.getLogger(__name__)\n\n\n@six.add_metaclass(utils.Singleton)\nclass DriverManager(stevedore.ExtensionManager):\n    _instance_lock = threading.Lock()\n    NAMESPACE = 'delfin.storage.drivers'\n\n    def __init__(self):\n        super(DriverManager, self).__init__(self.NAMESPACE)\n        # The driver_factory will keep the driver instance for\n        # each of storage systems so that the session between driver\n        # and storage system is effectively used.\n        self.driver_factory = dict()\n\n    def get_driver(self, context, invoke_on_load=True,\n                   cache_on_load=True, **kwargs):\n        \"\"\"Get a driver from manager.\n\n        :param context: The context of delfin.\n        :type context: delfin.context.RequestContext\n        :param invoke_on_load: Boolean to decide whether to return the\n            driver object.\n        :type invoke_on_load: bool\n        :param cache_on_load: Boolean to decide whether save driver object\n            in driver_factory when generating a new driver object.\n            It takes effect when invoke_on_load is True.\n        :type cache_on_load: bool\n        :param kwargs: Parameters from access_info.\n        \"\"\"\n        context.storage_id = kwargs.get('storage_id')\n        kwargs = copy.deepcopy(kwargs)\n        kwargs['verify'] = False\n        ca_path = ssl_utils.get_storage_ca_path()\n        if ca_path:\n            ssl_utils.verify_ca_path(ca_path)\n            kwargs['verify'] = ca_path\n\n        if not invoke_on_load:\n            return self._get_driver_cls(**kwargs)\n        else:\n            return self._get_driver_obj(context, cache_on_load, **kwargs)\n\n    def update_driver(self, storage_id, driver):\n        self.driver_factory[storage_id] = driver\n\n    def remove_driver(self, storage_id):\n        \"\"\"Clear driver instance from driver factory.\"\"\"\n        self.driver_factory.pop(storage_id, None)\n\n    def _get_driver_obj(self, context, cache_on_load=True, **kwargs):\n        if not cache_on_load or not kwargs.get('storage_id'):\n            if kwargs['verify']:\n                ssl_utils.reload_certificate(kwargs['verify'])\n            cls = self._get_driver_cls(**kwargs)\n            return cls(**kwargs)\n\n        if kwargs['storage_id'] in self.driver_factory:\n            return self.driver_factory[kwargs['storage_id']]\n\n        with self._instance_lock:\n            if kwargs['storage_id'] in self.driver_factory:\n                return self.driver_factory[kwargs['storage_id']]\n\n            if kwargs['verify']:\n                ssl_utils.reload_certificate(kwargs['verify'])\n\n            access_info = copy.deepcopy(kwargs)\n           
 storage_id = access_info.pop('storage_id')\n            access_info.pop('verify')\n            if access_info:\n                cls = self._get_driver_cls(**kwargs)\n                driver = cls(**kwargs)\n            else:\n                access_info = db.access_info_get(\n                    context, storage_id).to_dict()\n\n                access_info_dict = copy.deepcopy(access_info)\n                remove_fields = ['created_at', 'updated_at',\n                                 'storage_id', 'storage_name',\n                                 'extra_attributes']\n                # Remove unrelated query fields\n                for field in remove_fields:\n                    if access_info_dict.get(field):\n                        access_info_dict.pop(field)\n                for access in constants.ACCESS_TYPE:\n                    if access_info_dict.get(access):\n                        access_info_dict.pop(access)\n\n                access_info_list = db.access_info_get_all(\n                    context, filters=access_info_dict)\n                for _access_info in access_info_list:\n                    if _access_info['storage_id'] in self.driver_factory:\n                        driver = self.driver_factory[\n                            _access_info['storage_id']]\n                        driver.add_storage(access_info)\n                        self.driver_factory[storage_id] = driver\n                        return driver\n                access_info['verify'] = kwargs.get('verify')\n                cls = self._get_driver_cls(**access_info)\n                driver = cls(**access_info)\n\n            self.driver_factory[storage_id] = driver\n            return driver\n\n    def _get_driver_cls(self, **kwargs):\n        \"\"\"Get driver class from entry points.\"\"\"\n        name = '%s %s' % (kwargs.get('vendor'), kwargs.get('model'))\n        if name in self.names():\n            return self[name].plugin\n\n        msg = \"Storage driver '%s' could not be found.\" % name\n        LOG.error(msg)\n        raise exception.StorageDriverNotFound(name)\n"
  },
  {
    "path": "delfin/drivers/netapp/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/netapp/dataontap/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/netapp/dataontap/cluster_mode.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.drivers import driver\nfrom delfin.drivers.netapp.dataontap import netapp_handler\nfrom delfin.drivers.netapp.dataontap.netapp_handler import NetAppHandler\n\n\nclass NetAppCmodeDriver(driver.StorageDriver):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.netapp_handler = netapp_handler.NetAppHandler(**kwargs)\n        self.netapp_handler.login()\n\n    def reset_connection(self, context, **kwargs):\n        self.netapp_handler.login()\n\n    def get_storage(self, context):\n        return self.netapp_handler.get_storage()\n\n    def list_storage_pools(self, context):\n        return self.netapp_handler.list_storage_pools(self.storage_id)\n\n    def list_volumes(self, context):\n        return self.netapp_handler.list_volumes(self.storage_id)\n\n    def list_controllers(self, context):\n        return self.netapp_handler.list_controllers(self.storage_id)\n\n    def list_ports(self, context):\n        return self.netapp_handler.list_ports(self.storage_id)\n\n    def list_disks(self, context):\n        return self.netapp_handler.list_disks(self.storage_id)\n\n    def list_alerts(self, context, query_para=None):\n        return self.netapp_handler.list_alerts(query_para)\n\n    def list_qtrees(self, context):\n        return self.netapp_handler.list_qtrees(self.storage_id)\n\n    def list_quotas(self, context):\n        return self.netapp_handler.list_quotas(self.storage_id)\n\n    def list_filesystems(self, context):\n        return self.netapp_handler.list_filesystems(self.storage_id)\n\n    def list_shares(self, context):\n        return self.netapp_handler.list_shares(self.storage_id)\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        return NetAppHandler.parse_alert(alert)\n\n    def clear_alert(self, context, alert):\n        return self.netapp_handler.clear_alert(alert)\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}'\n\n    def get_alert_sources(self, context):\n        return self.netapp_handler.get_alert_sources()\n\n    def collect_perf_metrics(self, context, storage_id,\n                             resource_metrics, start_time, end_time):\n        return self.netapp_handler.collect_perf_metrics(\n            storage_id, resource_metrics, start_time, end_time)\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        return NetAppHandler.get_capabilities(filters)\n\n    def get_latest_perf_timestamp(self, context):\n        return self.netapp_handler.get_latest_perf_timestamp()\n\n    def list_storage_host_initiators(self, context):\n        return self.netapp_handler.\\\n            list_storage_host_initiators(self.storage_id)\n\n    def list_port_groups(self, context):\n        return 
self.netapp_handler.list_port_groups(self.storage_id)\n\n    def list_masking_views(self, context):\n        return self.netapp_handler.list_masking_views(self.storage_id)\n\n    def list_storage_hosts(self, context):\n        return self.netapp_handler.list_storage_hosts(self.storage_id)\n"
  },
  {
    "path": "delfin/drivers/netapp/dataontap/constants.py",
    "content": "# Copyright 2021 The SODA Authors.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport re\n\nfrom delfin.common import constants\n\nSOCKET_TIMEOUT = 15\nAUTH_KEY = 'Authorization'\n\nRETURN_SUCCESS_CODE = 200\nCREATED_SUCCESS_CODE = 201\nACCEPTED_RETURN_CODE = 202\nBAD_REQUEST_RETURN_CODE = 400\nUNAUTHORIZED_RETURN_CODE = 401\nFORBIDDEN_RETURN_CODE = 403\nNOT_FOUND_RETURN_CODE = 404\nMETHOD_NOT_ALLOWED_CODE = 405\nCONFLICT_RETURN_CODE = 409\nINTERNAL_ERROR_CODE = 500\n\nHOUR_STAMP = '1h'\nDAY_STAMP = '1d'\nMONTH_STAMP = '1m'\nWEEK_STAMP = '1w'\nYEAR_STAMP = '1y'\n\nCLUSTER_PERF_URL = '/api/cluster/metrics?interval=1h&fields=iops,' \\\n                   'throughput,latency'\n\nPOOL_PERF_URL = '/api/storage/aggregates/%s/metrics?interval=1h&'\\\n                'fields=iops,throughput,latency'\nVOLUME_PERF_URL = '/api/storage/luns/%s/metrics?interval=1h&fields=iops,'\\\n                  'throughput,latency'\nFS_PERF_URL = '/api/storage/volumes/%s/metrics?interval=1h&fields=iops,'\\\n              'throughput,latency'\nFC_PERF_URL = '/api/network/fc/ports/%s/metrics?interval=1h&fields=iops,'\\\n              'throughput,latency'\nETH_PERF_URL = '/api/network/ethernet/ports/%s/metrics?interval=1h&'\\\n               'fields=throughput'\n\nFS_INFO_URL = '/api/storage/volumes?fields=svm'\nFC_INFO_URL = '/api/network/fc/ports'\nETH_INFO_URL = '/api/network/ethernet/ports?fields=node'\nPERF_MAP = {\n    'iops': ['iops', 'total'],\n    'readIops': ['iops', 'read'],\n    'writeIops': ['iops', 'write'],\n    'throughput': ['throughput', 'total'],\n    'readThroughput': ['throughput', 'read'],\n    'writeThroughput': ['throughput', 'write'],\n    'responseTime': ['latency', 'total']\n}\n\nPATTERN = re.compile('^[-]{3,}')\nFLOAT_PATTERN = r\"\\d\\.\\d\"\nIP_PATTERN = re.compile(r'(([01]{0,1}\\d{0,1}\\d|2[0-4]\\d|25[0-5])\\.){3}'\n                        r'([01]{0,1}\\d{0,1}\\d|2[0-4]\\d|25[0-5])$')\nIQN_PATTERN = re.compile('^[i][q][n].')\nWWN_PATTERN = re.compile('^(([a-z|0-9]){2}:){7}(([a-z|0-9]){2})')\nINITIATOR_KEY = 'loggedin'\nCLUSTER_SHOW_COMMAND = \"cluster identity show\"\nVERSION_SHOW_COMMAND = \"version\"\nSTORAGE_STATUS_COMMAND = \"system health status show\"\n\nPOOLS_SHOW_DETAIL_COMMAND = \"storage pool show -instance\"\nAGGREGATE_SHOW_DETAIL_COMMAND = \"storage aggregate show -instance\"\n\nFS_SHOW_DETAIL_COMMAND = \"vol show -instance\"\n\nTHIN_FS_SHOW_COMMAND = \"vol show -space-guarantee none\"\n\nALTER_SHOW_DETAIL_COMMAND = \"system health alert show -instance\"\nEVENT_SHOW_DETAIL_COMMAND = \"event show -instance -severity EMERGENCY\"\nEVENT_TIME_TYPE = '%m/%d/%Y %H:%M:%S'\n\nALTER_TIME_TYPE = '%a %b %d %H:%M:%S %Y'\n\nCLEAR_ALERT_COMMAND = \\\n    \"system health alert delete -alerting-resource * -alert-id\"\n\nDISK_SHOW_DETAIL_COMMAND = \"disk show -instance\"\nDISK_SHOW_PHYSICAL_COMMAND = \"disk show -physical\"\nDISK_ERROR_COMMAND = \"disk error show\"\n\nLUN_SHOW_DETAIL_COMMAND = \"lun show 
-instance\"\n\nCONTROLLER_SHOW_DETAIL_COMMAND = \"node show -instance\"\n\nPORT_SHOW_DETAIL_COMMAND = \"network port show -instance\"\nINTERFACE_SHOW_DETAIL_COMMAND = \"network interface show -instance\"\nFC_PORT_SHOW_DETAIL_COMMAND = \"fcp adapter show -instance\"\n\nQTREE_SHOW_DETAIL_COMMAND = \"qtree show -instance\"\n\nCIFS_SHARE_SHOW_DETAIL_COMMAND = \"vserver cifs share show -instance\" \\\n                                 \" -vserver %(vserver_name)s\"\nSHARE_AGREEMENT_SHOW_COMMAND = \"vserver show -fields Allowed-protocols\"\nVSERVER_SHOW_COMMAND = \"vserver show -type data\"\nNFS_SHARE_SHOW_COMMAND = \"volume show -junction-active true -instance\"\n\nSTORAGE_VENDOR = \"NetApp\"\nSTORAGE_MODEL = \"cmodel\"\n\nQUOTA_SHOW_DETAIL_COMMAND = \"volume quota policy rule show -instance\"\n\nMGT_IP_COMMAND = \"network interface show -fields address -role cluster-mgmt\"\nNODE_IP_COMMAND = \"network interface show -fields address -role node-mgmt\"\n\nCONTROLLER_IP_COMMAND = \"network interface show -fields \" \\\n                        \"curr-node,address -role node-mgmt\"\n\nHOST_COMMAND = \"igroup show -instance\"\n\nPORT_GROUP_COMMAND = \"portset show -instance\"\nLIF_COMMAND = \"network interface show -instance\"\n\nFC_INITIATOR_COMMAND = \"fcp initiator show -instance\"\n\nISCSI_INITIATOR_COMMAND = \"iscsi initiator show -instance\"\n\nLUN_MAPPING_COMMAND = 'lun mapping show -instance'\n\nSECURITY_STYLE = {\n    'mixed': constants.NASSecurityMode.MIXED,\n    'ntfs': constants.NASSecurityMode.NTFS,\n    'unix': constants.NASSecurityMode.UNIX\n}\n\nSTORAGE_STATUS = {\n    'ok': constants.StorageStatus.NORMAL,\n    'ok-with-suppressed': constants.StorageStatus.NORMAL,\n    'degraded': constants.StorageStatus.DEGRADED,\n    'unreachable': constants.StorageStatus.ABNORMAL,\n    'unknown': constants.StorageStatus.ABNORMAL\n}\n\nAGGREGATE_STATUS = {\n    'online': constants.StoragePoolStatus.NORMAL,\n    'creating': constants.StoragePoolStatus.NORMAL,\n    'mounting': constants.StoragePoolStatus.NORMAL,\n    'relocating': constants.StoragePoolStatus.NORMAL,\n    'quiesced': constants.StoragePoolStatus.NORMAL,\n    'quiescing': constants.StoragePoolStatus.NORMAL,\n    'unmounted': constants.StoragePoolStatus.OFFLINE,\n    'unmounting': constants.StoragePoolStatus.OFFLINE,\n    'destroying': constants.StoragePoolStatus.ABNORMAL,\n    'partial': constants.StoragePoolStatus.ABNORMAL,\n    'frozen': constants.StoragePoolStatus.ABNORMAL,\n    'reverted': constants.StoragePoolStatus.NORMAL,\n    'restricted': constants.StoragePoolStatus.NORMAL,\n    'inconsistent': constants.StoragePoolStatus.ABNORMAL,\n    'iron_restricted': constants.StoragePoolStatus.ABNORMAL,\n    'unknown': constants.StoragePoolStatus.ABNORMAL,\n    'offline': constants.StoragePoolStatus.OFFLINE,\n    'failed': constants.StoragePoolStatus.ABNORMAL,\n    'remote_cluster': constants.StoragePoolStatus.NORMAL,\n}\n\nVOLUME_STATUS = {\n    'online': constants.VolumeStatus.AVAILABLE,\n    'offline': constants.VolumeStatus.ERROR,\n    'nvfail': constants.VolumeStatus.ERROR,\n    'space-error': constants.VolumeStatus.ERROR,\n    'foreign-lun-error': constants.VolumeStatus.ERROR,\n}\n\nALERT_SEVERITY = {\n    'Unknown': constants.Severity.NOT_SPECIFIED,\n    'Other': constants.Severity.NOT_SPECIFIED,\n    'Information': constants.Severity.INFORMATIONAL,\n    'Degraded': constants.Severity.WARNING,\n    'Minor': constants.Severity.MINOR,\n    'Major': constants.Severity.MAJOR,\n    'Critical': constants.Severity.CRITICAL,\n    'Fatal': 
constants.Severity.FATAL,\n}\n\nDISK_TYPE = {\n    'ATA': constants.DiskPhysicalType.ATA,\n    'BSAS': constants.DiskPhysicalType.SATA,\n    'FCAL': constants.DiskPhysicalType.FC,\n    'FSAS': constants.DiskPhysicalType.NL_SAS,\n    'LUN': constants.DiskPhysicalType.LUN,\n    'SAS': constants.DiskPhysicalType.SAS,\n    'MSATA': constants.DiskPhysicalType.SATA,\n    'SSD': constants.DiskPhysicalType.SSD,\n    'VMDISK': constants.DiskPhysicalType.VMDISK,\n    'unknown': constants.DiskPhysicalType.UNKNOWN,\n}\n\nDISK_LOGICAL = {\n    'aggregate': constants.DiskLogicalType.AGGREGATE,\n    'spare': constants.DiskLogicalType.SPARE,\n    'unknown': constants.DiskLogicalType.UNKNOWN,\n    'free': constants.DiskLogicalType.FREE,\n    'broken': constants.DiskLogicalType.BROKEN,\n    'foreign': constants.DiskLogicalType.FOREIGN,\n    'labelmaint': constants.DiskLogicalType.LABELMAINT,\n    'maintenance': constants.DiskLogicalType.MAINTENANCE,\n    'shared': constants.DiskLogicalType.SHARED,\n    'unassigned': constants.DiskLogicalType.UNASSIGNED,\n    'unsupported': constants.DiskLogicalType.UNSUPPORTED,\n    'remote': constants.DiskLogicalType.REMOTE,\n    'mediator': constants.DiskLogicalType.MEDIATOR,\n}\n\nFS_STATUS = {\n    'online': constants.FilesystemStatus.NORMAL,\n    'restricted': constants.FilesystemStatus.FAULTY,\n    'offline': constants.FilesystemStatus.NORMAL,\n    'force-online': constants.FilesystemStatus.FAULTY,\n    'force-offline': constants.FilesystemStatus.FAULTY,\n}\n\nNETWORK_LOGICAL_TYPE = {\n    'data': constants.PortLogicalType.DATA,\n    'cluster': constants.PortLogicalType.CLUSTER,\n    'node-mgmt': constants.PortLogicalType.NODE_MGMT,\n    'cluster-mgmt': constants.PortLogicalType.CLUSTER_MGMT,\n    'intercluster': constants.PortLogicalType.INTERCLUSTER,\n}\n\nETH_LOGICAL_TYPE = {\n    'physical': constants.PortLogicalType.PHYSICAL,\n    'if-group': constants.PortLogicalType.IF_GROUP,\n    'vlan': constants.PortLogicalType.VLAN,\n    'undef': constants.PortLogicalType.OTHER\n}\n\nFC_TYPE = {\n    'fibre-channel': constants.PortType.FC,\n    'ethernet': constants.PortType.FCOE\n}\n\nWORM_TYPE = {\n    'non-snaplock': constants.WORMType.NON_WORM,\n    'compliance': constants.WORMType.COMPLIANCE,\n    'enterprise': constants.WORMType.ENTERPRISE,\n    '-': constants.WORMType.NON_WORM\n}\n\nQUOTA_TYPE = {\n    'user': constants.QuotaType.USER,\n    'tree': constants.QuotaType.TREE,\n    'group': constants.QuotaType.GROUP\n}\n\nNETWORK_PORT_TYPE = {\n    'nfs': constants.PortType.NFS,\n    'cifs': constants.PortType.CIFS,\n    'iscsi': constants.PortType.ISCSI,\n    'fcp': constants.PortType.FC,\n    'fcache': constants.PortType.FCACHE,\n    'none': constants.PortType.OTHER,\n}\n\nSEVERITY_MAP = {\n    'AccessCache.ReachedLimits': 'EMERGENCY',\n    'LUN.inconsistent.filesystem': 'EMERGENCY',\n    'LUN.nvfail.vol.proc.failed': 'EMERGENCY',\n    'Nblade.DidNotInitialize': 'EMERGENCY',\n    'Nblade.cifsNoPrivShare': 'EMERGENCY',\n    'Nblade.nfsV4PoolExhaust': 'EMERGENCY',\n    'Nblade.vscanNoScannerConn': 'EMERGENCY',\n    'adt.dest.directory.full': 'EMERGENCY',\n    'adt.dest.directory.unavail': 'EMERGENCY',\n    'adt.dest.volume.offline': 'EMERGENCY',\n    'adt.service.block': 'EMERGENCY',\n    'adt.service.ro.filesystem': 'EMERGENCY',\n    'adt.stgvol.nospace': 'EMERGENCY',\n    'adt.stgvol.offline': 'EMERGENCY',\n    'api.engine.killed': 'EMERGENCY',\n    'app.log.emerg': 'EMERGENCY',\n    'arl.aggrOnlineFailed': 'EMERGENCY',\n    'bge.EepromCrc': 'EMERGENCY',\n    
'boot.bootmenu.issue': 'EMERGENCY',\n    'boot.varfs.backup.issue': 'EMERGENCY',\n    'bootfs.env.issue': 'EMERGENCY',\n    'callhome.battery.failure': 'EMERGENCY',\n    'callhome.ch.ps.fan.bad.xmin': 'EMERGENCY',\n    'callhome.chassis.overtemp': 'EMERGENCY',\n    'callhome.chassis.undertemp': 'EMERGENCY',\n    'callhome.clam.node.ooq': 'EMERGENCY',\n    'callhome.client.app.emerg': 'EMERGENCY',\n    'callhome.fans.failed': 'EMERGENCY',\n    'callhome.hba.failed': 'EMERGENCY',\n    'callhome.ibretimerprog.fail': 'EMERGENCY',\n    'callhome.mcc.auso.trig.fail': 'EMERGENCY',\n    'callhome.mcc.switchback.failed': 'EMERGENCY',\n    'callhome.mcc.switchover.failed': 'EMERGENCY',\n    'callhome.mdb.recovery.unsuccessful': 'EMERGENCY',\n    'callhome.netinet.dup.clustIP': 'EMERGENCY',\n    'callhome.nvram.failure': 'EMERGENCY',\n    'callhome.partner.down': 'EMERGENCY',\n    'callhome.ps.removed': 'EMERGENCY',\n    'callhome.raid.no.recover': 'EMERGENCY',\n    'callhome.raidtree.assim': 'EMERGENCY',\n    'callhome.rlm.replace': 'EMERGENCY',\n    'callhome.rlm.replace.lan': 'EMERGENCY',\n    'callhome.root.vol.recovery.reqd': 'EMERGENCY',\n    'callhome.sblade.lu.resync.to': 'EMERGENCY',\n    'callhome.sblade.lu.rst.hung': 'EMERGENCY',\n    'callhome.sblade.prop.fail': 'EMERGENCY',\n    'callhome.sfo.takeover.panic': 'EMERGENCY',\n    'callhome.shlf.fan': 'EMERGENCY',\n    'callhome.vol.space.crit': 'EMERGENCY',\n    'cf.fm.panicInToMode': 'EMERGENCY',\n    'cf.fm.reserveDisksOff': 'EMERGENCY',\n    'cf.fsm.autoGivebackAttemptsExceeded': 'EMERGENCY',\n    'cf.takeover.missing.ptnrDiskInventory': 'EMERGENCY',\n    'cf.takeover.missing.ptnrDisks': 'EMERGENCY',\n    'cft.trans.commit.failed': 'EMERGENCY',\n    'clam.node.ooq': 'EMERGENCY',\n    'config.localswitch': 'EMERGENCY',\n    'config.noBconnect': 'EMERGENCY',\n    'config.noPartnerLUNs': 'EMERGENCY',\n    'coredump.dump.failed': 'EMERGENCY',\n    'ctran.group.reset.failed': 'EMERGENCY',\n    'ctran.jpc.multiple.nodes': 'EMERGENCY',\n    'ctran.jpc.split.brain': 'EMERGENCY',\n    'ctran.jpc.valid.failed': 'EMERGENCY',\n    'disk.dynamicqual.failure.shutdown': 'EMERGENCY',\n    'ds.sas.xfer.unknown.error': 'EMERGENCY',\n    'ems.eut.prilo0_log_emerg': 'EMERGENCY',\n    'ems.eut.privar0_log_emerg_var': 'EMERGENCY',\n    'fci.adapter.firmware.update.failed': 'EMERGENCY',\n    'ha.takeoverImpHotShelf': 'EMERGENCY',\n    'haosc.invalid.config': 'EMERGENCY',\n    'license.capac.eval.shutdown': 'EMERGENCY',\n    'license.capac.shutdown': 'EMERGENCY',\n    'license.capac.unl.shutdown': 'EMERGENCY',\n    'license.subscription.enforcement': 'EMERGENCY',\n    'lmgr.aggr.CA.locks.dropped': 'EMERGENCY',\n    'lun.metafile.OOVC.corrupt': 'EMERGENCY',\n    'lun.metafile.VTOC.corrupt': 'EMERGENCY',\n    'mcc.auso.trigFailed': 'EMERGENCY',\n    'mcc.auso.triggerFailed': 'EMERGENCY',\n    'mgmtgwd.rootvol.recovery.changed': 'EMERGENCY',\n    'mgmtgwd.rootvol.recovery.different': 'EMERGENCY',\n    'mgmtgwd.rootvol.recovery.low.space': 'EMERGENCY',\n    'mgmtgwd.rootvol.recovery.new': 'EMERGENCY',\n    'mgmtgwd.rootvol.recovery.takeover.changed': 'EMERGENCY',\n    'mgr.boot.floppy_media': 'EMERGENCY',\n    'mgr.boot.reason_abnormal': 'EMERGENCY',\n    'mlm.array.portMixedAddress': 'EMERGENCY',\n    'monitor.chassisFanFail.xMinShutdown': 'EMERGENCY',\n    'monitor.fan.critical': 'EMERGENCY',\n    'monitor.globalStatus.critical': 'EMERGENCY',\n    'monitor.globalStatus.nonRecoverable': 'EMERGENCY',\n    'monitor.ioexpansionTemperature.cool': 'EMERGENCY',\n    
'monitor.mismatch.shutdown': 'EMERGENCY',\n    'monitor.nvramLowBatteries': 'EMERGENCY',\n    'monitor.power.degraded': 'EMERGENCY',\n    'monitor.shelf.accessError': 'EMERGENCY',\n    'monitor.shutdown.brokenDisk': 'EMERGENCY',\n    'monitor.shutdown.chassisOverTemp': 'EMERGENCY',\n    'monitor.shutdown.emergency': 'EMERGENCY',\n    'monitor.shutdown.ioexpansionOverTemp': 'EMERGENCY',\n    'monitor.shutdown.ioexpansionUnderTemp': 'EMERGENCY',\n    'monitor.shutdown.nvramLowBatteries': 'EMERGENCY',\n    'monitor.shutdown.nvramLowBattery': 'EMERGENCY',\n    'netif.badEeprom': 'EMERGENCY',\n    'netif.overTempError': 'EMERGENCY',\n    'netif.uncorEccError': 'EMERGENCY',\n    'netinet.ethr.dup.clustIP': 'EMERGENCY',\n    'nodewatchdog.node.failure': 'EMERGENCY',\n    'nodewatchdog.node.longreboot': 'EMERGENCY',\n    'nodewatchdog.node.panic': 'EMERGENCY',\n    'nonha.resvConflictHalt': 'EMERGENCY',\n    'nv.fio.write.err': 'EMERGENCY',\n    'nv.none': 'EMERGENCY',\n    'nv2flash.copy2NVMEM.failure': 'EMERGENCY',\n    'nv2flash.copy2flash.failure': 'EMERGENCY',\n    'nv2flash.hw.failure': 'EMERGENCY',\n    'nv2flash.initfail': 'EMERGENCY',\n    'nvmem.battery.capLowCrit': 'EMERGENCY',\n    'nvmem.battery.capacity.low': 'EMERGENCY',\n    'nvmem.battery.current.high': 'EMERGENCY',\n    'nvmem.battery.currentHigh': 'EMERGENCY',\n    'nvmem.battery.currentLow': 'EMERGENCY',\n    'nvmem.battery.discFET.off': 'EMERGENCY',\n    'nvmem.battery.fccLowCrit': 'EMERGENCY',\n    'nvmem.battery.packInvalid': 'EMERGENCY',\n    'nvmem.battery.powerFault': 'EMERGENCY',\n    'nvmem.battery.temp.high': 'EMERGENCY',\n    'nvmem.battery.tempHigh': 'EMERGENCY',\n    'nvmem.battery.unread': 'EMERGENCY',\n    'nvmem.battery.voltage.high': 'EMERGENCY',\n    'nvmem.battery.voltageHigh': 'EMERGENCY',\n    'nvmem.battery.voltageLow': 'EMERGENCY',\n    'nvmem.voltage.high': 'EMERGENCY',\n    'nvram.battery.capacity.low.critical': 'EMERGENCY',\n    'nvram.battery.charging.nocharge': 'EMERGENCY',\n    'nvram.battery.current.high': 'EMERGENCY',\n    'nvram.battery.current.low': 'EMERGENCY',\n    'nvram.battery.dischargeFET.off': 'EMERGENCY',\n    'nvram.battery.fault': 'EMERGENCY',\n    'nvram.battery.fcc.low.critical': 'EMERGENCY',\n    'nvram.battery.not.present': 'EMERGENCY',\n    'nvram.battery.power.fault': 'EMERGENCY',\n    'nvram.battery.sensor.unreadable': 'EMERGENCY',\n    'nvram.battery.temp.high': 'EMERGENCY',\n    'nvram.battery.voltage.high': 'EMERGENCY',\n    'nvram.battery.voltage.low': 'EMERGENCY',\n    'nvram.decryptionKey.unavail': 'EMERGENCY',\n    'nvram.encryptionKey.initfail': 'EMERGENCY',\n    'nvram.hw.initFail': 'EMERGENCY',\n    'platform.insufficientMemory': 'EMERGENCY',\n    'pvif.allLinksDown': 'EMERGENCY',\n    'pvif.initMemFail': 'EMERGENCY',\n    'pvif.initMesgFail': 'EMERGENCY',\n    'raid.assim.disk.nolabels': 'EMERGENCY',\n    'raid.assim.fatal': 'EMERGENCY',\n    'raid.assim.fatal.upgrade': 'EMERGENCY',\n    'raid.assim.rg.missingChild': 'EMERGENCY',\n    'raid.assim.tree.degradedDirty': 'EMERGENCY',\n    'raid.assim.tree.multipleRootVols': 'EMERGENCY',\n    'raid.assim.upgrade.aggr.fail': 'EMERGENCY',\n    'raid.config.online.req.unsup': 'EMERGENCY',\n    'raid.disk.owner.change.fail': 'EMERGENCY',\n    'raid.mirror.bigio.restrict.failed': 'EMERGENCY',\n    'raid.mirror.bigio.wafliron.nostart': 'EMERGENCY',\n    'raid.multierr.unverified.block': 'EMERGENCY',\n    'raid.mv.defVol.online.fail': 'EMERGENCY',\n    'raid.rg.readerr.bad.file.block': 'EMERGENCY',\n    
'raid.rg.readerr.wc.blkErr': 'EMERGENCY',\n    'raid.vol.volinfo.mismatch': 'EMERGENCY',\n    'rdb.recovery.failed': 'EMERGENCY',\n    'repl.checker.block.missing': 'EMERGENCY',\n    'repl.physdiff.invalid.hole': 'EMERGENCY',\n    'sas.adapter.firmware.update.failed': 'EMERGENCY',\n    'sas.cable.unqualified': 'EMERGENCY',\n    'sas.cpr.failed': 'EMERGENCY',\n    'sas.cpr.recoveryThreshold': 'EMERGENCY',\n    'scsiblade.kernel.volume.limbo.group': 'EMERGENCY',\n    'scsiblade.kernel.vserver.limbo.group': 'EMERGENCY',\n    'scsiblade.mgmt.wedged': 'EMERGENCY',\n    'scsiblade.prop.done.error': 'EMERGENCY',\n    'scsiblade.unavailable': 'EMERGENCY',\n    'scsiblade.vol.init.failed': 'EMERGENCY',\n    'scsiblade.volume.event.lost': 'EMERGENCY',\n    'scsiblade.vs.purge.fail': 'EMERGENCY',\n    'scsiblade.vserver.op.timeout': 'EMERGENCY',\n    'scsitarget.fct.postFailed': 'EMERGENCY',\n    'scsitarget.slifct.rebootRequired': 'EMERGENCY',\n    'secd.ldap.noServers': 'EMERGENCY',\n    'secd.lsa.noServers': 'EMERGENCY',\n    'secd.netlogon.noServers': 'EMERGENCY',\n    'secd.nis.noServers': 'EMERGENCY',\n    'ses.badShareStorageConfigErr': 'EMERGENCY',\n    'ses.config.IllegalEsh270': 'EMERGENCY',\n    'ses.config.shelfMixError': 'EMERGENCY',\n    'ses.psu.powerReqError': 'EMERGENCY',\n    'ses.shelf.em.ctrlFailErr': 'EMERGENCY',\n    'ses.status.enclError': 'EMERGENCY',\n    'ses.status.fanError': 'EMERGENCY',\n    'ses.status.volError': 'EMERGENCY',\n    'ses.system.em.mmErr': 'EMERGENCY',\n    'ses.unsupported.shelf.psu': 'EMERGENCY',\n    'ses.unsupported.shelves.psus': 'EMERGENCY',\n    'sfo.reassignFailed': 'EMERGENCY',\n    'snapmirror.replay.failed': 'EMERGENCY',\n    'sp.ipmi.lost.shutdown': 'EMERGENCY',\n    'spm.mgwd.process.exit': 'EMERGENCY',\n    'spm.secd.process.exit': 'EMERGENCY',\n    'spm.vifmgr.process.exit': 'EMERGENCY',\n    'spm.vldb.process.exit': 'EMERGENCY',\n    'ups.battery.critical.goodlinepower': 'EMERGENCY',\n    'ups.battery.warning': 'EMERGENCY',\n    'ups.battery.warning.goodlinepower': 'EMERGENCY',\n    'ups.inputpower.failed': 'EMERGENCY',\n    'ups.systemshutdown': 'EMERGENCY',\n    'vifmgr.clus.linkdown': 'EMERGENCY',\n    'vifmgr.cluscheck.l2ping': 'EMERGENCY',\n    'vifmgr.ipspace.tooMany': 'EMERGENCY',\n    'vldb.update.duringsofail': 'EMERGENCY',\n    'vol.phys.overalloc': 'EMERGENCY',\n    'vsa.inadequateVM': 'EMERGENCY',\n    'vsa.unlicensed': 'EMERGENCY',\n    'wafl.aggr.rsv.low.nomount': 'EMERGENCY',\n    'wafl.aggrtrans.outofspace.offline': 'EMERGENCY',\n    'wafl.bad.aggr.buftree.type': 'EMERGENCY',\n    'wafl.bad.vol.buftree.type': 'EMERGENCY',\n    'wafl.buf.badHeader': 'EMERGENCY',\n    'wafl.buf.freeingFreeBlock': 'EMERGENCY',\n    'wafl.failed.mount': 'EMERGENCY',\n    'wafl.failed.mount.bad.fsid': 'EMERGENCY',\n    'wafl.inconsistent.dirent': 'EMERGENCY',\n    'wafl.inconsistent.threshold.reached': 'EMERGENCY',\n    'wafl.iron.abort.offlineFail': 'EMERGENCY',\n    'wafl.iron.badfsid': 'EMERGENCY',\n    'wafl.iron.oc.abort.bad_blk': 'EMERGENCY',\n    'wafl.iron.oc.abort.clog_full': 'EMERGENCY',\n    'wafl.iron.oc.deletedChangeLog': 'EMERGENCY',\n    'wafl.iron.oc.errorCommitLog': 'EMERGENCY',\n    'wafl.iron.oc.root.lowMemory': 'EMERGENCY',\n    'wafl.mcc.so.nvram.warn': 'EMERGENCY',\n    'wafl.nvlog.checkFail': 'EMERGENCY',\n    'wafl.nvsave.replaying.fail': 'EMERGENCY',\n    'wafl.nvsave.saving.fail': 'EMERGENCY',\n    'wafl.offline.versionMismatch': 'EMERGENCY',\n    'wafl.online.fail.vmalign': 'EMERGENCY',\n    
'wafl.online.notCompatibleVer': 'EMERGENCY',\n    'wafl.online.vbnMismatch': 'EMERGENCY',\n    'wafl.raid.incons.xidata': 'EMERGENCY',\n    'wafl.scan.typebits.diffFail': 'EMERGENCY',\n    'wafl.takeover.root.fail': 'EMERGENCY',\n    'wafl.takeover.vol.fail': 'EMERGENCY',\n    'wafl.vol.nvfail.offline': 'EMERGENCY',\n    'wafl.vol.walloc.rsv.failmount': 'EMERGENCY'}\n\nIOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Input/output operations per second\"\n}\nREAD_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Read input/output operations per second\"\n}\nWRITE_IOPS_DESCRIPTION = {\n    \"unit\": \"IOPS\",\n    \"description\": \"Write input/output operations per second\"\n}\nTHROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data is \"\n                   \"successfully transferred in MB/s\"\n}\nREAD_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data read is \"\n                   \"successfully transferred in MB/s\"\n}\nWRITE_THROUGHPUT_DESCRIPTION = {\n    \"unit\": \"MB/s\",\n    \"description\": \"Represents how much data write is \"\n                   \"successfully transferred in MB/s\"\n}\nRESPONSE_TIME_DESCRIPTION = {\n    \"unit\": \"ms\",\n    \"description\": \"Average time taken for an IO \"\n                   \"operation in ms\"\n}\nCACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of IO operations that are cache hits\"\n}\nREAD_CACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of read ops that are cache hits\"\n}\nWRITE_CACHE_HIT_RATIO_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of write ops that are cache hits\"\n}\nIO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of IO requests in KB\"\n}\nREAD_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of read IO requests in KB\"\n}\nWRITE_IO_SIZE_DESCRIPTION = {\n    \"unit\": \"KB\",\n    \"description\": \"The average size of write IO requests in KB\"\n}\nCPU_USAGE_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of CPU usage\"\n}\nMEMORY_USAGE_DESCRIPTION = {\n    \"unit\": \"%\",\n    \"description\": \"Percentage of memory usage\"\n}\nSERVICE_TIME = {\n    \"unit\": 'ms',\n    \"description\": \"Service time of the resource in ms\"\n}\n\nCAP_MAP = {\n    \"iops\": IOPS_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n    \"cacheHitRatio\": CACHE_HIT_RATIO_DESCRIPTION,\n    \"readCacheHitRatio\": READ_CACHE_HIT_RATIO_DESCRIPTION,\n    \"writeCacheHitRatio\": WRITE_CACHE_HIT_RATIO_DESCRIPTION,\n    \"ioSize\": IO_SIZE_DESCRIPTION,\n    \"readIoSize\": READ_IO_SIZE_DESCRIPTION,\n    \"writeIoSize\": WRITE_IO_SIZE_DESCRIPTION,\n}\n\nSTORAGE_CAPABILITIES = {\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n    \"iops\": IOPS_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n}\n\nPOOL_CAPABILITIES = {\n    \"throughput\": 
THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n    \"iops\": IOPS_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n}\n\nVOLUME_CAPABILITIES = {\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n    \"iops\": IOPS_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n}\n\nPORT_CAPABILITIES = {\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n    \"iops\": IOPS_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n}\n\nFS_CAPABILITIES = {\n    \"throughput\": THROUGHPUT_DESCRIPTION,\n    \"iops\": IOPS_DESCRIPTION,\n    \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n    \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n    \"readIops\": READ_IOPS_DESCRIPTION,\n    \"writeIops\": WRITE_IOPS_DESCRIPTION,\n}\n\nHOST_OS_TYPE_MAP = {\n    'solaris': constants.HostOSTypes.SOLARIS,\n    'windows': constants.HostOSTypes.WINDOWS,\n    'hpux': constants.HostOSTypes.HP_UX,\n    'aix': constants.HostOSTypes.AIX,\n    'linux': constants.HostOSTypes.LINUX,\n    'netware': constants.HostOSTypes.UNKNOWN,\n    'vmware': constants.HostOSTypes.VMWARE_ESX,\n    'openvms': constants.HostOSTypes.OPEN_VMS,\n    'xen': constants.HostOSTypes.XEN_SERVER,\n    'hyper_v': constants.HostOSTypes.UNKNOWN\n}\n"
  },
  {
    "path": "delfin/drivers/netapp/dataontap/mapping_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom delfin.common import constants\nfrom delfin.drivers.netapp.dataontap import constants as constant\nfrom delfin.drivers.utils.tools import Tools\n\n\nclass MappingHandler(object):\n\n    @staticmethod\n    def format_initiators(initiator_list, initiator_info, storage_id,\n                          protocol_type, is_default=False):\n        initiator_map_list = []\n        Tools.split_value_map_list(\n            initiator_info, initiator_map_list, is_mapping=True, split=':')\n        if not is_default and protocol_type ==\\\n                constants.InitiatorType.FC:\n            MappingHandler.get_fc_initiator(\n                initiator_list, initiator_map_list, storage_id)\n        elif not is_default and protocol_type ==\\\n                constants.InitiatorType.ISCSI:\n            MappingHandler.get_iscsi_initiator(\n                initiator_list, initiator_map_list, storage_id)\n        if is_default:\n            MappingHandler.get_initiator_from_host(\n                initiator_list, initiator_map_list, storage_id)\n        return initiator_list\n\n    @staticmethod\n    def duplicate_removal(initiator_list, initiator_model):\n        is_same = False\n        for initiator in initiator_list:\n            if initiator['native_storage_host_initiator_id'] \\\n                    == initiator_model['native_storage_host_initiator_id']:\n                is_same = True\n                break\n        if not is_same:\n            initiator_list.append(initiator_model)\n\n    @staticmethod\n    def get_iscsi_initiator(initiator_list, initiator_map_list, storage_id):\n        for initiator_map in initiator_map_list:\n            if 'IgroupName' in initiator_map \\\n                    and initiator_map.get('IgroupName') == '-':\n                initiator_id = \\\n                    initiator_map.get('InitiatorName').replace(' ', '')\n                initiator_model = {\n                    'native_storage_host_initiator_id': initiator_id,\n                    'native_storage_host_id': None,\n                    'name': initiator_id,\n                    'alias': initiator_map.get('InitiatorAlias'),\n                    'type': constants.InitiatorType.ISCSI,\n                    'status': constants.InitiatorStatus.ONLINE,\n                    'wwn': initiator_id,\n                    'storage_id': storage_id,\n                }\n                MappingHandler.duplicate_removal(\n                    initiator_list, initiator_model)\n\n    @staticmethod\n    def get_fc_initiator(initiator_list, initiator_map_list, storage_id):\n        for initiator_map in initiator_map_list:\n            if 'IgroupName' in initiator_map \\\n                    and initiator_map.get('IgroupName') == '-':\n                initiator_id = \\\n                    initiator_map.get('InitiatorWWPN').replace(' ', '')\n                initiator_model = {\n                    
'native_storage_host_initiator_id': initiator_id,\n                    'native_storage_host_id': None,\n                    'name': initiator_id,\n                    'alias': initiator_map.get('InitiatorWWPNAlias'),\n                    'type': constants.InitiatorType.FC,\n                    'status': constants.InitiatorStatus.ONLINE,\n                    'wwn': initiator_map.get('InitiatorWWPN'),\n                    'storage_id': storage_id,\n                }\n                MappingHandler.duplicate_removal(\n                    initiator_list, initiator_model)\n\n    @staticmethod\n    def get_initiator_type(protocol_type, initiator_name):\n        if protocol_type != 'mixed':\n            return constant.NETWORK_PORT_TYPE.get(protocol_type)\n        else:\n            if constant.IQN_PATTERN.search(initiator_name):\n                return constants.PortType.ISCSI\n            elif constant.WWN_PATTERN.search(initiator_name):\n                return constants.PortType.FC\n            return None\n\n    @staticmethod\n    def format_initiator(data_map, initiator_id, storage_id):\n        initiator_id = initiator_id.split('(')[0]\n        protocol_type = \\\n            MappingHandler.get_initiator_type(\n                data_map.get('Protocol'), initiator_id)\n        host_id = '%s_%s' % (data_map.get('VserverName'),\n                             data_map.get('IgroupName'))\n        initiator_model = {\n            'native_storage_host_initiator_id': initiator_id,\n            'native_storage_host_id': host_id,\n            'name': initiator_id,\n            'type': protocol_type,\n            'status': constants.InitiatorStatus.ONLINE,\n            'storage_id': storage_id,\n            'wwn': initiator_id\n        }\n        return initiator_model\n\n    @staticmethod\n    def get_initiator_from_host(\n            initiator_list, initiator_map_list, storage_id):\n        for initiator_map in initiator_map_list:\n            if 'IgroupName' in initiator_map:\n                initiator_id = \\\n                    initiator_map.get('Initiators').replace(' ', '')\n                if initiator_map.get('Initiators') != '-':\n                    initiator_list.append(\n                        MappingHandler.format_initiator(\n                            initiator_map, initiator_id, storage_id))\n                for key in initiator_map:\n                    if constant.INITIATOR_KEY in key and key != '-':\n                        initiator_list.append(\n                            MappingHandler.format_initiator(\n                                initiator_map, key, storage_id))\n\n    @staticmethod\n    def format_host(initiator_info, storage_id):\n        initiator_map_list, initiator_list = [], []\n        Tools.split_value_map_list(initiator_info,\n                                   initiator_map_list,\n                                   split=':')\n        for initiator_map in initiator_map_list:\n            if 'IgroupName' in initiator_map:\n                host_id = '%s_%s' % (initiator_map.get('VserverName'),\n                                     initiator_map.get('IgroupName'))\n                initiator_model = {\n                    'native_storage_host_id': host_id,\n                    'name': initiator_map.get('IgroupName'),\n                    'os_type':\n                        constant.HOST_OS_TYPE_MAP.get(\n                            initiator_map.get('OSType')),\n                    'status': constants.HostStatus.NORMAL,\n                    'storage_id': 
storage_id,\n                }\n                initiator_list.append(initiator_model)\n        return initiator_list\n\n    @staticmethod\n    def format_port_group(port_set_info, lif_info, storage_id):\n        port_map_list, port_group_list = [], []\n        lif_map_list, port_group_relation_list = [], []\n        Tools.split_value_map_list(port_set_info, port_map_list, split=':')\n        Tools.split_value_map_list(lif_info, lif_map_list, split=':')\n        for port_map in port_map_list:\n            if 'PortsetName' in port_map:\n                port_group_id = \"%s-%s-%s\" % \\\n                                (port_map.get('VserverName'),\n                                 port_map.get('PortsetName'),\n                                 port_map.get('Protocol'))\n                ports = \\\n                    port_map.get('LIFOrTPGName').replace(' ', '').split(',')\n                ports_str = ''\n                for lif_map in lif_map_list:\n                    if 'LogicalInterfaceName' in lif_map:\n                        if lif_map.get('LogicalInterfaceName') in ports:\n                            port_id = \"%s_%s\" % \\\n                                      (lif_map['CurrentNode'],\n                                       lif_map['CurrentPort'])\n                            port_group_relation = {\n                                'storage_id': storage_id,\n                                'native_port_group_id': port_group_id,\n                                'native_port_id': port_id\n                            }\n                            port_group_relation_list.append(\n                                port_group_relation)\n                            if ports_str:\n                                ports_str = \\\n                                    \"{0},{1}\".format(ports_str, port_id)\n                            else:\n                                ports_str = \"{0}\".format(port_id)\n\n                port_group_model = {\n                    'native_port_group_id': port_group_id,\n                    'name': port_map.get('PortsetName'),\n                    'ports': ports_str,\n                    'storage_id': storage_id,\n                }\n                port_group_list.append(port_group_model)\n        result = {\n            'port_groups': port_group_list,\n            'port_grp_port_rels': port_group_relation_list\n        }\n        return result\n\n    @staticmethod\n    def format_mapping_view(mapping_info, volume_info, storage_id, host_list):\n        mapping_map_list, mapping_view_list, volume_map_list = [], [], []\n        Tools.split_value_map_list(mapping_info, mapping_map_list, split=\":\")\n        Tools.split_value_map_list(volume_info, volume_map_list, split=\":\")\n        for mapping_map in mapping_map_list:\n            if 'LUNPath' in mapping_map:\n                host_id = '%s_%s' % (mapping_map.get('VserverName'),\n                                     mapping_map.get('IgroupName'))\n                native_masking_view_id = \\\n                    '%s_%s_%s_%s' % (mapping_map.get('LUNNode'),\n                                     mapping_map.get('VserverName'),\n                                     mapping_map.get('IgroupName'),\n                                     mapping_map.get('LUNName'))\n                name = '%s_%s' % (mapping_map.get('IgroupName'),\n                                  mapping_map.get('LUNName'))\n                port_group_id = \"%s-%s-%s\" % \\\n                                (mapping_map.get('VserverName'),\n         
                        mapping_map.get('PortsetBindingIgroup'),\n                                 mapping_map.get('IgroupProtocolType'))\n                native_volume_id = None\n                for volume_map in volume_map_list:\n                    if 'LUNName' in volume_map:\n                        if volume_map.get('LUNName') == \\\n                                mapping_map.get('LUNName') \\\n                                and volume_map.get('VserverName') == \\\n                                mapping_map.get('VserverName') \\\n                                and volume_map.get('LUNPath') == \\\n                                mapping_map.get('LUNPath'):\n                            native_volume_id = volume_map['SerialNumber']\n                mapping_view = {\n                    'native_masking_view_id':\n                        native_masking_view_id,\n                    'name': name,\n                    'native_port_group_id': port_group_id,\n                    'native_storage_host_id': host_id,\n                    'native_volume_id': native_volume_id,\n                    'storage_id': storage_id,\n                }\n                mapping_view_list.append(mapping_view)\n        return mapping_view_list\n"
  },
  {
    "path": "delfin/drivers/netapp/dataontap/netapp_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WarrayANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport re\nimport time\n\nimport requests\nimport six\nimport hashlib\n\nfrom oslo_log import log as logging\nfrom oslo_utils import units\n\nfrom delfin import cryptor\nfrom delfin.drivers.netapp.dataontap import constants as constant\nfrom delfin import exception, utils\nfrom delfin.common import constants\nfrom delfin.drivers.netapp.dataontap.mapping_handler import MappingHandler\nfrom delfin.drivers.netapp.dataontap.performance_handler \\\n    import PerformanceHandler\nfrom delfin.drivers.utils.rest_client import RestClient\nfrom delfin.drivers.utils.ssh_client import SSHPool\nfrom delfin.drivers.utils.tools import Tools\n\nLOG = logging.getLogger(__name__)\n\n\nclass NetAppHandler(object):\n    OID_SERIAL_NUM = '1.3.6.1.4.1.789.1.1.9.0'\n    OID_TRAP_DATA = '1.3.6.1.4.1.789.1.1.12.0'\n    NODE_NAME = 'controller_name'\n    SECONDS_TO_MS = 1000\n    NETAPP_NAA = '60a98000'\n\n    def __init__(self, **kwargs):\n        self.ssh_pool = SSHPool(**kwargs)\n\n        self.rest_client = RestClient(**kwargs)\n\n        self.rest_client.verify = kwargs.get('verify', False)\n        self.rest_client.init_http_head()\n        self.rest_client.session.auth = requests.auth.HTTPBasicAuth(\n            self.rest_client.rest_username,\n            cryptor.decode(self.rest_client.rest_password))\n\n    @staticmethod\n    def get_table_data(values):\n        header_index = 0\n        table = values.split(\"\\r\\n\")\n        for i in range(0, len(table)):\n            if constant.PATTERN.search(table[i]):\n                header_index = i\n        return table[(header_index + 1):]\n\n    @staticmethod\n    def get_fs_id(vserver, volume):\n        return vserver + '_' + volume\n\n    @staticmethod\n    def get_qt_id(vserver, volume, qtree):\n        qt_id = vserver + '/' + volume\n        if qtree != '':\n            qt_id += '/' + qtree\n        return qt_id\n\n    @staticmethod\n    def get_size(limit, is_calculate=False):\n        if limit == '0B':\n            return 0\n        if limit == '-':\n            return 0 if is_calculate else '-'\n        return int(Tools.get_capacity_size(limit))\n\n    @staticmethod\n    def parse_alert(alert):\n        try:\n            alert_info = alert.get(NetAppHandler.OID_TRAP_DATA)\n            node_name = alert.get(NetAppHandler.NODE_NAME)\n            alert_info = alert_info.replace(\"]\", '')\n            alert_array = alert_info.split(\"[\")\n            alert_model = {}\n            alert_map = {}\n            if len(alert_array) > 1:\n                category = constants.Category.FAULT \\\n                    if 'created' in alert_array[0] \\\n                    else constants.Category.RECOVERY\n                alert_values = alert_array[1].split(\",\")\n                for alert_value in alert_values:\n                    array = alert_value.split(\"=\")\n                    if len(array) > 1:\n                        key = 
                for alert_value in alert_values:\n                    array = alert_value.split(\"=\")\n                    if len(array) > 1:\n                        key = array[0].replace(' ', '')\n                        value = array[1].replace(' ', '').replace('.', '')\n                        alert_map[key] = value\n                if alert_map:\n                    alert_model = {\n                        'alert_id': alert_map.get('AlertId'),\n                        'alert_name': alert_map.get('AlertId'),\n                        'severity': None,\n                        'category': category,\n                        'type': constants.EventType.EQUIPMENT_ALARM,\n                        'occur_time': utils.utcnow_ms(),\n                        'description': None,\n                        'match_key': hashlib.md5(\n                            (alert_map.get('AlertId') + node_name +\n                             alert_map['AlertingResource']\n                             ).encode()).hexdigest(),\n                        'resource_type': constants.DEFAULT_RESOURCE_TYPE,\n                        'location': None\n                    }\n                else:\n                    raise exception.IncompleteTrapInformation(\n                        constant.STORAGE_VENDOR)\n            return alert_model\n        except exception.IncompleteTrapInformation as err:\n            raise err\n        except Exception as err:\n            err_msg = \"Failed to parse alert from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def login(self):\n        try:\n            result = self.ssh_pool.do_exec('cluster identity show')\n            if 'is not a recognized command' in result \\\n                    or 'command not found' in result:\n                raise exception.InvalidIpOrPort()\n            version = self.get_storage_version()\n            if version >= 9.6:\n                self.rest_client.do_call(\n                    constant.CLUSTER_PERF_URL, None, 'GET')\n        except Exception as e:\n            LOG.error(\"Failed to login netapp %s\" %\n                      (six.text_type(e)))\n            raise e\n\n    def get_storage(self):\n        try:\n            raw_capacity = total_capacity = used_capacity = free_capacity = 0\n            controller_map_list = []\n            system_info = self.ssh_pool.do_exec(\n                constant.CLUSTER_SHOW_COMMAND)\n            version_info = self.ssh_pool.do_exec(\n                constant.VERSION_SHOW_COMMAND)\n            status_info = self.ssh_pool.do_exec(\n                constant.STORAGE_STATUS_COMMAND)\n            controller_info = self.ssh_pool.do_exec(\n                constant.CONTROLLER_SHOW_DETAIL_COMMAND)\n            Tools.split_value_map_list(\n                controller_info, controller_map_list, \":\")\n            version_array = version_info.split(\"\\r\\n\")\n            storage_version = ''\n            for version in version_array:\n                if 'NetApp' in version:\n                    storage_version = version.split(\":\")\n                    break\n            status = self.get_table_data(status_info)\n            status = constant.STORAGE_STATUS.get(status[0].split()[0])\n            disk_list = self.get_disks(None)\n            pool_list = self.list_storage_pools(None)\n            storage_map_list = []\n            Tools.split_value_map_list(\n                system_info, storage_map_list, split=':')\n            if len(storage_map_list) > 0:\n                storage_map = storage_map_list[-1]\n
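                # Prefer the first controller record that reports a real\n                # model; otherwise fall back to the first record in the list.\n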
                controller = controller_map_list[1]\n                for controller_map in controller_map_list[1:]:\n                    if controller_map['Model'] != '-':\n                        controller = controller_map\n                        break\n                for disk in disk_list:\n                    raw_capacity += disk['capacity']\n                for pool in pool_list:\n                    total_capacity += pool['total_capacity']\n                    free_capacity += pool['free_capacity']\n                    used_capacity += pool['used_capacity']\n                storage_model = {\n                    \"name\": storage_map['ClusterName'],\n                    \"vendor\": constant.STORAGE_VENDOR,\n                    \"model\": controller['Model'],\n                    \"status\": status,\n                    \"serial_number\":\n                        storage_map['ClusterUUID'] +\n                        ':' + storage_map['ClusterSerialNumber'],\n                    \"firmware_version\": storage_version[0],\n                    \"location\": controller['Location'],\n                    \"total_capacity\": total_capacity,\n                    \"raw_capacity\": raw_capacity,\n                    \"used_capacity\": used_capacity,\n                    \"free_capacity\": free_capacity\n                }\n                return storage_model\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e.msg))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_aggregate(self, storage_id):\n        agg_list = []\n        agg_info = self.ssh_pool.do_exec(\n            constant.AGGREGATE_SHOW_DETAIL_COMMAND)\n        agg_map_list = []\n        Tools.split_value_map_list(agg_info, agg_map_list, split=':')\n        for agg_map in agg_map_list:\n            if agg_map and 'Aggregate' in agg_map.keys():\n                status = constant.AGGREGATE_STATUS.get(agg_map['State'])\n                pool_model = {\n                    'name': agg_map['Aggregate'],\n                    'storage_id': storage_id,\n                    'native_storage_pool_id': agg_map['UUIDString'],\n                    'description': None,\n                    'status': status,\n                    'storage_type': constants.StorageType.UNIFIED,\n                    'total_capacity': self.get_size(agg_map['Size'], True),\n                    'used_capacity': self.get_size(agg_map['UsedSize'], True),\n                    'free_capacity':\n                        self.get_size(agg_map['AvailableSize'], True),\n                }\n                agg_list.append(pool_model)\n        return agg_list\n\n    def get_pool(self, storage_id):\n        pool_list = []\n        pool_info = self.ssh_pool.do_exec(\n            constant.POOLS_SHOW_DETAIL_COMMAND)\n        pool_map_list = []\n        Tools.split_value_map_list(pool_info, pool_map_list, split=':')\n        for pool_map in pool_map_list:\n            if pool_map and 'StoragePoolName' in pool_map.keys():\n                status = constants.StoragePoolStatus.ABNORMAL\n                if pool_map['IsPoolHealthy?'] == 'true':\n                    status = 
constants.StoragePoolStatus.NORMAL\n                pool_model = {\n                    'name': pool_map['StoragePoolName'],\n                    'storage_id': storage_id,\n                    'native_storage_pool_id': pool_map['UUIDofStoragePool'],\n                    'description': None,\n                    'status': status,\n                    'storage_type': constants.StorageType.UNIFIED,\n                    'total_capacity':\n                        self.get_size(pool_map['StoragePoolTotalSize'], True),\n                    'used_capacity':\n                        self.get_size(pool_map['StoragePoolTotalSize'], True) -\n                        self.get_size(pool_map['StoragePoolUsableSize'], True),\n                    'free_capacity':\n                        self.get_size(pool_map['StoragePoolUsableSize'], True)\n                }\n                pool_list.append(pool_model)\n        return pool_list\n\n    def list_storage_pools(self, storage_id):\n        try:\n            pool_list = self.get_pool(storage_id)\n            agg_list = self.get_aggregate(storage_id)\n            return agg_list + pool_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage pool from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage pool from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_volumes(self, storage_id):\n        try:\n            volume_list = []\n            volume_info = self.ssh_pool.do_exec(\n                constant.LUN_SHOW_DETAIL_COMMAND)\n            fs_list = self.get_filesystems(storage_id)\n            volume_map_list = []\n            Tools.split_value_map_list(volume_info, volume_map_list, split=':')\n            for volume_map in volume_map_list:\n                if volume_map and 'LUNName' in volume_map.keys():\n                    pool_id = None\n                    status = 'normal' if volume_map['State'] == 'online' \\\n                        else 'offline'\n                    for fs in fs_list:\n                        if fs['name'] == volume_map['VolumeName']:\n                            pool_id = fs['native_pool_id']\n                    type = constants.VolumeType.THIN \\\n                        if volume_map['SpaceAllocation'] == 'enabled' \\\n                        else constants.VolumeType.THICK\n                    volume_model = {\n                        'name': volume_map['LUNName'],\n                        'storage_id': storage_id,\n                        'description': None,\n                        'status': status,\n                        'native_volume_id': volume_map['SerialNumber'],\n                        'native_storage_pool_id': pool_id,\n                        'wwn':\n                            NetAppHandler.NETAPP_NAA +\n                            volume_map['SerialNumber(Hex)'],\n                        'compressed': None,\n                        'deduplicated': None,\n                        'type': type,\n                        'total_capacity':\n                            self.get_size(volume_map['LUNSize'], True),\n                        'used_capacity':\n                            self.get_size(volume_map['UsedSize'], True),\n                        'free_capacity':\n                            
self.get_size(volume_map['LUNSize'], True) -\n                            self.get_size(volume_map['UsedSize'], True)\n                    }\n                    volume_list.append(volume_model)\n            return volume_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage volume from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage volume from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_alerts(self, query_para):\n        alert_list = []\n        alert_info = self.ssh_pool.do_exec(\n            constant.ALTER_SHOW_DETAIL_COMMAND)\n        alert_map_list = []\n        Tools.split_value_map_list(\n            alert_info, alert_map_list, True, split=':')\n        for alert_map in alert_map_list:\n            if alert_map and 'AlertID' in alert_map.keys():\n                occur_time = int(time.mktime(time.strptime(\n                    alert_map['IndicationTime'],\n                    constant.ALTER_TIME_TYPE)))\n                if not query_para or \\\n                        (int(query_para['begin_time'])\n                         <= occur_time\n                         <= int(query_para['end_time'])):\n                    alert_model = {\n                        'alert_id': alert_map['AlertID'],\n                        'alert_name': alert_map['AlertID'],\n                        'severity': constant.ALERT_SEVERITY\n                        [alert_map['PerceivedSeverity']],\n                        'category': constants.Category.FAULT,\n                        'type': constants.EventType.EQUIPMENT_ALARM,\n                        'occur_time': occur_time * 1000,\n                        'description': alert_map['Description'],\n                        'sequence_number': alert_map['AlertID'],\n                        'match_key': hashlib.md5(\n                            (alert_map['AlertID'] +\n                             alert_map['Node'] +\n                             alert_map['AlertingResource']\n                             ).encode()).hexdigest(),\n                        'resource_type': constants.DEFAULT_RESOURCE_TYPE,\n                        'location':\n                            alert_map['ProbableCause'] +\n                            ':' + alert_map['PossibleEffect']\n                    }\n                    alert_list.append(alert_model)\n        return alert_list\n\n    def list_alerts(self, query_para):\n        try:\n            \"\"\"Query the two alarms separately\"\"\"\n            alert_list = self.get_alerts(query_para)\n            return alert_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage alert from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage alert from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def clear_alert(self, alert):\n        try:\n            ssh_command = \\\n                constant.CLEAR_ALERT_COMMAND + alert['alert_id']\n            self.ssh_pool.do_exec(ssh_command)\n        except 
exception.DelfinException as e:\n            err_msg = \"Failed to clear storage alert from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to clear storage alert from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_disks(self, storage_id):\n        disks_list = []\n        physicals_list = []\n        disks_info = self.ssh_pool.do_exec(\n            constant.DISK_SHOW_DETAIL_COMMAND)\n        physicals_info = self.ssh_pool.do_exec(\n            constant.DISK_SHOW_PHYSICAL_COMMAND)\n        error_disk = self.ssh_pool.do_exec(\n            constant.DISK_ERROR_COMMAND\n        )\n        error_disk_list = []\n        error_disk_array = self.get_table_data(error_disk)\n        for error_disk in error_disk_array:\n            error_array = error_disk.split()\n            if len(error_array) > 2:\n                error_disk_list.append(error_array[0])\n        disks_map_list = []\n        physical_array = self.get_table_data(physicals_info)\n        for physical in physical_array:\n            physicals_list.append(physical.split())\n        Tools.split_value_map_list(disks_info, disks_map_list, split=':')\n        for disks_map in disks_map_list:\n            if disks_map and 'Disk' in disks_map.keys():\n                speed = physical_type = firmware = None\n                logical_type = constant.DISK_LOGICAL. \\\n                    get(disks_map['ContainerType'])\n                \"\"\"Map disk physical information\"\"\"\n                for physical_info in physicals_list:\n                    if len(physical_info) > 6 and \\\n                            physical_info[0] == disks_map['Disk']:\n                        physical_type = \\\n                            constant.DISK_TYPE.get(physical_info[1])\n                        speed = physical_info[5] \\\n                            if physical_info[5] != '-' else 0\n                        firmware = physical_info[4]\n                status = constants.DiskStatus.NORMAL\n                if disks_map['Disk'] in error_disk_list:\n                    status = constants.DiskStatus.ABNORMAL\n                disk_model = {\n                    'name': disks_map['Disk'],\n                    'storage_id': storage_id,\n                    'native_disk_id': disks_map['Disk'],\n                    'serial_number': disks_map['SerialNumber'],\n                    'manufacturer': disks_map['Vendor'],\n                    'model': disks_map['Model'],\n                    'firmware': firmware,\n                    'speed': speed,\n                    'capacity': self.get_size(disks_map['PhysicalSize'], True),\n                    'status': status,\n                    'physical_type': physical_type,\n                    'logical_type': logical_type,\n                    'native_disk_group_id': disks_map['Aggregate'],\n                    'location': None,\n                }\n                disks_list.append(disk_model)\n        return disks_list\n\n    def get_filesystems(self, storage_id):\n        fs_list = []\n        fs_info = self.ssh_pool.do_exec(\n            constant.FS_SHOW_DETAIL_COMMAND)\n        thin_fs_info = self.ssh_pool.do_exec(\n            constant.THIN_FS_SHOW_COMMAND)\n        pool_list = self.list_storage_pools(storage_id)\n        thin_fs_array = 
self.get_table_data(thin_fs_info)\n        fs_map_list = []\n        Tools.split_value_map_list(fs_info, fs_map_list, split=':')\n        for fs_map in fs_map_list:\n            type = constants.FSType.THICK\n            if fs_map and 'VolumeName' in fs_map.keys():\n                pool_id = \"\"\n                \"\"\"get pool id\"\"\"\n                for pool in pool_list:\n                    if pool['name'] == fs_map['AggregateName']:\n                        pool_id = pool['native_storage_pool_id']\n                deduplicated = True\n                if fs_map['SpaceSavedbyDeduplication'] == '0B':\n                    deduplicated = False\n                if len(thin_fs_array) > 2:\n                    for thin_vol in thin_fs_array:\n                        thin_array = thin_vol.split()\n                        if len(thin_array) > 4:\n                            if thin_array[1] == fs_map['VolumeName']:\n                                type = constants.VolumeType.THIN\n                compressed = True\n                if fs_map['VolumeContainsSharedorCompressedData'] == \\\n                        'false':\n                    compressed = False\n                status = constant.FS_STATUS.get(fs_map['VolumeState'])\n                fs_id = self.get_fs_id(\n                    fs_map['VserverName'], fs_map['VolumeName'])\n                fs_model = {\n                    'name': fs_map['VolumeName'],\n                    'storage_id': storage_id,\n                    'native_filesystem_id': fs_id,\n                    'native_pool_id': pool_id,\n                    'compressed': compressed,\n                    'deduplicated': deduplicated,\n                    'worm': constant.WORM_TYPE.get(fs_map['SnapLockType']),\n                    'status': status,\n                    'security_mode':\n                        constant.SECURITY_STYLE.get(\n                            fs_map['SecurityStyle'], fs_map['SecurityStyle']),\n                    'type': type,\n                    'total_capacity': self.get_size(fs_map['VolumeSize']),\n                    'used_capacity':\n                        self.get_size(fs_map['VolumeSize'], True) -\n                        self.get_size(fs_map['AvailableSize'], True),\n                    'free_capacity': self.get_size(fs_map['AvailableSize'])\n                }\n                if fs_model['total_capacity'] != '-' \\\n                        and fs_model['total_capacity'] > 0:\n                    fs_list.append(fs_model)\n        return fs_list\n\n    def list_controllers(self, storage_id):\n        try:\n            controller_list = []\n            controller_info = self.ssh_pool.do_exec(\n                constant.CONTROLLER_SHOW_DETAIL_COMMAND)\n            controller_ips = self.ssh_pool.do_exec(\n                constant.CONTROLLER_IP_COMMAND)\n            ips_array = self.get_table_data(controller_ips)\n            ip_map = {}\n            controller_map_list = []\n            Tools.split_value_map_list(\n                controller_info, controller_map_list, split=':')\n            for controller_map in controller_map_list:\n                if controller_map and 'Node' in controller_map.keys():\n                    for ips in ips_array:\n                        ip_array = ips.split()\n                        key = value = ''\n                        if len(ip_array) == 4:\n                            for ip in ip_array:\n                                if ip == controller_map['Node']:\n                                    key = ip\n     
                           if constant.IP_PATTERN.search(ip):\n                                    value = ip\n                                ip_map[key] = value\n                    status = constants.ControllerStatus.NORMAL \\\n                        if controller_map['Health'] == 'true' \\\n                        else constants.ControllerStatus.OFFLINE\n                    controller_model = {\n                        'name': controller_map['Node'],\n                        'storage_id': storage_id,\n                        'native_controller_id': controller_map['SystemID'],\n                        'status': status,\n                        'location': controller_map['Location'],\n                        'soft_version': None,\n                        'cpu_info': None,\n                        'memory_size': None,\n                        'mgmt_ip': ip_map.get(controller_map['Node'])\n                    }\n                    controller_list.append(controller_model)\n            return controller_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage controllers from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n        except Exception as err:\n            err_msg = \"Failed to get storage controllers from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_eth_port(self, storage_id):\n        try:\n            eth_list = []\n            eth_info = self.ssh_pool.do_exec(\n                constant.PORT_SHOW_DETAIL_COMMAND)\n\n            eth_map_list = []\n            Tools.split_value_map_list(eth_info, eth_map_list, split=':')\n            for eth_map in eth_map_list:\n                if eth_map and 'Port' in eth_map.keys():\n                    logical_type = constant.ETH_LOGICAL_TYPE.get(\n                        eth_map['PortType'])\n                    port_id = \\\n                        eth_map['Node'] + '_' + eth_map['Port']\n                    eth_model = {\n                        'name': eth_map['Node'] + ':' + eth_map['Port'],\n                        'storage_id': storage_id,\n                        'native_port_id': port_id,\n                        'location':\n                            eth_map['Node'] +\n                            ':' + eth_map['Port'],\n                        'connection_status':\n                            constants.PortConnectionStatus.CONNECTED\n                            if eth_map['Link'] == 'up'\n                            else constants.PortConnectionStatus.DISCONNECTED,\n                        'health_status':\n                            constants.PortHealthStatus.NORMAL\n                            if eth_map['PortHealthStatus'] == 'healthy'\n                            else constants.PortHealthStatus.ABNORMAL,\n                        'type': constants.PortType.ETH,\n                        'logical_type': logical_type,\n                        'speed': int(eth_map['SpeedOperational']) * units.Mi\n                        if eth_map['SpeedOperational'] != '-' else 0,\n                        'max_speed':\n                            int(eth_map['SpeedOperational']) * units.Mi\n                        if eth_map['SpeedOperational'] != '-' else 0,\n                        'native_parent_id': None,\n                        'wwn': None,\n                        'mac_address': 
eth_map['MACAddress'],\n                        'ipv4': None,\n                        'ipv4_mask': None,\n                        'ipv6': None,\n                        'ipv6_mask': None,\n                    }\n                    eth_list.append(eth_model)\n            return eth_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage ports from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage ports from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_fc_port(self, storage_id):\n        try:\n            fc_list = []\n            fc_info = self.ssh_pool.do_exec(\n                constant.FC_PORT_SHOW_DETAIL_COMMAND)\n            fc_map_list = []\n            Tools.split_value_map_list(fc_info, fc_map_list, split=':')\n            for fc_map in fc_map_list:\n                if fc_map and 'Node' in fc_map.keys():\n                    type = constant.FC_TYPE.get(fc_map['PhysicalProtocol'])\n                    port_id = \\\n                        fc_map['Node'] + '_' + fc_map['Adapter']\n                    fc_model = {\n                        'name':\n                            fc_map['Node'] +\n                            ':' + fc_map['Adapter'],\n                        'storage_id': storage_id,\n                        'native_port_id': port_id,\n                        'location':\n                            fc_map['Node'] +\n                            ':' + fc_map['Adapter'],\n                        'connection_status':\n                            constants.PortConnectionStatus.CONNECTED\n                            if fc_map['AdministrativeStatus'] == 'up'\n                            else constants.PortConnectionStatus.DISCONNECTED,\n                        'health_status':\n                            constants.PortHealthStatus.NORMAL\n                            if fc_map['OperationalStatus'] == 'online'\n                            else constants.PortHealthStatus.ABNORMAL,\n                        'type': type,\n                        'logical_type': None,\n                        'speed': int(fc_map['DataLinkRate(Gbit)']) * units.Gi\n                        if fc_map['DataLinkRate(Gbit)'] != '-' else 0,\n                        'max_speed': int(fc_map['MaximumSpeed']) * units.Gi\n                        if fc_map['MaximumSpeed'] != '-' else 0,\n                        'native_parent_id': None,\n                        'wwn': fc_map['AdapterWWPN'],\n                        'mac_address': None,\n                        'ipv4': None,\n                        'ipv4_mask': None,\n                        'ipv6': None,\n                        'ipv6_mask': None,\n                    }\n                    fc_list.append(fc_model)\n            return fc_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage ports from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n        except Exception as err:\n            err_msg = \"Failed to get storage ports from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_ports(self, 
storage_id):\n        ports_list = \\\n            self.get_fc_port(storage_id) + \\\n            self.get_eth_port(storage_id)\n        return ports_list\n\n    def list_disks(self, storage_id):\n        try:\n            return self.get_disks(storage_id)\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage disks from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n\n        except Exception as err:\n            err_msg = \"Failed to get storage disks from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_qtrees(self, storage_id):\n        try:\n            qt_list = []\n            qt_info = self.ssh_pool.do_exec(\n                constant.QTREE_SHOW_DETAIL_COMMAND)\n            fs_info = self.ssh_pool.do_exec(\n                constant.FS_SHOW_DETAIL_COMMAND)\n            fs_map_list = []\n            qt_map_list = []\n            Tools.split_value_map_list(fs_info, fs_map_list, split=':')\n            Tools.split_value_map_list(qt_info, qt_map_list, split=':')\n            for qt_map in qt_map_list:\n                if qt_map and 'QtreeName' in qt_map.keys():\n                    fs_id = self.get_fs_id(qt_map['VserverName'],\n                                           qt_map['VolumeName'])\n                    qtree_path = None\n                    for fs_map in fs_map_list:\n                        if fs_map and 'VserverName' in fs_map.keys() \\\n                                and fs_id == self.get_fs_id(\n                                fs_map['VserverName'],\n                                fs_map['VolumeName']) \\\n                                and fs_map['JunctionPath'] != '-':\n                            qtree_path = fs_map['JunctionPath']\n                            break\n                    qt_id = self.get_qt_id(\n                        qt_map['VserverName'],\n                        qt_map['VolumeName'],\n                        qt_map['QtreeName'])\n                    qtree_name = qt_map['QtreeName']\n                    if qt_map['QtreeName'] and qtree_path:\n                        qtree_path += '/' + qt_map['QtreeName']\n                        qtree_path = qtree_path.replace('//', '/')\n                    else:\n                        qtree_name = qt_id\n                    qt_model = {\n                        'name': qtree_name,\n                        'storage_id': storage_id,\n                        'native_qtree_id': qt_id,\n                        'path': qtree_path,\n                        'native_filesystem_id': fs_id,\n                        'security_mode': qt_map['SecurityStyle'],\n                    }\n                    qt_list.append(qt_model)\n            return qt_list\n        except exception.DelfinException as err:\n            err_msg = \"Failed to get storage qtrees from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise err\n\n        except Exception as err:\n            err_msg = \"Failed to get storage qtrees from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_nfs_shares(self, storage_id, qtree_list, protocol_map):\n        try:\n            nfs_info = self.ssh_pool.do_exec(\n                
constant.NFS_SHARE_SHOW_COMMAND)\n            nfs_list = []\n            fs_map_list = []\n            Tools.split_value_map_list(nfs_info, fs_map_list, split=':')\n            for fs_map in fs_map_list:\n                if fs_map and 'VserverName' in fs_map.keys():\n                    protocol = protocol_map.get(fs_map['VserverName'])\n                    if constants.ShareProtocol.NFS in protocol:\n                        fs_id = self.get_fs_id(fs_map['VserverName'],\n                                               fs_map['VolumeName'])\n                        share_name = \\\n                            fs_map['VserverName'] + '/' + fs_map['VolumeName']\n                        qt_id = self.get_qt_id(fs_map['VserverName'],\n                                               fs_map['VolumeName'], '')\n                        qtree_id = None\n                        for qtree in qtree_list:\n                            if qtree['native_qtree_id'] == qt_id:\n                                qtree_id = qt_id\n                            if fs_id == qtree['native_filesystem_id']\\\n                                    and qtree['name'] != \"\"\\\n                                    and qtree['name'] != \\\n                                    qtree['native_qtree_id']:\n                                qt_share_name = \\\n                                    share_name + '/' + qtree['name']\n                                share = {\n                                    'name': qt_share_name,\n                                    'storage_id': storage_id,\n                                    'native_share_id':\n                                        qt_share_name + '_' +\n                                        constants.ShareProtocol.NFS,\n                                    'native_qtree_id':\n                                        qtree['native_qtree_id'],\n                                    'native_filesystem_id':\n                                        qtree['native_filesystem_id'],\n                                    'path': qtree['path'],\n                                    'protocol': constants.ShareProtocol.NFS\n                                }\n                                nfs_list.append(share)\n                        share = {\n                            'name': share_name,\n                            'storage_id': storage_id,\n                            'native_share_id':\n                                share_name + '_' + constants.ShareProtocol.NFS,\n                            'native_qtree_id': qtree_id,\n                            'native_filesystem_id': fs_id,\n                            'path': fs_map['JunctionPath'],\n                            'protocol': constants.ShareProtocol.NFS\n                        }\n                        nfs_list.append(share)\n            return nfs_list\n        except exception.DelfinException as err:\n            err_msg = \"Failed to get storage nfs share from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise err\n        except Exception as err:\n            err_msg = \"Failed to get storage nfs share from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_cifs_shares(self, storage_id, vserver_name,\n                        qtree_list, protocol_map):\n        shares_list = []\n        share_info = self.ssh_pool.do_exec(\n            
(constant.CIFS_SHARE_SHOW_DETAIL_COMMAND %\n             {'vserver_name': vserver_name}))\n        share_map_list = []\n        Tools.split_value_map_list(share_info, share_map_list, split=':')\n        for share_map in share_map_list:\n            if share_map and 'VolumeName' in share_map.keys() and \\\n                    share_map['VolumeName'] != '-':\n                protocol_str = protocol_map.get(\n                    share_map['Vserver'])\n                fs_id = self.get_fs_id(share_map['Vserver'],\n                                       share_map['VolumeName'])\n                share_id = fs_id + '_' + share_map['Share'] + '_'\n                qtree_id = None\n                for qtree in qtree_list:\n                    name_array = share_map['Path'].split('/')\n                    if len(name_array) > 0:\n                        qtree_name = name_array[len(name_array) - 1]\n                        if qtree_name == share_map['VolumeName']:\n                            qtree_name = ''\n                        qt_id = self.get_qt_id(\n                            share_map['Vserver'],\n                            share_map['VolumeName'], qtree_name)\n                    else:\n                        break\n                    if qtree['native_qtree_id'] == qt_id:\n                        qtree_id = qt_id\n                        break\n                if constants.ShareProtocol.CIFS in protocol_str:\n                    share = {\n                        'name': share_map['Share'],\n                        'storage_id': storage_id,\n                        'native_share_id':\n                            share_id + constants.ShareProtocol.CIFS,\n                        'native_qtree_id': qtree_id,\n                        'native_filesystem_id': fs_id,\n                        'path': share_map['Path'],\n                        'protocol': constants.ShareProtocol.CIFS\n                    }\n                    shares_list.append(share)\n        return shares_list\n\n    def list_shares(self, storage_id):\n        try:\n            shares_list = []\n            qtree_list = self.list_qtrees(None)\n            protocol_info = self.ssh_pool.do_exec(\n                constant.SHARE_AGREEMENT_SHOW_COMMAND)\n            protocol_map = {}\n            protocol_arr = self.get_table_data(protocol_info)\n            for protocol in protocol_arr:\n                agr_arr = protocol.split()\n                if len(agr_arr) > 1:\n                    protocol_map[agr_arr[0]] = agr_arr[1]\n            vserver_info = self.ssh_pool.do_exec(\n                constant.VSERVER_SHOW_COMMAND)\n            vserver_array = self.get_table_data(vserver_info)\n            for vserver in vserver_array:\n                vserver_name = vserver.split()\n                if len(vserver_name) > 1:\n                    shares_list += self.get_cifs_shares(\n                        storage_id, vserver_name[0], qtree_list, protocol_map)\n            shares_list += self.get_nfs_shares(\n                storage_id, qtree_list, protocol_map)\n            return shares_list\n        except exception.DelfinException as err:\n            err_msg = \"Failed to get storage shares from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise err\n\n        except Exception as err:\n            err_msg = \"Failed to get storage shares from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise 
exception.InvalidResults(err_msg)\n\n    def list_filesystems(self, storage_id):\n        try:\n            fs_list = self.get_filesystems(storage_id)\n            return fs_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage filesystems from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage filesystems from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n
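\n    # Native quota ids are composed as '<vserver>_<volume>_<type>_<qtree>_<target>';\n    # tree quotas extend the qtree id with the target path, while user/group\n    # quotas record the target as user_group_name.\n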
    def list_quotas(self, storage_id):\n        try:\n            quota_list = []\n            quotas_info = self.ssh_pool.do_exec(\n                constant.QUOTA_SHOW_DETAIL_COMMAND)\n            quota_map_list = []\n            Tools.split_value_map_list(quotas_info, quota_map_list, \":\")\n            for quota_map in quota_map_list:\n                user_group_name = None\n                if quota_map and 'VolumeName' in quota_map.keys():\n                    quota_id = \\\n                        quota_map['Vserver'] + '_' + \\\n                        quota_map['VolumeName'] + '_' + \\\n                        quota_map['Type'] + '_' + \\\n                        quota_map['QtreeName'] + '_' + \\\n                        quota_map['Target']\n                    type = constant.QUOTA_TYPE.get(quota_map['Type'])\n                    qt_id = self.get_qt_id(\n                        quota_map['Vserver'],\n                        quota_map['VolumeName'], '')\n                    if type == 'tree' and quota_map['Target'] != '':\n                        qt_id += '/' + quota_map['Target']\n                    else:\n                        if type in ('user', 'group'):\n                            user_group_name = quota_map['Target']\n                        if quota_map['QtreeName'] != '':\n                            qt_id += '/' + quota_map['QtreeName']\n                    fs_id = self.get_fs_id(quota_map['Vserver'],\n                                           quota_map['VolumeName'])\n                    quota = {\n                        'native_quota_id': quota_id,\n                        'type': type,\n                        'storage_id': storage_id,\n                        'native_filesystem_id': fs_id,\n                        'native_qtree_id': qt_id,\n                        'capacity_hard_limit':\n                            self.get_size(quota_map['DiskLimit']),\n                        'capacity_soft_limit':\n                            self.get_size(quota_map['SoftDiskLimit']),\n                        'file_hard_limit':\n                            int(quota_map['FilesLimit'])\n                            if quota_map['FilesLimit'] != '-' else '-',\n                        'file_soft_limit':\n                            int(quota_map['SoftFilesLimit'])\n                            if quota_map['SoftFilesLimit'] != '-' else '-',\n                        'file_count': None,\n                        'used_capacity': None,\n                        'user_group_name': user_group_name\n                    }\n                    quota_list.append(quota)\n            return quota_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage quotas from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage quotas from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_alert_sources(self):\n        try:\n            ip_list = []\n            mgt_ip = self.ssh_pool.do_exec(constant.MGT_IP_COMMAND)\n            controller_list = self.list_controllers(None)\n            for controller in controller_list:\n                ip_list.append({'host': controller['mgmt_ip']})\n            mgt_ip_array = self.get_table_data(mgt_ip)\n            ip_list.append({'host': mgt_ip_array[0].split()[2]})\n            return ip_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage ip from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage ip from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def do_rest_call(self, url, data):\n        try:\n            res = self.rest_client.do_call(\n                url, data, 'GET', constant.SOCKET_TIMEOUT)\n            if res.status_code == constant.RETURN_SUCCESS_CODE \\\n                    or res.status_code == constant.CREATED_SUCCESS_CODE \\\n                    or res.status_code == constant.ACCEPTED_RETURN_CODE:\n                result_json = res.json()\n                return result_json.get('records')\n            elif res.status_code == constant.BAD_REQUEST_RETURN_CODE:\n                raise exception.BadRequest()\n            elif res.status_code == constant.UNAUTHORIZED_RETURN_CODE:\n                raise exception.NotAuthorized()\n            elif res.status_code == constant.FORBIDDEN_RETURN_CODE:\n                raise exception.InvalidUsernameOrPassword()\n            elif res.status_code == constant.NOT_FOUND_RETURN_CODE:\n                LOG.error('URL %s returned no results' % url)\n                return []\n            elif res.status_code == constant.METHOD_NOT_ALLOWED_CODE:\n                raise exception.Invalid()\n            elif res.status_code == constant.CONFLICT_RETURN_CODE:\n                raise exception.Invalid()\n            elif res.status_code == constant.INTERNAL_ERROR_CODE:\n                raise exception.BadResponse()\n        except exception.DelfinException as e:\n            err_msg = \"Failed to rest call from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to rest call from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def collect_perf_metrics(self, storage_id,\n                             resource_metrics, start_time, end_time):\n        try:\n            metrics = []\n            if start_time and end_time:\n                metrics_keys = resource_metrics.keys()\n                # storage metrics\n                if constants.ResourceType.STORAGE in metrics_keys:\n                    metrics.extend(\n                        self.get_storage_perf(\n                            resource_metrics,\n                            storage_id, start_time, end_time))\n                
# pool metrics\n                if constants.ResourceType.STORAGE_POOL in metrics_keys:\n                    metrics.extend(\n                        self.get_pool_perf(\n                            resource_metrics,\n                            storage_id, start_time, end_time))\n                # volume metrics\n                if constants.ResourceType.VOLUME in metrics_keys:\n                    metrics.extend(\n                        self.get_volume_perf(\n                            resource_metrics,\n                            storage_id, start_time, end_time))\n                # port metrics\n                if constants.ResourceType.PORT in metrics_keys:\n                    metrics.extend(\n                        self.get_port_perf(\n                            resource_metrics,\n                            storage_id, start_time, end_time))\n                # filesystem metrics\n                if constants.ResourceType.FILESYSTEM in metrics_keys:\n                    metrics.extend(\n                        self.get_fs_perf(\n                            resource_metrics,\n                            storage_id, start_time, end_time))\n            return metrics\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage performance from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage performance from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def get_storage_perf(self, metrics, storage_id, start_time, end_time):\n        json_info = self.do_rest_call(constant.CLUSTER_PERF_URL, None)\n        if json_info:\n            system_info = self.ssh_pool.do_exec(\n                constant.CLUSTER_SHOW_COMMAND)\n            storage_map_list = []\n            Tools.split_value_map_list(\n                system_info, storage_map_list, split=':')\n            storage = storage_map_list[-1]\n            storage_metrics = PerformanceHandler.\\\n                get_perf_value(metrics, storage_id,\n                               start_time, end_time,\n                               json_info,\n                               storage['ClusterUUID'] + ':'\n                               + storage['ClusterSerialNumber'],\n                               storage['ClusterName'],\n                               constants.ResourceType.STORAGE)\n            return storage_metrics\n        return []\n\n    def get_pool_perf(self, metrics, storage_id, start_time, end_time):\n        agg_info = self.ssh_pool.do_exec(\n            constant.AGGREGATE_SHOW_DETAIL_COMMAND)\n        agg_map_list = []\n        pool_metrics = []\n        Tools.split_value_map_list(agg_info, agg_map_list, split=':')\n        for agg_map in agg_map_list:\n            if 'UUIDString' in agg_map:\n                uuid = agg_map['UUIDString']\n                json_info = self.do_rest_call(\n                    constant.POOL_PERF_URL % uuid, None)\n                pool_metrics.extend(\n                    PerformanceHandler.get_perf_value(\n                        metrics,\n                        storage_id,\n                        start_time,\n                        end_time,\n                        json_info,\n                        agg_map['UUIDString'],\n                        agg_map['Aggregate'],\n                 
       constants.ResourceType.STORAGE_POOL))\n        return pool_metrics\n\n    def get_volume_perf(self, metrics, storage_id, start_time, end_time):\n        volume_info = \\\n            self.ssh_pool.do_exec(constant.LUN_SHOW_DETAIL_COMMAND)\n        volume_map_list = []\n        volume_metrics = []\n        Tools.split_value_map_list(volume_info, volume_map_list, split=':')\n        for volume in volume_map_list:\n            if 'LUNUUID' in volume:\n                uuid = volume['LUNUUID']\n                json_info = self.do_rest_call(\n                    constant.VOLUME_PERF_URL % uuid, None)\n                volume_metrics.extend(\n                    PerformanceHandler.get_perf_value(\n                        metrics, storage_id,\n                        start_time, end_time,\n                        json_info, volume['SerialNumber'],\n                        volume['LUNName'],\n                        constants.ResourceType.VOLUME))\n        return volume_metrics\n\n    def get_fs_perf(self, metrics, storage_id, start_time, end_time):\n        fs_info = self.do_rest_call(\n            constant.FS_INFO_URL, {})\n        fs_metrics = []\n        for fs in fs_info:\n            if 'uuid' in fs:\n                uuid = fs['uuid']\n                json_info = self.do_rest_call(\n                    constant.FS_PERF_URL % uuid, None)\n                fs_id = self.get_fs_id(\n                    fs['svm']['name'], fs['name'])\n                fs_metrics.extend(\n                    PerformanceHandler.get_perf_value(\n                        metrics, storage_id, start_time,\n                        end_time, json_info, fs_id,\n                        fs['name'],\n                        constants.ResourceType.FILESYSTEM))\n        return fs_metrics\n\n    def get_port_perf(self, metrics, storage_id, start_time, end_time):\n        fc_port = self.do_rest_call(constant.FC_INFO_URL, None)\n        port_metrics = []\n        for fc in fc_port:\n            if 'uuid' in fc:\n                uuid = fc['uuid']\n                json_info = self.do_rest_call(\n                    constant.FC_PERF_URL % uuid, None)\n                port_id = fc['node']['name'] + '_' + fc['name']\n                port_metrics.extend(\n                    PerformanceHandler.get_perf_value(\n                        metrics, storage_id,\n                        start_time, end_time,\n                        json_info, port_id,\n                        fc['name'], constants.ResourceType.PORT))\n        eth_port = self.do_rest_call(constant.ETH_INFO_URL, {})\n        for eth in eth_port:\n            if 'uuid' in eth:\n                uuid = eth['uuid']\n                json_info = self.do_rest_call(\n                    constant.ETH_PERF_URL % uuid, None)\n                port_id = eth['node']['name'] + '_' + eth['name']\n                port_metrics.extend(\n                    PerformanceHandler.get_perf_value(\n                        metrics, storage_id,\n                        start_time, end_time,\n                        json_info, port_id,\n                        eth['name'], constants.ResourceType.PORT))\n        return port_metrics\n\n    def get_storage_version(self):\n        version_info = self.ssh_pool.do_exec(\n            constant.VERSION_SHOW_COMMAND)\n        version_array = version_info.split(\"\\r\\n\")\n        for version in version_array:\n            if 'NetApp' in version:\n                storage_version = version.split(\":\")\n                version_list = \\\n                    
re.findall(constant.FLOAT_PATTERN, storage_version[0])\n                for ver_info in version_list:\n                    if float(ver_info) >= 9.0:\n                        return float(ver_info)\n        return 9.0\n\n    @staticmethod\n    def get_cap_by_version(version, capabilities):\n        if version >= 9.6:\n            capabilities['resource_metrics']['storage'] = \\\n                constant.STORAGE_CAPABILITIES\n            if version >= 9.7:\n                capabilities['resource_metrics']['storagePool'] = \\\n                    constant.POOL_CAPABILITIES\n                capabilities['resource_metrics']['port'] = \\\n                    constant.PORT_CAPABILITIES\n                capabilities['resource_metrics']['filesystem'] = \\\n                    constant.FS_CAPABILITIES\n            if version >= 9.8:\n                capabilities['resource_metrics']['volume'] = \\\n                    constant.VOLUME_CAPABILITIES\n        return capabilities\n\n    @staticmethod\n    def get_capabilities(filters):\n        if filters:\n            capabilities = {\n                'is_historic': True,\n                'resource_metrics': {}\n            }\n            version_List = \\\n                re.findall(\n                    constant.FLOAT_PATTERN, filters.get('firmware_version'))\n            version = 9.0\n            for ver_info in version_List:\n                if float(ver_info) >= 9.0:\n                    version = float(ver_info)\n                    break\n            NetAppHandler.get_cap_by_version(version, capabilities)\n            return capabilities\n        cap_map = {}\n        for i in range(0, 10):\n            capabilities = {\n                'is_historic': True,\n                'resource_metrics': {}\n            }\n            version = float('9.' 
+ str(i))\n            NetAppHandler.get_cap_by_version(version, capabilities)\n            cap_map[version] = capabilities\n        return cap_map\n\n    def get_latest_perf_timestamp(self):\n        try:\n            timestamp = 0\n            json_info = self.do_rest_call(constant.CLUSTER_PERF_URL, None)\n            for perf_info in json_info:\n                occur_time = \\\n                    int(time.mktime(time.strptime(\n                        perf_info.get('timestamp'),\n                        PerformanceHandler.TIME_TYPE)))\n                second_offset = \\\n                    (time.mktime(time.localtime()) -\n                     time.mktime(time.gmtime()))\n                occur_time = \\\n                    (occur_time + int(second_offset)) * 1000\n                if timestamp < occur_time:\n                    timestamp = occur_time\n            if timestamp == 0:\n                return None\n            return timestamp\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage perf timestamp from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage perf timestamp from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_storage_host_initiators(self, storage_id):\n        try:\n            initiator_list = []\n            iscsi_initiator_info = self.ssh_pool.do_exec(\n                constant.ISCSI_INITIATOR_COMMAND)\n            fc_initiator_info = self.ssh_pool.do_exec(\n                constant.FC_INITIATOR_COMMAND)\n            new_initiator_info = self.ssh_pool.do_exec(\n                constant.HOST_COMMAND)\n            MappingHandler.format_initiators(\n                initiator_list, new_initiator_info,\n                storage_id, '', is_default=True)\n            MappingHandler.format_initiators(\n                initiator_list, iscsi_initiator_info,\n                storage_id, constants.InitiatorType.ISCSI)\n            MappingHandler.format_initiators(\n                initiator_list, fc_initiator_info,\n                storage_id, constants.InitiatorType.FC)\n            return initiator_list\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage initiators from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage initiators from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_storage_hosts(self, storage_id):\n        try:\n            host_info = self.ssh_pool.do_exec(constant.HOST_COMMAND)\n            return MappingHandler.format_host(host_info, storage_id)\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage hosts from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage hosts from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n
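\n    # Port groups are derived from the portset listing joined with LIF\n    # details; the formatting is delegated to MappingHandler.format_port_group.\n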
exception.InvalidResults(err_msg)\n\n    def list_port_groups(self, storage_id):\n        try:\n            port_set_info = self.ssh_pool.do_exec(\n                constant.PORT_GROUP_COMMAND)\n            lif_info = self.ssh_pool.do_exec(\n                constant.LIF_COMMAND)\n            return MappingHandler.format_port_group(port_set_info,\n                                                    lif_info,\n                                                    storage_id)\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage port groups from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage port groups from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n\n    def list_masking_views(self, storage_id):\n        try:\n            mapping_info = self.ssh_pool.do_exec(\n                constant.LUN_MAPPING_COMMAND)\n            volume_info = self.ssh_pool.do_exec(\n                constant.LUN_SHOW_DETAIL_COMMAND)\n            host_list = self.list_storage_hosts(None)\n            return MappingHandler.format_mapping_view(mapping_info,\n                                                      volume_info,\n                                                      storage_id,\n                                                      host_list)\n        except exception.DelfinException as e:\n            err_msg = \"Failed to get storage masking views from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(e))\n            LOG.error(err_msg)\n            raise e\n        except Exception as err:\n            err_msg = \"Failed to get storage masking views from \" \\\n                      \"netapp cmode: %s\" % (six.text_type(err))\n            LOG.error(err_msg)\n            raise exception.InvalidResults(err_msg)\n"
  },
  {
    "path": "delfin/drivers/netapp/dataontap/performance_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\n\nfrom oslo_log import log\n\nfrom delfin.common import constants\nfrom delfin.drivers.netapp.dataontap import constants as constant\nfrom delfin.drivers.utils.tools import Tools\n\nLOG = log.getLogger(__name__)\n\n\nclass PerformanceHandler(object):\n    TIME_TYPE = '%Y-%m-%dT%H:%M:%SZ'\n\n    @staticmethod\n    def get_value(value, key):\n        if key == 'iops' or key == 'readIops' or key == 'writeIops':\n            return int(value)\n        elif key == 'throughput' or key == 'readThroughput' \\\n                or key == 'writeThroughput':\n            unit = constant.CAP_MAP[key]['unit']\n            return PerformanceHandler.get_unit_size(value, unit)\n        elif key == 'responseTime':\n            return round(int(value) / 1000, 3)\n        else:\n            return value\n\n    @staticmethod\n    def get_unit_size(value, unit):\n        if value is None:\n            return None\n        if value == '0' or value == 0:\n            return 0\n        unit_array = unit.split('/')\n        capacity = Tools.change_capacity_to_bytes(unit_array[0])\n        if capacity == 1:\n            return value\n        return round(int(value) / capacity, 3)\n\n    @staticmethod\n    def get_perf_value(metrics, storage_id, start_time, end_time,\n                       data_info, resource_id, resource_name, resource_type):\n        fs_metrics = []\n        selection = metrics.get(resource_type)\n        for key in selection:\n            labels = {\n                'storage_id': storage_id,\n                'resource_type': resource_type,\n                'resource_id': resource_id,\n                'resource_name': resource_name,\n                'type': 'RAW',\n                'unit': constant.CAP_MAP[key]['unit']\n            }\n            values = {}\n            for perf_info in data_info:\n                if perf_info.get('timestamp'):\n                    occur_time = \\\n                        int(time.mktime(time.strptime(\n                            perf_info.get('timestamp'),\n                            PerformanceHandler.TIME_TYPE)))\n                    second_offset = \\\n                        (time.mktime(time.localtime()) -\n                         time.mktime(time.gmtime()))\n                    timestamp = \\\n                        (occur_time + int(second_offset)) * 1000\n                    if int(start_time) <= timestamp <= int(end_time) \\\n                            and timestamp % 60000 == 0:\n                        key_list = constant.PERF_MAP.get(key, [])\n                        if len(key_list) > 0:\n                            value = perf_info.get(key_list[0], {}) \\\n                                .get(key_list[1], None)\n                            if value is not None:\n                                value = PerformanceHandler. 
\\\n                                    get_value(value, key)\n                                values[timestamp] = value\n            if values:\n                m = constants.metric_struct(name=key, labels=labels,\n                                            values=values)\n                fs_metrics.append(m)\n        return fs_metrics\n"
  },
  {
    "path": "delfin/drivers/pure/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/pure/flasharray/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/pure/flasharray/consts.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom delfin.common import constants\n\n# The default volume\nDEFAULT_CAPACITY = 0\n\n# The default speed\nDEFAULT_SPEED = 0\n\n# The default list_alerts time conversion\nDEFAULT_LIST_ALERTS_TIME_CONVERSION = 1000\n\n# The default count for the get_volumes_info function\nDEFAULT_COUNT_GET_VOLUMES_INFO = 0\n\n# Number of re-logins\nRE_LOGIN_TIMES = 3\n\n# Constant one\nCONSTANT_ONE = 1\n# Constant zero\nCONSTANT_ZERO = 0\n\n# Success status code\nSUCCESS_STATUS_CODE = 200\n\n# Status code of no permission\nPERMISSION_DENIED_STATUS_CODE = 401\n\n# Custom token of Pure\nCUSTOM_TOKEN = 'x-next-token'\n\n# The default get_storage model\nCONTROLLER_PRIMARY = 'primary'\n\n# Normal value of the controller status\nNORMAL_CONTROLLER_STATUS = 'ready'\n\n# disk type\nDISK_TYPE_NVRAM = 'NVRAM'\n\n# The account password is incorrect during login.\nLOGIN_PASSWORD_ERR = 'invalid credentials'\n\n# list_port: Add \":\" to the WWN every 2 sequences.\nSPLICE_WWN_SERIAL = 2\nSPLICE_WWN_COLON = ':'\n\nSEVERITY_MAP = {'fatal': constants.Severity.FATAL,\n                'critical': constants.Severity.CRITICAL,\n                'major': constants.Severity.MAJOR,\n                'minor': constants.Severity.MINOR,\n                'warning': constants.Severity.WARNING,\n                'informational': constants.Severity.INFORMATIONAL,\n                'NotSpecified': constants.Severity.NOT_SPECIFIED}\nCATEGORY_MAP = {'fault': constants.Category.FAULT,\n                'event': constants.Category.EVENT,\n                'recovery': constants.Category.RECOVERY,\n                'notSpecified': constants.Category.NOT_SPECIFIED}\nCONTROLLER_STATUS_MAP = {'normal': constants.ControllerStatus.NORMAL,\n                         'ok': constants.ControllerStatus.NORMAL,\n                         'offline': constants.ControllerStatus.OFFLINE,\n                         'not_installed': constants.ControllerStatus.OFFLINE,\n                         'fault': constants.ControllerStatus.FAULT,\n                         'degraded': constants.ControllerStatus.DEGRADED,\n                         'unready': constants.ControllerStatus.UNKNOWN}\nDISK_STATUS_MAP = {'normal': constants.DiskStatus.NORMAL,\n                   'healthy': constants.DiskStatus.NORMAL,\n                   'abnormal': constants.DiskStatus.ABNORMAL,\n                   'unhealthy': constants.DiskStatus.ABNORMAL,\n                   'offline': constants.DiskStatus.OFFLINE}\nPORT_STATUS_MAP = {'ok': constants.PortHealthStatus.NORMAL,\n                   'not_installed': constants.PortHealthStatus.ABNORMAL\n                   }\n\nPARSE_ALERT_ALERT_ID = '1.3.6.1.2.1.1.3.0'\nPARSE_ALERT_STORAGE_NAME = '1.3.6.1.4.1.40482.3.1'\nPARSE_ALERT_CONTROLLER_NAME = '1.3.6.1.4.1.40482.3.3'\nPARSE_ALERT_ALERT_NAME = '1.3.6.1.4.1.40482.3.5'\nPARSE_ALERT_DESCRIPTION = '1.3.6.1.4.1.40482.3.6'\nPARSE_ALERT_SEVERITY = '1.3.6.1.4.1.40482.3.7'\n\nPARSE_ALERT_SEVERITY_MAP = {'1': 
constants.Severity.WARNING,\n                            '2': constants.Severity.INFORMATIONAL}\n\n# collect_perf_metrics method\nSIXTY = 60\nLIST_METRICS = -1\n\nSTORAGE_CAP = {\n    constants.StorageMetric.IOPS.name: {\n        \"unit\": constants.StorageMetric.IOPS.unit,\n        \"description\": constants.StorageMetric.IOPS.description\n    },\n    constants.StorageMetric.READ_IOPS.name: {\n        \"unit\": constants.StorageMetric.READ_IOPS.unit,\n        \"description\": constants.StorageMetric.READ_IOPS.description\n    },\n    constants.StorageMetric.WRITE_IOPS.name: {\n        \"unit\": constants.StorageMetric.WRITE_IOPS.unit,\n        \"description\": constants.StorageMetric.WRITE_IOPS.description\n    },\n    constants.StorageMetric.THROUGHPUT.name: {\n        \"unit\": constants.StorageMetric.THROUGHPUT.unit,\n        \"description\": constants.StorageMetric.THROUGHPUT.description\n    },\n    constants.StorageMetric.READ_THROUGHPUT.name: {\n        \"unit\": constants.StorageMetric.READ_THROUGHPUT.unit,\n        \"description\": constants.StorageMetric.READ_THROUGHPUT.description\n    },\n    constants.StorageMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": constants.StorageMetric.WRITE_THROUGHPUT.unit,\n        \"description\": constants.StorageMetric.WRITE_THROUGHPUT.description\n    },\n    constants.StorageMetric.READ_RESPONSE_TIME.name: {\n        \"unit\": constants.StorageMetric.READ_RESPONSE_TIME.unit,\n        \"description\": constants.StorageMetric.READ_RESPONSE_TIME.description\n    },\n    constants.StorageMetric.WRITE_RESPONSE_TIME.name: {\n        \"unit\": constants.StorageMetric.WRITE_RESPONSE_TIME.unit,\n        \"description\": constants.StorageMetric.WRITE_RESPONSE_TIME.description\n    }\n}\nVOLUME_CAP = {\n    constants.VolumeMetric.IOPS.name: {\n        \"unit\": constants.VolumeMetric.IOPS.unit,\n        \"description\": constants.VolumeMetric.IOPS.description\n    },\n    constants.VolumeMetric.READ_IOPS.name: {\n        \"unit\": constants.VolumeMetric.READ_IOPS.unit,\n        \"description\": constants.VolumeMetric.READ_IOPS.description\n    },\n    constants.VolumeMetric.WRITE_IOPS.name: {\n        \"unit\": constants.VolumeMetric.WRITE_IOPS.unit,\n        \"description\": constants.VolumeMetric.WRITE_IOPS.description\n    },\n    constants.VolumeMetric.THROUGHPUT.name: {\n        \"unit\": constants.VolumeMetric.THROUGHPUT.unit,\n        \"description\": constants.VolumeMetric.THROUGHPUT.description\n    },\n    constants.VolumeMetric.READ_THROUGHPUT.name: {\n        \"unit\": constants.VolumeMetric.READ_THROUGHPUT.unit,\n        \"description\": constants.VolumeMetric.READ_THROUGHPUT.description\n    },\n    constants.VolumeMetric.WRITE_THROUGHPUT.name: {\n        \"unit\": constants.VolumeMetric.WRITE_THROUGHPUT.unit,\n        \"description\": constants.VolumeMetric.WRITE_THROUGHPUT.description\n    },\n    constants.VolumeMetric.READ_RESPONSE_TIME.name: {\n        \"unit\": constants.VolumeMetric.READ_RESPONSE_TIME.unit,\n        \"description\": constants.VolumeMetric.READ_RESPONSE_TIME.description\n    },\n    constants.VolumeMetric.WRITE_RESPONSE_TIME.name: {\n        \"unit\": constants.VolumeMetric.WRITE_RESPONSE_TIME.unit,\n        \"description\": constants.VolumeMetric.WRITE_RESPONSE_TIME.description\n    }\n}\n\n# Timestamp format conversion\nPURE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'\n\nHOST_OS_TYPES_MAP = {\n    'linux': constants.HostOSTypes.LINUX,\n    'windows': constants.HostOSTypes.WINDOWS,\n    'solaris': 
constants.HostOSTypes.SOLARIS,\n    'hp-ux': constants.HostOSTypes.HP_UX,\n    'hpux': constants.HostOSTypes.HP_UX,\n    'aix': constants.HostOSTypes.AIX,\n    'xenserver': constants.HostOSTypes.XEN_SERVER,\n    'vmware esx': constants.HostOSTypes.VMWARE_ESX,\n    'esxi': constants.HostOSTypes.VMWARE_ESX,\n    'linux_vis': constants.HostOSTypes.LINUX_VIS,\n    'windows server 2012': constants.HostOSTypes.WINDOWS_SERVER_2012,\n    'oracle vm': constants.HostOSTypes.ORACLE_VM,\n    'oracle-vm-server': constants.HostOSTypes.ORACLE_VM,\n    'open vms': constants.HostOSTypes.OPEN_VMS,\n    'vms': constants.HostOSTypes.OPEN_VMS,\n    'unknown': constants.HostOSTypes.UNKNOWN\n}\n"
  },
  {
    "path": "delfin/drivers/pure/flasharray/pure_flasharray.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport datetime\nimport hashlib\nimport time\n\nfrom oslo_log import log\nfrom oslo_utils import units\n\nfrom delfin import exception, utils\nfrom delfin.common import constants\nfrom delfin.drivers import driver\nfrom delfin.drivers.pure.flasharray import rest_handler, consts\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\nclass PureFlashArrayDriver(driver.StorageDriver):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.rest_handler = rest_handler.RestHandler(**kwargs)\n        self.rest_handler.login()\n\n    def list_volumes(self, context):\n        list_volumes = []\n        volumes = self.rest_handler.get_volumes()\n        if volumes:\n            for volume in volumes:\n                volume_name = volume.get('name')\n                total_capacity = int(volume.get('size',\n                                                consts.DEFAULT_CAPACITY))\n                used_capacity = int(volume.get('volumes',\n                                               consts.DEFAULT_CAPACITY))\n                volume_dict = {\n                    'native_volume_id': volume_name,\n                    'name': volume_name,\n                    'total_capacity': total_capacity,\n                    'used_capacity': used_capacity,\n                    'free_capacity': total_capacity - used_capacity,\n                    'storage_id': self.storage_id,\n                    'status': constants.StorageStatus.NORMAL,\n                    'type': constants.VolumeType.THIN if\n                    volume.get('thin_provisioning') is not None\n                    else constants.VolumeType.THICK\n                }\n                list_volumes.append(volume_dict)\n        return list_volumes\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def clear_alert(self, context, alert):\n        pass\n\n    def get_storage(self, context):\n        storages = self.rest_handler.rest_call(\n            self.rest_handler.REST_STORAGE_URL)\n        total_capacity = None\n        used_capacity = None\n        if storages:\n            for storage in storages:\n                used_capacity = int(storage.get('total',\n                                                consts.DEFAULT_CAPACITY))\n                total_capacity = int(storage.get('capacity',\n                                                 consts.DEFAULT_CAPACITY))\n                break\n        raw_capacity = consts.DEFAULT_CAPACITY\n        disks = self.list_disks(context)\n        if disks:\n            for disk in disks:\n                raw_capacity = raw_capacity + disk.get('capacity')\n        arrays = self.rest_handler.rest_call(self.rest_handler.REST_ARRAY_URL)\n        storage_name = None\n        serial_number = None\n        version = None\n        if arrays:\n            storage_name = arrays.get('array_name')\n            serial_number = arrays.get('id')\n            
version = arrays.get('version')\n        model = None\n        status = constants.StorageStatus.NORMAL\n        controllers = self.rest_handler.rest_call(\n            self.rest_handler.REST_CONTROLLERS_URL)\n        if controllers:\n            for controller in controllers:\n                if controller.get('mode') == consts.CONTROLLER_PRIMARY:\n                    model = controller.get('model')\n                    if controller.get('status') != \\\n                            consts.NORMAL_CONTROLLER_STATUS:\n                        status = constants.StorageStatus.ABNORMAL\n        if not all((storages, arrays, controllers)):\n            LOG.error('get_storage error, Unable to obtain data.')\n            raise exception.StorageBackendException('Unable to obtain data')\n        storage_result = {\n            'model': model,\n            'total_capacity': total_capacity,\n            'raw_capacity': raw_capacity,\n            'used_capacity': used_capacity,\n            'free_capacity': total_capacity - used_capacity,\n            'vendor': 'PURE',\n            'name': storage_name,\n            'serial_number': serial_number,\n            'firmware_version': version,\n            'status': status\n        }\n        return storage_result\n\n    def list_alerts(self, context, query_para=None):\n        alerts = self.rest_handler.rest_call(self.rest_handler.REST_ALERTS_URL)\n        alerts_list = []\n        if alerts:\n            for alert in alerts:\n                alerts_model = dict()\n                opened = alert.get('opened')\n                time_difference = self.get_time_difference()\n                timestamp = (int(datetime.datetime.strptime(\n                    opened, consts.PURE_TIME_FORMAT).timestamp()\n                    + time_difference) * consts.\n                    DEFAULT_LIST_ALERTS_TIME_CONVERSION)\\\n                    if opened is not None else None\n                if query_para is not None:\n                    try:\n                        if timestamp is None or timestamp \\\n                                < int(query_para.get('begin_time')) or \\\n                                timestamp > int(query_para.get('end_time')):\n                            continue\n                    except Exception as e:\n                        LOG.error(e)\n                alerts_model['occur_time'] = timestamp\n                alerts_model['alert_id'] = alert.get('id')\n                alerts_model['severity'] = consts.SEVERITY_MAP.get(\n                    alert.get('current_severity'),\n                    constants.Severity.NOT_SPECIFIED)\n                alerts_model['category'] = constants.Category.FAULT\n                component_name = alert.get('component_name')\n                alerts_model['location'] = component_name\n                alerts_model['type'] = constants.EventType.EQUIPMENT_ALARM\n                alerts_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n                event = alert.get('event')\n                alerts_model['alert_name'] = event\n                alerts_model['match_key'] = hashlib.md5(str(alert.get('id')).\n                                                        encode()).hexdigest()\n                alerts_model['description'] = '({}:{}): {}'. 
\\\n                    format(alert.get('component_type'), component_name, event)\n                alerts_list.append(alerts_model)\n        return alerts_list\n\n    @staticmethod\n    def get_time_difference():\n        time_difference = time.mktime(\n            time.localtime()) - time.mktime(time.gmtime())\n        return time_difference\n\n    @staticmethod\n    def parse_alert(context, alert):\n        try:\n            alert_model = dict()\n            alert_model['alert_id'] = alert.get(consts.PARSE_ALERT_ALERT_ID)\n            alert_model['severity'] = consts.PARSE_ALERT_SEVERITY_MAP.get(\n                alert.get(consts.PARSE_ALERT_SEVERITY),\n                constants.Severity.NOT_SPECIFIED)\n            alert_model['category'] = constants.Category.FAULT\n            alert_model['occur_time'] = utils.utcnow_ms()\n            alert_model['description'] = '({}:{}): {}'.format(alert.get(\n                consts.PARSE_ALERT_STORAGE_NAME),\n                alert.get(consts.PARSE_ALERT_CONTROLLER_NAME),\n                alert.get(consts.PARSE_ALERT_DESCRIPTION))\n            alert_model['location'] = alert.get(\n                consts.PARSE_ALERT_CONTROLLER_NAME)\n            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n            alert_model['alert_name'] = alert.get(\n                consts.PARSE_ALERT_ALERT_NAME)\n            alert_model['sequence_number'] = alert.get(\n                consts.PARSE_ALERT_ALERT_ID)\n            alert_model['match_key'] = hashlib.md5(str(alert.get(\n                consts.PARSE_ALERT_ALERT_ID)).encode()).hexdigest()\n            return alert_model\n        except Exception as e:\n            LOG.error(e)\n            msg = (_(\"Failed to build alert model as some attributes missing\"))\n            raise exception.InvalidResults(msg)\n\n    def list_controllers(self, context):\n        list_controllers = []\n        controllers = self.rest_handler.rest_call(\n            self.rest_handler.REST_CONTROLLERS_URL)\n        hardware = self.get_hardware()\n        if controllers:\n            for controller in controllers:\n                controllers_dict = dict()\n                controller_name = controller.get('name')\n                controllers_dict['name'] = controller_name\n                controllers_dict['status'] = consts.CONTROLLER_STATUS_MAP.get(\n                    hardware.get(controller_name, {}).get('status'),\n                    constants.ControllerStatus.UNKNOWN)\n                controllers_dict['soft_version'] = controller.get('version')\n                controllers_dict['storage_id'] = self.storage_id\n                controllers_dict['native_controller_id'] = controller_name\n                controllers_dict['location'] = controller_name\n                list_controllers.append(controllers_dict)\n        return list_controllers\n\n    def list_disks(self, context):\n        hardware_dict = self.get_hardware()\n        list_disks = []\n        disks = self.rest_handler.rest_call(self.rest_handler.REST_DISK_URL)\n        if disks:\n            for disk in disks:\n                disk_type = disk.get('type')\n                if disk_type == consts.DISK_TYPE_NVRAM or disk_type is None:\n                    continue\n                disk_dict = dict()\n                drive_name = disk.get('name')\n                disk_dict['name'] = drive_name\n                physical_type = disk_type.lower() if disk_type is not None \\\n                    
else None\n                disk_dict['physical_type'] = physical_type \\\n                    if physical_type in constants.DiskPhysicalType.ALL else \\\n                    constants.DiskPhysicalType.UNKNOWN\n                disk_dict['status'] = consts.DISK_STATUS_MAP. \\\n                    get(disk.get('status'), constants.DiskStatus.OFFLINE)\n                disk_dict['storage_id'] = self.storage_id\n                disk_dict['capacity'] = int(disk.get('capacity',\n                                                     consts.DEFAULT_CAPACITY))\n                hardware_object = hardware_dict.get(drive_name, {})\n                speed = hardware_object.get('speed')\n                disk_dict['speed'] = int(speed) if speed is not None else None\n                disk_dict['model'] = hardware_object.get('model')\n                disk_dict['serial_number'] = hardware_object. \\\n                    get('serial_number')\n                disk_dict['native_disk_id'] = drive_name\n                disk_dict['location'] = drive_name\n                disk_dict['manufacturer'] = \"PURE\"\n                disk_dict['firmware'] = \"\"\n                list_disks.append(disk_dict)\n        return list_disks\n\n    def get_hardware(self):\n        hardware_dict = dict()\n        hardware = self.rest_handler.rest_call(\n            self.rest_handler.REST_HARDWARE_URL)\n        if hardware:\n            for hardware_value in hardware:\n                hardware_map = dict()\n                hardware_map['speed'] = hardware_value.get('speed')\n                hardware_map['serial_number'] = hardware_value.get('serial')\n                hardware_map['model'] = hardware_value.get('model')\n                hardware_map['status'] = hardware_value.get('status')\n                hardware_dict[hardware_value.get('name')] = hardware_map\n        return hardware_dict\n\n    def list_ports(self, context):\n        list_ports = []\n        networks = self.get_network()\n        ports = self.get_ports()\n        hardware_dict = self.rest_handler.rest_call(\n            self.rest_handler.REST_HARDWARE_URL)\n        if not hardware_dict:\n            return list_ports\n        for hardware in hardware_dict:\n            hardware_result = dict()\n            hardware_name = hardware.get('name')\n            if 'FC' in hardware_name:\n                hardware_result['type'] = constants.PortType.FC\n            elif 'ETH' in hardware_name:\n                hardware_result['type'] = constants.PortType.ETH\n            elif 'SAS' in hardware_name:\n                hardware_result['type'] = constants.PortType.SAS\n            else:\n                continue\n            hardware_result['name'] = hardware_name\n            hardware_result['native_port_id'] = hardware_name\n            hardware_result['storage_id'] = self.storage_id\n            hardware_result['location'] = hardware_name\n            speed = hardware.get('speed')\n            if speed is None:\n                hardware_result['connection_status'] = \\\n                    constants.PortConnectionStatus.UNKNOWN\n            elif speed == consts.CONSTANT_ZERO:\n                hardware_result['connection_status'] = \\\n                    constants.PortConnectionStatus.DISCONNECTED\n                hardware_result['speed'] = speed\n            else:\n                hardware_result['connection_status'] = \\\n                    constants.PortConnectionStatus.CONNECTED\n                hardware_result['speed'] = int(speed)\n            
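# map the array-reported hardware status onto Delfin's port health\n            # status; values missing from PORT_STATUS_MAP fall back to UNKNOWN\n            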
hardware_result['health_status'] = consts.PORT_STATUS_MAP.get(\n                hardware.get('status'), constants.PortHealthStatus.UNKNOWN)\n            port = ports.get(hardware_name)\n            if port:\n                hardware_result['wwn'] = port.get('wwn')\n            network = networks.get(hardware_name)\n            if network:\n                hardware_result['mac_address'] = network.get('mac_address')\n                hardware_result['logical_type'] = network.get('logical_type')\n                hardware_result['ipv4_mask'] = network.get('ipv4_mask')\n                hardware_result['ipv4'] = network.get('ipv4')\n            list_ports.append(hardware_result)\n        return list_ports\n\n    def get_network(self):\n        networks_object = dict()\n        networks = self.rest_handler.rest_call(\n            self.rest_handler.REST_NETWORK_URL)\n        if networks:\n            for network in networks:\n                network_dict = dict()\n                network_dict['mac_address'] = network.get('hwaddr')\n                services_list = network.get('services')\n                if services_list:\n                    for services in services_list:\n                        network_dict['logical_type'] = services if \\\n                            services in constants.PortLogicalType.ALL else None\n                        break\n                network_dict['ipv4_mask'] = network.get('netmask')\n                network_dict['ipv4'] = network.get('address')\n                network_name = network.get('name').upper()\n                networks_object[network_name] = network_dict\n        return networks_object\n\n    def get_ports(self):\n        ports_dict = dict()\n        ports = self.rest_handler.rest_call(self.rest_handler.REST_PORT_URL)\n        if ports:\n            for port in ports:\n                port_dict = dict()\n                port_name = port.get('name')\n                wwn = port.get('wwn')\n                port_dict['wwn'] = self.get_splice_wwn(wwn) \\\n                    if wwn is not None else port.get('iqn')\n                ports_dict[port_name] = port_dict\n        return ports_dict\n\n    @staticmethod\n    def get_splice_wwn(wwn):\n        wwn_list = list(wwn)\n        wwn_splice = wwn_list[0]\n        for serial in range(1, len(wwn_list)):\n            if serial % consts.SPLICE_WWN_SERIAL == consts.CONSTANT_ZERO:\n                wwn_splice = '{}{}'.format(wwn_splice, consts.SPLICE_WWN_COLON)\n            wwn_splice = '{}{}'.format(wwn_splice, wwn_list[serial])\n        return wwn_splice\n\n    def list_storage_pools(self, context):\n        return []\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    def reset_connection(self, context, **kwargs):\n        self.rest_handler.logout()\n        self.rest_handler.login()\n\n    @staticmethod\n    def get_access_url():\n        return 'https://{ip}'\n\n    def collect_perf_metrics(self, context, storage_id, resource_metrics,\n                             start_time, end_time):\n        LOG.info('The system(storage_id: %s) starts to collect storage and'\n                 ' volume performance, start_time: %s, end_time: %s',\n                 storage_id, start_time, end_time)\n        metrics = []\n        if resource_metrics.get(constants.ResourceType.STORAGE):\n            storage_metrics = self.get_storage_metrics(\n                storage_id,\n                resource_metrics.get(constants.ResourceType.STORAGE),\n                start_time, end_time,\n                
constants.ResourceType.STORAGE)\n            metrics.extend(storage_metrics)\n            LOG.info('The system(storage_id: %s) stop to collect storage'\n                     ' performance, The length is: %s',\n                     storage_id, len(storage_metrics))\n        if resource_metrics.get(constants.ResourceType.VOLUME):\n            volume_metrics = self.get_volume_metrics(\n                storage_id,\n                resource_metrics.get(constants.ResourceType.VOLUME),\n                start_time, end_time,\n                constants.ResourceType.VOLUME)\n            metrics.extend(volume_metrics)\n            LOG.info('The system(storage_id: %s) stop to collect volume'\n                     ' performance, The length is: %s',\n                     storage_id, len(volume_metrics))\n        return metrics\n\n    def get_storage_metrics(self, storage_id, resource_metrics, start_time,\n                            end_time, resource_type):\n        metrics = []\n        arrays_id, arrays_name = self.get_array()\n        packaging_data = self.get_packaging_storage_data(\n            end_time, start_time, resource_type)\n        if not arrays_id or not arrays_name or not packaging_data or\\\n                end_time < start_time:\n            return metrics\n        for resource_key in resource_metrics.keys():\n            labels = {\n                'storage_id': storage_id,\n                'resource_type': resource_type,\n                'resource_id': arrays_id,\n                'resource_name': arrays_name,\n                'type': 'RAW',\n                'unit': resource_metrics[resource_key]['unit']\n            }\n            resource_value = {}\n            for about_timestamp in packaging_data.keys():\n                metrics_data = packaging_data.get(about_timestamp)\n                resource_value[about_timestamp] = \\\n                    metrics_data.get(resource_key)\n            metrics_res = constants.metric_struct(\n                name=resource_key, labels=labels, values=resource_value)\n            metrics.append(metrics_res)\n        return metrics\n\n    def get_packaging_storage_data(self, end_time, start_time, resource_type):\n        duplicate = set()\n        packaging_data = {}\n        list_metrics = self.rest_handler.rest_call(\n            self.rest_handler.REST_ARRAY_HISTORICAL_URL)\n        for storage_metrics in (list_metrics or []):\n            about_timestamp = self.checkout_data(\n                storage_metrics, start_time, end_time, resource_type,\n                duplicate)\n            if about_timestamp is None:\n                continue\n            metrics_data = self.get_metrics_data(\n                storage_metrics, about_timestamp)\n            packaging_data[about_timestamp] = metrics_data\n        return packaging_data\n\n    def checkout_data(self, storage_metrics, start_time, end_time,\n                      resource_type, duplicate):\n        opened = storage_metrics.get('time')\n        if opened is None:\n            return None\n        timestamp_s = self.get_timestamp_s(opened)\n        timestamp_ms = timestamp_s * units.k\n        if timestamp_ms < start_time or timestamp_ms >= end_time:\n            return None\n        about_timestamp = \\\n            int(timestamp_s / consts.SIXTY) * consts.SIXTY * units.k\n        duplicate_value = self.get_duplicate_value(\n            about_timestamp, resource_type, storage_metrics)\n        if duplicate_value in duplicate:\n            return None\n        duplicate.add(duplicate_value)\n 
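       # new sample: return its timestamp rounded down to the whole minute\n 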
       return about_timestamp\n\n    def get_volume_metrics(self, storage_id, resource_metrics, start_time,\n                           end_time, resource_type):\n        metrics = []\n        packaging_data = self.get_packaging_volume_data(\n            end_time, resource_type, start_time)\n        if end_time < start_time or not packaging_data:\n            return metrics\n        for volume_name in packaging_data.keys():\n            for resource_key in resource_metrics.keys():\n                labels = {\n                    'storage_id': storage_id,\n                    'resource_type': resource_type,\n                    'resource_id': volume_name,\n                    'resource_name': volume_name,\n                    'type': 'RAW',\n                    'unit': resource_metrics[resource_key]['unit']\n                }\n                resource_value = {}\n                for volume_metrics in (packaging_data.get(volume_name) or []):\n                    resource_value[volume_metrics.get('time')] = \\\n                        volume_metrics.get(resource_key)\n                metrics_res = constants.metric_struct(\n                    name=resource_key, labels=labels, values=resource_value)\n                metrics.append(metrics_res)\n        return metrics\n\n    def get_packaging_volume_data(self, end_time, resource_type, start_time):\n        duplicate = set()\n        packaging_data = {}\n        list_metrics = self.rest_handler.rest_call(\n            self.rest_handler.REST_VOLUME_HISTORICAL_URL)\n        for volume_metrics in (list_metrics or []):\n            about_timestamp = self.checkout_data(\n                volume_metrics, start_time, end_time, resource_type,\n                duplicate)\n            if about_timestamp is None:\n                continue\n            volume_metrics_data = self.get_metrics_data(\n                volume_metrics, about_timestamp)\n            volume_metrics_list = packaging_data.get(\n                volume_metrics.get('name'))\n            if not volume_metrics_list:\n                volume_metrics_list = []\n            volume_metrics_list.append(volume_metrics_data)\n            packaging_data[volume_metrics.get('name')] = volume_metrics_list\n        return packaging_data\n\n    def get_timestamp_s(self, opened):\n        time_difference = self.get_time_difference()\n        timestamp_s = int(\n            datetime.datetime.strptime(opened, consts.PURE_TIME_FORMAT)\n            .timestamp() + time_difference)\n        return timestamp_s\n\n    @staticmethod\n    def get_duplicate_value(about_timestamp, resource_type, storage_metrics):\n        duplicate_value = None\n        if resource_type == constants.ResourceType.VOLUME:\n            duplicate_value = '{}{}'.format(\n                storage_metrics.get('name'), about_timestamp)\n        if resource_type == constants.ResourceType.STORAGE:\n            duplicate_value = about_timestamp\n        return duplicate_value\n\n    @staticmethod\n    def get_metrics_data(metrics, about_timestamp):\n        read_iop = metrics.get('reads_per_sec')\n        write_iop = metrics.get('writes_per_sec')\n        read_throughput = metrics.get('output_per_sec') / units.Mi\n        write_throughput = metrics.get('input_per_sec') / units.Mi\n        read_response_time = metrics.get('usec_per_read_op') / units.k\n        write_response_time = metrics.get('usec_per_write_op') / units.k\n        metrics_data = {\n            'iops': round(read_iop + write_iop, 3),\n            \"readIops\": round(read_iop, 
3),\n            \"writeIops\": round(write_iop, 3),\n            \"throughput\": round(read_throughput + write_throughput, 3),\n            \"readThroughput\": round(read_throughput, 3),\n            \"writeThroughput\": round(write_throughput, 3),\n            \"readResponseTime\": round(read_response_time, 3),\n            \"writeResponseTime\": round(write_response_time, 3),\n            'time': about_timestamp\n        }\n        return metrics_data\n\n    def get_array(self):\n        arrays_id = None\n        arrays_name = None\n        arrays = self.rest_handler.rest_call(\n            self.rest_handler.REST_ARRAY_URL)\n        if arrays:\n            arrays_id = arrays.get('id')\n            arrays_name = arrays.get('array_name')\n        return arrays_id, arrays_name\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        return {\n            'is_historic': True,\n            'resource_metrics': {\n                constants.ResourceType.STORAGE: consts.STORAGE_CAP,\n                constants.ResourceType.VOLUME: consts.VOLUME_CAP\n            }\n        }\n\n    def get_latest_perf_timestamp(self, context):\n        list_metrics = self.rest_handler.rest_call(\n            self.rest_handler.REST_ARRAY_HISTORICAL_URL)\n        opened = list_metrics[consts.LIST_METRICS].get('time')\n        timestamp_s = self.get_timestamp_s(opened)\n        timestamp_ms = \\\n            int(timestamp_s / consts.SIXTY) * consts.SIXTY * units.k\n        return timestamp_ms\n\n    def list_storage_host_initiators(self, context):\n        list_initiators = []\n        initiators = self.rest_handler.rest_call(\n            self.rest_handler.REST_HOST_URL)\n        for initiator in (initiators or []):\n            host_id = initiator.get('name')\n            self.get_initiator(initiator, list_initiators, host_id, 'iqn',\n                               constants.InitiatorType.ISCSI)\n            self.get_initiator(initiator, list_initiators, host_id, 'wwn',\n                               constants.InitiatorType.FC)\n            self.get_initiator(initiator, list_initiators, host_id, 'nqn',\n                               constants.InitiatorType.NVME_OVER_FABRIC)\n        return list_initiators\n\n    def get_initiator(self, initiator, list_initiators, host_id, protocol,\n                      network):\n        protocol_list = initiator.get(protocol)\n        if protocol_list:\n            for initiator_protocol in (protocol_list or []):\n                if 'wwn' in protocol:\n                    initiator_protocol = self.get_splice_wwn(\n                        initiator_protocol)\n                initiator_d = {\n                    'native_storage_host_initiator_id': initiator_protocol,\n                    'native_storage_host_id': host_id,\n                    'name': initiator_protocol,\n                    'type': network,\n                    'status': constants.InitiatorStatus.UNKNOWN,\n                    'wwn': initiator_protocol,\n                    'storage_id': self.storage_id\n                }\n                list_initiators.append(initiator_d)\n\n    def list_storage_hosts(self, ctx):\n        host_list = []\n        hosts = self.rest_handler.rest_call(\n            self.rest_handler.REST_HOST_PERSONALITY_URL)\n        for host in (hosts or []):\n            name = host.get('name')\n            personality = host.get('personality').lower() \\\n                if host.get('personality') else None\n            h = {\n                \"name\": name,\n             
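   # 'personality' (lowercased above) picks the Delfin host OS type via\n                # HOST_OS_TYPES_MAP, defaulting to UNKNOWN\n             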
   \"storage_id\": self.storage_id,\n                \"native_storage_host_id\": name,\n                \"os_type\": consts.HOST_OS_TYPES_MAP.get(\n                    personality, constants.HostOSTypes.UNKNOWN),\n                \"status\": constants.HostStatus.NORMAL\n            }\n            host_list.append(h)\n        return host_list\n\n    def list_storage_host_groups(self, context):\n        host_groups = self.rest_handler.rest_call(\n            self.rest_handler.REST_HGROUP_URL)\n        host_group_list = []\n        storage_host_grp_relation_list = []\n        for hgroup in (host_groups or []):\n            name = hgroup.get('name')\n            hg = {\n                'native_storage_host_group_id': name,\n                'name': name,\n                'storage_id': self.storage_id\n            }\n            host_group_list.append(hg)\n            for host in (hgroup.get('hosts') or []):\n                host_relation = {\n                    'native_storage_host_group_id': name,\n                    'storage_id': self.storage_id,\n                    'native_storage_host_id': host\n                }\n                storage_host_grp_relation_list.append(host_relation)\n        result = {\n            'storage_host_groups': host_group_list,\n            'storage_host_grp_host_rels': storage_host_grp_relation_list\n        }\n        return result\n\n    def list_volume_groups(self, context):\n        volume_groups = self.rest_handler.rest_call(\n            self.rest_handler.REST_VOLUME_GROUP_URL)\n        vol_group_list = []\n        vol_grp_vol_relation_list = []\n        for volume_group in (volume_groups or []):\n            name = volume_group.get('name')\n            vol_g = {\n                'name': name,\n                'storage_id': self.storage_id,\n                'native_volume_group_id': name\n            }\n            vol_group_list.append(vol_g)\n            for volume_id in (volume_group.get('volumes') or []):\n                volume_group_relation = {\n                    'storage_id': self.storage_id,\n                    'native_volume_group_id': name,\n                    'native_volume_id': volume_id\n                }\n                vol_grp_vol_relation_list.append(volume_group_relation)\n        result = {\n            'volume_groups': vol_group_list,\n            'vol_grp_vol_rels': vol_grp_vol_relation_list\n        }\n        return result\n\n    def list_masking_views(self, context):\n        list_masking_views = []\n        view_id_dict = {}\n        hgroup_views = self.rest_handler.rest_call(\n            self.rest_handler.REST_HGROUP_CONNECT_URL)\n        for hgroup_view in (hgroup_views or []):\n            hgroup_name = hgroup_view.get('name')\n            native_volume_id = hgroup_view.get('vol')\n            native_masking_view_id = '{}{}'.format(\n                hgroup_name, native_volume_id)\n            if view_id_dict.get(hgroup_name):\n                continue\n            view_id_dict[native_masking_view_id] = hgroup_name\n            view = {\n                'native_masking_view_id': native_masking_view_id,\n                'name': native_masking_view_id,\n                'native_storage_host_group_id': hgroup_name,\n                'native_volume_id': native_volume_id,\n                'storage_id': self.storage_id\n            }\n            list_masking_views.append(view)\n\n        masking_views = self.rest_handler.rest_call(\n            self.rest_handler.REST_HOST_CONNECT_URL)\n        for masking_view in (masking_views 
or []):\n            hgroup = masking_view.get('hgroup')\n            host_id = masking_view.get('name')\n            native_volume_id = masking_view.get('vol')\n            hgroup_name = '{}{}'.format(hgroup, native_volume_id)\n            if view_id_dict.get(hgroup_name) is not None and \\\n                    view_id_dict.get(hgroup_name) in hgroup:\n                continue\n            native_masking_view_id = '{}{}{}'.format(\n                host_id, hgroup, native_volume_id)\n            if view_id_dict.get(native_masking_view_id):\n                continue\n            view_id_dict[native_masking_view_id] = native_masking_view_id\n            view = {\n                'native_masking_view_id': native_masking_view_id,\n                'name': native_masking_view_id,\n                'native_storage_host_id': host_id,\n                'native_volume_id': native_volume_id,\n                'storage_id': self.storage_id\n            }\n            list_masking_views.append(view)\n        return list_masking_views\n"
  },
  {
    "path": "delfin/drivers/pure/flasharray/rest_handler.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport six\nfrom oslo_log import log as logging\n\nfrom delfin import exception, cryptor\nfrom delfin.drivers.pure.flasharray import consts\nfrom delfin.drivers.utils.rest_client import RestClient\n\nLOG = logging.getLogger(__name__)\n\n\nclass RestHandler(RestClient):\n    REST_STORAGE_URL = '/api/1.17/array?space=true'\n    REST_ARRAY_URL = '/api/1.17/array'\n    REST_VOLUME_URL = '/api/1.17/volume?space=true&limit=500&token=' \\\n                      'aWQgPSA5ODA1Mg=='\n    REST_VOLUME_TOKEN_URL = '/api/1.17/volume?space=true&limit=20&token='\n    REST_PORT_URL = '/api/1.17/port'\n    REST_NETWORK_URL = '/api/1.17/network'\n    REST_DISK_URL = '/api/1.17/drive'\n    REST_HARDWARE_URL = '/api/1.17/hardware'\n    REST_CONTROLLERS_URL = '/api/1.17/array?controllers=true'\n    REST_ALERTS_URL = '/api/1.17/message?flagged=true&open=true'\n    REST_AUTH_URL = '/api/1.17/auth/apitoken'\n    REST_SESSION_URL = '/api/1.17/auth/session'\n    REST_HOST_URL = '/api/1.17/host'\n    REST_HOST_PERSONALITY_URL = '/api/1.17/host?personality=true'\n    REST_HOST_CONNECT_URL = '/api/1.17/host?connect=true'\n    REST_HGROUP_CONNECT_URL = '/api/1.17/hgroup?connect=true'\n    REST_HGROUP_URL = '/api/1.17/hgroup'\n    REST_VOLUME_GROUP_URL = '/api/1.17/vgroup'\n    REST_ARRAY_HISTORICAL_URL = '/api/1.17/array?action=monitor&historical=1h'\n    REST_VOLUME_HISTORICAL_URL =\\\n        '/api/1.17/volume?action=monitor&historical=1h'\n\n    def __init__(self, **kwargs):\n        super(RestHandler, self).__init__(**kwargs)\n\n    def login(self):\n        try:\n            data = {'username': self.rest_username, 'password': cryptor.decode(\n                self.rest_password)}\n            self.init_http_head()\n            token_res = self.do_call(RestHandler.REST_AUTH_URL, data,\n                                     method='POST')\n            if token_res.json().get('msg') == consts.LOGIN_PASSWORD_ERR:\n                LOG.error(\"Login error, Obtaining the token is abnormal. \"\n                          \"status_code:%s, URL: %s\",\n                          token_res.status_code, RestHandler.REST_AUTH_URL)\n                raise exception.InvalidUsernameOrPassword(\n                    'Obtaining the token is abnormal')\n            if token_res.status_code != consts.SUCCESS_STATUS_CODE or not \\\n                    token_res.json().get('api_token'):\n                LOG.error(\"Login error, Obtaining the token is abnormal. 
\"\n                          \"status_code:%s, URL: %s\",\n                          token_res.status_code, RestHandler.REST_AUTH_URL)\n                raise exception.StorageBackendException(\n                    'Obtaining the token is abnormal')\n            session_res = self.do_call(RestHandler.REST_SESSION_URL,\n                                       token_res.json(), method='POST')\n            if session_res.status_code != consts.SUCCESS_STATUS_CODE or not \\\n                    session_res.json().get('username'):\n                LOG.error(\"Login error, Obtaining the session is abnormal.\"\n                          \"status_code:%s, URL: %s\", session_res.status_code,\n                          RestHandler.REST_SESSION_URL)\n                raise exception.StorageBackendException(\n                    'Obtaining the session is abnormal.')\n        except Exception as e:\n            LOG.error(\"Login error: %s\", six.text_type(e))\n            raise e\n        finally:\n            data = None\n            token_res = None\n\n    def logout(self):\n        res = self.do_call(RestHandler.REST_SESSION_URL, None, method='DELETE')\n        if res.status_code != consts.SUCCESS_STATUS_CODE\\\n                or not res.json().get('username'):\n            LOG.error(\"Logout error, Deleting a Token Exception.\"\n                      \"status_code:%s, URL: %s\",\n                      res.status_code, RestHandler.REST_SESSION_URL)\n            raise exception.StorageBackendException(res.text)\n\n    def rest_call(self, url, data=None, method='GET'):\n        result_json = None\n        res = self.do_call(url, data, method)\n        if res.status_code == consts.SUCCESS_STATUS_CODE:\n            result_json = res.json()\n        elif res.status_code == consts.PERMISSION_DENIED_STATUS_CODE:\n            self.login()\n            the_second_time_res = self.do_call(url, data, method)\n            if the_second_time_res.status_code == consts.SUCCESS_STATUS_CODE:\n                result_json = the_second_time_res.json()\n        return result_json\n\n    def get_volumes(self, url=REST_VOLUME_URL, data=None, volume_list=None,\n                    count=consts.DEFAULT_COUNT_GET_VOLUMES_INFO):\n        if volume_list is None:\n            volume_list = []\n        res = self.do_call(url, data, 'GET')\n        if res.status_code == consts.SUCCESS_STATUS_CODE:\n            result_json = res.json()\n            volume_list.extend(result_json)\n            next_token = res.headers.get(consts.CUSTOM_TOKEN)\n            if next_token:\n                url = '%s%s' % (RestHandler.REST_VOLUME_TOKEN_URL, next_token)\n                self.get_volumes(url, data, volume_list)\n        elif res.status_code == consts.PERMISSION_DENIED_STATUS_CODE:\n            self.login()\n            if count < consts.RE_LOGIN_TIMES:\n                count = count + consts.CONSTANT_ONE\n                self.get_volumes(url, data, volume_list, count)\n        return volume_list\n"
  },
  {
    "path": "delfin/drivers/utils/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/utils/performance_file/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/utils/performance_file/macro_san/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/utils/performance_file/svc/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/utils/performance_file/vnx_block/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/drivers/utils/rest_client.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2016 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport json\n\nimport requests\nimport six\nfrom oslo_log import log as logging\n\nfrom delfin import exception\nfrom delfin import ssl_utils\nfrom delfin.i18n import _\n\nLOG = logging.getLogger(__name__)\n\nSOCKET_TIMEOUT = 10\n\n\nclass RestClient(object):\n\n    def __init__(self, **kwargs):\n        rest_access = kwargs.get('rest')\n        if rest_access is None:\n            raise exception.InvalidInput('Input rest_access is missing')\n        self.rest_host = rest_access.get('host')\n        self.rest_port = rest_access.get('port')\n        self.rest_username = rest_access.get('username')\n        self.rest_password = rest_access.get('password')\n        self.san_address = 'https://%s:%s' % \\\n                           (self.rest_host, str(self.rest_port))\n        self.session = None\n        self.device_id = None\n\n        self.verify = kwargs.get('verify', False)\n        self.rest_auth_token = None\n\n    def init_http_head(self):\n        if self.session:\n            self.session.close()\n        self.session = requests.Session()\n        self.session.headers.update({\n            \"Connection\": \"keep-alive\",\n            'Accept': 'application/json',\n            \"Content-Type\": \"application/json\"})\n        if not self.verify:\n            self.session.verify = False\n        else:\n            LOG.debug(\"Enable certificate verification, ca_path: {0}\".format(\n                self.verify))\n            self.session.verify = self.verify\n        self.session.trust_env = False\n        self.session.mount(\"https://\",\n                           ssl_utils.get_host_name_ignore_adapter())\n\n    def do_call(self, url, data, method,\n                calltimeout=SOCKET_TIMEOUT):\n        if 'http' not in url:\n            if self.san_address:\n                url = '%s%s' % (self.san_address, url)\n\n        kwargs = {'timeout': calltimeout}\n        if data:\n            kwargs['data'] = json.dumps(data)\n\n        if method in ('POST', 'PUT', 'GET', 'DELETE'):\n            func = getattr(self.session, method.lower())\n        else:\n            msg = _(\"Request method %s is invalid.\") % method\n            LOG.error(msg)\n            raise exception.StorageBackendException(msg)\n        res = None\n        try:\n            res = func(url, **kwargs)\n        except requests.exceptions.ConnectTimeout as ct:\n            LOG.error('Connect Timeout error for url([{}]{}): {}'.format(\n                method, url, ct))\n            raise exception.InvalidIpOrPort()\n        except requests.exceptions.ReadTimeout as rt:\n            LOG.error('Read timed out error for url([{}]{}): {}'.format(\n                method, url, rt))\n            raise exception.StorageBackendException(six.text_type(rt))\n        except requests.exceptions.SSLError as e:\n            
err_str = six.text_type(e)\n            LOG.error('SSLError for url([{}]{}): {}'.format(\n                method, url, err_str))\n            if 'certificate verify failed' in err_str:\n                raise exception.SSLCertificateFailed()\n            else:\n                raise exception.SSLHandshakeFailed()\n        except Exception as err:\n            LOG.error('Bad response from server for url([{}]{}): {}'.format(\n                method, url, err))\n            if 'WSAETIMEDOUT' in str(err):\n                raise exception.ConnectTimeout()\n            elif 'Failed to establish a new connection' in str(err):\n                raise exception.InvalidIpOrPort()\n            elif 'Read timed out' in str(err):\n                raise exception.StorageBackendException(six.text_type(err))\n            else:\n                raise exception.BadResponse()\n\n        return res\n"
  },
  {
    "path": "delfin/drivers/utils/ssh_client.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2011 OpenStack LLC\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\nimport time\n\nimport paramiko\nimport six\nfrom eventlet import pools\nfrom oslo_log import log as logging\nfrom paramiko.hostkeys import HostKeyEntry\n\nfrom delfin import cryptor\nfrom delfin import exception, utils\n\nLOG = logging.getLogger(__name__)\n\n\nclass SSHClient(object):\n    SOCKET_TIMEOUT = 10\n\n    def __init__(self, **kwargs):\n        ssh_access = kwargs.get('ssh')\n        if ssh_access is None:\n            raise exception.InvalidInput('Input ssh_access is missing')\n        self.ssh_host = ssh_access.get('host')\n        self.ssh_port = ssh_access.get('port')\n        self.ssh_username = ssh_access.get('username')\n        self.ssh_password = ssh_access.get('password')\n        self.ssh_pub_key_type = ssh_access.get('pub_key_type')\n        self.ssh_pub_key = ssh_access.get('pub_key')\n        self.ssh_conn_timeout = ssh_access.get('conn_timeout')\n        if self.ssh_conn_timeout is None:\n            self.ssh_conn_timeout = SSHClient.SOCKET_TIMEOUT\n\n    def connect(self):\n        self.ssh = paramiko.SSHClient()\n\n        if self.ssh_pub_key is None:\n            self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n        else:\n            host_key = '%s %s %s' % \\\n                       (self.ssh_host, self.ssh_pub_key_type, self.ssh_pub_key)\n            self.set_host_key(host_key)\n\n        self.ssh.connect(hostname=self.ssh_host, port=self.ssh_port,\n                         username=self.ssh_username,\n                         password=cryptor.decode(self.ssh_password),\n                         timeout=self.ssh_conn_timeout)\n\n    def set_host_key(self, host_key):\n        \"\"\"\n        Set public key,because input kwargs parameter host_key is string,\n        not a file path,we can not use load file to get public key,so we set\n        it as a string.\n        :param str host_key: the public key which as a string\n        \"\"\"\n        if (len(host_key) == 0) or (host_key[0] == \"#\"):\n            return\n        try:\n            e = HostKeyEntry.from_line(host_key)\n        except exception.SSHException:\n            return\n        if e is not None:\n            host_names = e.hostnames\n            for h in host_names:\n                if self.ssh._host_keys.check(h, e.key):\n                    e.hostnames.remove(h)\n            if len(e.hostnames):\n                self.ssh._host_keys._entries.append(e)\n\n    def exec_command(self, command_str):\n        result = None\n        try:\n            if command_str is not None:\n                if self.ssh is not None:\n                    stdin, stdout, stderr = self.ssh.exec_command(command_str)\n                    res, err = stdout.read(), stderr.read()\n                    re = res if res else err\n                    result = re.decode()\n        except Exception as e:\n            
LOG.error(e)\n            result = e\n        return result\n\n    def close(self):\n        try:\n            if self.ssh is not None:\n                # Close connection\n                self.ssh.close()\n                self.ssh = None\n        except Exception as e:\n            LOG.error(e)\n\n    def do_exec(self, command_str):\n        \"\"\"Execute command\"\"\"\n        re = None\n        try:\n            if command_str is not None:\n                self.connect()\n                re = self.exec_command(command_str)\n        except paramiko.AuthenticationException as ae:\n            LOG.error('doexec Authentication error:{}'.format(ae))\n            raise exception.InvalidUsernameOrPassword()\n        except Exception as e:\n            LOG.error('doexec InvalidUsernameOrPassword error:{}'.format(e))\n            if 'WSAETIMEDOUT' in str(e):\n                raise exception.SSHConnectTimeout()\n            elif 'No authentication methods available' in str(e) \\\n                    or 'Authentication failed' in str(e):\n                raise exception.InvalidUsernameOrPassword()\n            elif 'not a valid RSA private key file' in str(e):\n                raise exception.InvalidPrivateKey()\n            elif 'not found in known_hosts' in str(e):\n                raise exception.SSHNotFoundKnownHosts(self.ssh_host)\n            else:\n                raise exception.SSHException()\n\n        finally:\n            self.close()\n        return re\n\n\nclass SSHPool(pools.Pool):\n    CONN_TIMEOUT = 60\n\n    def __init__(self, **kwargs):\n        ssh_access = kwargs.get('ssh')\n        if ssh_access is None:\n            raise exception.InvalidInput('Input ssh_access is missing')\n        self.ssh_host = ssh_access.get('host')\n        self.ssh_port = ssh_access.get('port')\n        self.ssh_username = ssh_access.get('username')\n        self.ssh_password = ssh_access.get('password')\n        self.ssh_pub_key_type = ssh_access.get('pub_key_type')\n        self.ssh_pub_key = ssh_access.get('pub_key')\n        self.ssh_conn_timeout = ssh_access.get('conn_timeout')\n        if self.ssh_conn_timeout is None:\n            self.ssh_conn_timeout = SSHPool.CONN_TIMEOUT\n        super(SSHPool, self).__init__(min_size=0, max_size=3)\n\n    def set_host_key(self, host_key, ssh):\n        \"\"\"\n        Set public key,because input kwargs parameter host_key is string,\n        not a file path,we can not use load file to get public key,so we set\n        it as a string.\n        :param str host_key: the public key which as a string\n        \"\"\"\n        if (len(host_key) == 0) or (host_key[0] == \"#\"):\n            return\n        try:\n            e = HostKeyEntry.from_line(host_key)\n        except exception.SSHException:\n            return\n        if e is not None:\n            host_names = e.hostnames\n            for h in host_names:\n                if ssh._host_keys.check(h, e.key):\n                    e.hostnames.remove(h)\n            if len(e.hostnames):\n                ssh._host_keys._entries.append(e)\n\n    def create(self):\n        ssh = paramiko.SSHClient()\n        try:\n            if self.ssh_pub_key is None:\n                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n            else:\n                host_key = '%s %s %s' % \\\n                           (self.ssh_host, self.ssh_pub_key_type,\n                            self.ssh_pub_key)\n                self.set_host_key(host_key, ssh)\n\n            ssh.connect(hostname=self.ssh_host, 
port=self.ssh_port,\n                        username=self.ssh_username,\n                        password=cryptor.decode(self.ssh_password),\n                        timeout=self.ssh_conn_timeout)\n            transport = ssh.get_transport()\n            transport.set_keepalive(self.ssh_conn_timeout)\n            return ssh\n        except Exception as e:\n            err = six.text_type(e)\n            LOG.error(err)\n            if 'timed out' in err:\n                raise exception.InvalidIpOrPort()\n            elif 'No authentication methods available' in err \\\n                    or 'Authentication failed' in err:\n                raise exception.InvalidUsernameOrPassword()\n            elif 'not a valid RSA private key file' in err:\n                raise exception.InvalidPrivateKey()\n            elif 'not found in known_hosts' in err:\n                raise exception.SSHNotFoundKnownHosts(self.ssh_host)\n            else:\n                raise exception.SSHException(err)\n\n    def get(self):\n        \"\"\"Return an item from the pool, when one is available.\n\n        This may cause the calling greenthread to block. Check if a\n        connection is active before returning it. For dead connections\n        create and return a new connection.\n        \"\"\"\n        if self.free_items:\n            conn = self.free_items.popleft()\n            if conn:\n                if conn.get_transport().is_active():\n                    return conn\n                else:\n                    conn.close()\n                    self.current_size -= 1\n        if self.current_size < self.max_size:\n            try:\n                self.current_size += 1\n                created = self.create()\n            except Exception as e:\n                self.current_size -= 1\n                raise e\n\n            return created\n        return self.channel.get()\n\n    def remove(self, ssh):\n        \"\"\"Close an ssh client and remove it from free_items.\"\"\"\n        ssh.close()\n        if ssh in self.free_items:\n            self.free_items.remove(ssh)\n            if self.current_size > 0:\n                self.current_size -= 1\n\n    def put(self, conn):\n        if self.current_size > self.max_size:\n            conn.close()\n            self.current_size -= 1\n            return\n        super(SSHPool, self).put(conn)\n\n    def do_exec(self, command_str):\n        result = ''\n        try:\n            with self.item() as ssh:\n                utils.check_ssh_injection(command_str)\n                if command_str is not None and ssh is not None:\n                    stdin, stdout, stderr = ssh.exec_command(command_str)\n                    res, err = stdout.read(), stderr.read()\n                    re = res if res else err\n                    result = re.decode()\n        except paramiko.AuthenticationException as ae:\n            LOG.error('doexec Authentication error:{}'.format(ae))\n            raise exception.InvalidUsernameOrPassword()\n        except Exception as e:\n            err = six.text_type(e)\n            LOG.error(err)\n            if 'timed out' in err \\\n                    or 'SSH connect timeout' in err\\\n                    or 'Unable to connect to port' in err:\n                raise exception.ConnectTimeout()\n            elif 'No authentication methods available' in err \\\n                    or 'Authentication failed' in err \\\n                    or 'Invalid username or password' in err:\n                raise exception.InvalidUsernameOrPassword()\n  
          elif 'not a valid RSA private key file' in err \\\n                    or 'not a valid RSA private key' in err:\n                raise exception.InvalidPrivateKey()\n            else:\n                raise exception.SSHException(err)\n        if 'invalid command name' in result or 'login failed' in result or\\\n                'is not a recognized command' in result:\n            raise exception.StorageBackendException(result)\n        return result\n\n    def do_exec_shell(self, command_list, sleep_time=0.5):\n        result = ''\n        try:\n            with self.item() as ssh:\n                if command_list and ssh:\n                    channel = ssh.invoke_shell()\n                    for command in command_list:\n                        utils.check_ssh_injection(command)\n                        channel.send(command + '\\n')\n                        time.sleep(sleep_time)\n                    channel.send(\"exit\" + \"\\n\")\n                    channel.close()\n                    while True:\n                        resp = channel.recv(9999).decode('utf8')\n                        if not resp:\n                            break\n                        result += resp\n            if 'is not a recognized command' in result \\\n                    or 'Unknown command' in result:\n                raise exception.InvalidIpOrPort()\n        except paramiko.AuthenticationException as ae:\n            LOG.error('doexec Authentication error:{}'.format(ae))\n            raise exception.InvalidUsernameOrPassword()\n        except Exception as e:\n            err = six.text_type(e)\n            LOG.error(err)\n            if 'timed out' in err \\\n                    or 'SSH connect timeout' in err:\n                raise exception.SSHConnectTimeout()\n            elif 'No authentication methods available' in err \\\n                    or 'Authentication failed' in err \\\n                    or 'Invalid username or password' in err:\n                raise exception.InvalidUsernameOrPassword()\n            elif 'not a valid RSA private key file' in err \\\n                    or 'not a valid RSA private key' in err:\n                raise exception.InvalidPrivateKey()\n            elif 'Unable to connect to port' in err \\\n                    or 'Invalid ip or port' in err:\n                raise exception.InvalidIpOrPort()\n            else:\n                raise exception.SSHException(err)\n        return result\n"
  },
  {
    "path": "delfin/drivers/utils/tools.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport datetime\nimport os\nimport re\nimport time\n\nimport six\n\ntry:\n    import xml.etree.cElementTree as ET\nexcept ImportError:\n    import xml.etree.ElementTree as ET\n\nfrom scp import SCPClient\n\nfrom oslo_log import log as logging\nfrom oslo_utils import units\n\nLOG = logging.getLogger(__name__)\n\n\nclass Tools(object):\n\n    def time_str_to_timestamp(self, time_str, time_pattern):\n        \"\"\" Time str to time stamp conversion\n        \"\"\"\n        time_stamp = ''\n        if time_str:\n            time_array = time.strptime(time_str, time_pattern)\n            time_stamp = int(time.mktime(time_array) * units.k)\n        return time_stamp\n\n    def timestamp_to_time_str(self, time_stamp, time_pattern):\n        \"\"\" Time stamp to time str conversion\n        \"\"\"\n        time_str = ''\n        if time_stamp:\n            time_stamp = time_stamp / units.k\n            time_array = time.localtime(time_stamp)\n            time_str = time.strftime(time_pattern, time_array)\n        return time_str\n\n    @staticmethod\n    def timestamp_to_utc_time_str(time_stamp, time_pattern):\n        \"\"\" Time stamp to time str conversion\n        \"\"\"\n        time_str = ''\n        if time_stamp:\n            time_stamp = time_stamp / units.k\n            dateArray = datetime.datetime.utcfromtimestamp(time_stamp)\n            time_str = dateArray.strftime(time_pattern)\n        return time_str\n\n    @staticmethod\n    def change_capacity_to_bytes(unit):\n        unit = unit.upper()\n        if unit == 'TB':\n            res = units.Ti\n        elif unit == 'GB':\n            res = units.Gi\n        elif unit == 'MB':\n            res = units.Mi\n        elif unit == 'KB':\n            res = units.Ki\n        else:\n            res = 1\n        return int(res)\n\n    @staticmethod\n    def get_capacity_size(value):\n        capacity = 0\n        if value and value != '' and value != '-' and value != '0B':\n            if value.isdigit():\n                capacity = float(value)\n            else:\n                unit = value[-2:]\n                capacity = float(value[:-2]) * int(\n                    Tools.change_capacity_to_bytes(unit))\n        return capacity\n\n    @staticmethod\n    def split_value_map_list(value_info, map_list, is_mapping=False,\n                             is_alert=False, split=\":\"):\n        detail_array = value_info.split('\\r\\n')\n        value_map = {}\n        temp_key = ''\n        for detail in detail_array:\n            if detail:\n                string_info = detail.split(split + \" \")\n                key = string_info[0].replace(' ', '')\n                value = ''\n                if len(string_info) > 1 or is_mapping:\n                    for string in string_info[1:]:\n                        value = string.replace('\"\"', '')\n                    value_map[key] = value\n                if is_alert and key and len(string_info) 
> 1:\n                    temp_key = key\n                    continue\n                if is_alert and temp_key and 'entries' not in detail:\n                    if len(string_info) > 1:\n                        value_map[temp_key] += string_info[1]\n                    elif len(string_info) == 1:\n                        value_map[temp_key] += string_info[0]\n            else:\n                if value_map != {}:\n                    map_list.append(value_map)\n                value_map = {}\n        if value_map != {}:\n            map_list.append(value_map)\n        return map_list\n\n    @staticmethod\n    def get_numbers_in_brackets(source_info, pattern_str):\n        \"\"\"Get the contents in brackets through regular expressions.\n           source_info：Source data, example: \"collect time (1583012100)\"\n           pattern_str: regular expression. example：\"\\\\(\\\\d+\\\\)\"\n        \"\"\"\n        object_info = ''\n        object_infos = re.findall(pattern_str, source_info)\n        if object_infos:\n            object_info = object_infos[0].replace('(', '').replace(')', '')\n        return object_info\n\n    @staticmethod\n    def remove_file_with_same_type(file_name, file_path):\n        file_type = '%s_%s_%s' % (file_name.split('_')[0],\n                                  file_name.split('_')[1],\n                                  file_name.split('_')[2])\n        path_dir = os.listdir(file_path)\n        for file in path_dir:\n            if file_type in file:\n                local_file = '%s%s' % (file_path, file)\n                os.remove(local_file)\n\n    @staticmethod\n    def get_remote_file_to_xml(ssh, file, local_path, remote_path):\n        root_node = None\n        local_file = '%s%s' % (local_path, file)\n        try:\n            scp_client = SCPClient(ssh.get_transport(),\n                                   socket_timeout=15.0)\n            remote_file = '%s%s' % (remote_path, file)\n            scp_client.get(remote_file, local_path)\n            root_node = open(local_file).read()\n            root_node = ET.fromstring(root_node)\n        except Exception as e:\n            err_msg = \"Failed to copy statics file: %s\" % \\\n                      (six.text_type(e))\n            LOG.error(err_msg)\n        finally:\n            if os.path.exists(local_file):\n                Tools.remove_file_with_same_type(file, local_path)\n        return root_node\n"
  },
  {
    "path": "delfin/exception.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Delfin base exception handling.\n\nIncludes decorator for re-raising Delfin-type exceptions.\n\nSHOULD include dedicated exception logging.\n\n\"\"\"\n\nimport six\nimport webob.exc\n\nfrom oslo_log import log\n\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\nclass ConvertedException(webob.exc.WSGIHTTPException):\n    def __init__(self, exception):\n        self.code = exception.code\n        self.title = ''\n        self.explanation = exception.msg\n        self.error_code = exception.error_code\n        self.error_args = exception.error_args\n        super(ConvertedException, self).__init__()\n\n\nclass DelfinException(Exception):\n    \"\"\"Base Delfin Exception\n\n    To correctly use this class, inherit from it and define\n    a 'msg_fmt' property. That msg_fmt will get printf'd\n    with the tuple arguments provided to the constructor.\n\n    \"\"\"\n    msg_fmt = _(\"An unknown exception occurred.\")\n    code = 500\n\n    def __init__(self, *args, **kwargs):\n        self.error_args = args\n        message = kwargs.get('message')\n        try:\n            if not message:\n                message = self.msg_fmt.format(*args)\n            else:\n                message = six.text_type(message)\n        except Exception:\n            LOG.error(\"Failed to format message: {0}\".format(args))\n            message = self.msg_fmt\n        self.msg = message\n        super(DelfinException, self).__init__(message)\n\n    @property\n    def error_code(self):\n        return self.__class__.__name__\n\n\nclass NotAuthorized(DelfinException):\n    msg_fmt = _(\"Not authorized.\")\n    code = 403\n\n\nclass Invalid(DelfinException):\n    msg_fmt = _(\"Unacceptable parameters.\")\n    code = 400\n\n\nclass BadRequest(Invalid):\n    msg_fmt = _('The server could not comply with the request since\\r\\n'\n                'it is either malformed or otherwise incorrect.\\r\\n')\n    code = 400\n\n\nclass MalformedRequestBody(Invalid):\n    msg_fmt = _(\"Malformed request body: {0}.\")\n\n\nclass MalformedRequestUrl(Invalid):\n    msg_fmt = _(\"Malformed request url.\")\n\n\nclass InvalidCredential(Invalid):\n    msg_fmt = _(\"The credentials are invalid.\")\n\n\nclass InvalidResults(Invalid):\n    msg_fmt = _(\"The results are invalid. {0}\")\n\n\nclass InvalidInput(Invalid):\n    msg_fmt = _(\"Invalid input received. {0}\")\n\n\nclass InvalidName(Invalid):\n    msg_fmt = _(\"An invalid 'name' value was provided. {0}\")\n\n\nclass InvalidContentType(Invalid):\n    msg_fmt = _(\"Invalid content type: {0}.\")\n\n\nclass StorageSerialNumberMismatch(Invalid):\n    msg_fmt = _(\"Storage serial number mismatch. 
{0}\")\n\n\nclass StorageAlreadyExists(Invalid):\n    msg_fmt = _(\"Storage already exists.\")\n\n\nclass InvalidSNMPConfig(Invalid):\n    msg_fmt = _(\"Invalid SNMP configuration: {0}\")\n\n\nclass NotFound(DelfinException):\n    msg_fmt = _(\"Resource could not be found.\")\n    code = 404\n\n\nclass NoSuchAction(NotFound):\n    msg_fmt = _(\"There is no such action: {0}\")\n\n\nclass AccessInfoNotFound(NotFound):\n    msg_fmt = _(\"Access information for storage {0} could not be found.\")\n\n\nclass AlertSourceNotFound(NotFound):\n    msg_fmt = _(\"Alert source for storage {0} could not be found.\")\n\n\nclass AlertSourceNotFoundWithHost(NotFound):\n    msg_fmt = _(\"Alert source could not be found with host {0}.\")\n\n\nclass SNMPConnectionFailed(BadRequest):\n    msg_fmt = _(\"Connection to SNMP server failed: {0}\")\n\n\nclass StorageNotFound(NotFound):\n    msg_fmt = _(\"Storage {0} could not be found.\")\n\n\nclass StorageBackendNotFound(NotFound):\n    msg_fmt = _(\"Storage backend could not be found.\")\n\n\nclass StoragePoolNotFound(NotFound):\n    msg_fmt = _(\"Storage pool {0} could not be found.\")\n\n\nclass VolumeNotFound(NotFound):\n    msg_fmt = _(\"Volume {0} could not be found.\")\n\n\nclass StorageHostInitiatorNotFound(NotFound):\n    msg_fmt = _(\"Storage host initiator {0} could not be found.\")\n\n\nclass StorageHostNotFound(NotFound):\n    msg_fmt = _(\"Storage host {0} could not be found.\")\n\n\nclass StorageHostGroupNotFound(NotFound):\n    msg_fmt = _(\"Storage host group {0} could not be found.\")\n\n\nclass PortGroupNotFound(NotFound):\n    msg_fmt = _(\"Port group {0} could not be found.\")\n\n\nclass VolumeGroupNotFound(NotFound):\n    msg_fmt = _(\"Volume group {0} could not be found.\")\n\n\nclass MaskingViewNotFound(NotFound):\n    msg_fmt = _(\"Masking View {0} could not be found.\")\n\n\nclass StorageHostGrpHostRelNotFound(NotFound):\n    msg_fmt = _(\"Storage Host Group Host Relation {0} could not be found.\")\n\n\nclass PortGrpPortRelNotFound(NotFound):\n    msg_fmt = _(\"Port Group Port Relation {0} could not be found.\")\n\n\nclass VolGrpVolRelationNotFound(NotFound):\n    msg_fmt = _(\"Volume Group Volume Relation {0} could not be found.\")\n\n\nclass ControllerNotFound(NotFound):\n    msg_fmt = _(\"Controller {0} could not be found.\")\n\n\nclass ControllerListNotFound(NotFound):\n    msg_fmt = _(\"Controller List for {0} could not be found.\")\n\n\nclass PortNotFound(NotFound):\n    msg_fmt = _(\"Port {0} could not be found.\")\n\n\nclass PortListNotFound(NotFound):\n    msg_fmt = _(\"Port List for {0} could not be found.\")\n\n\nclass DiskNotFound(NotFound):\n    msg_fmt = _(\"Disk {0} could not be found.\")\n\n\nclass FilesystemNotFound(NotFound):\n    msg_fmt = _(\"Filesystem {0} could not be found.\")\n\n\nclass QtreeNotFound(NotFound):\n    msg_fmt = _(\"Qtree {0} could not be found.\")\n\n\nclass QuotaNotFound(NotFound):\n    msg_fmt = _(\"Quota {0} could not be found.\")\n\n\nclass ShareNotFound(NotFound):\n    msg_fmt = _(\"Share {0} could not be found.\")\n\n\nclass StorageDriverNotFound(NotFound):\n    msg_fmt = _(\"Storage driver '{0}'could not be found.\")\n\n\nclass TaskNotFound(NotFound):\n    msg_fmt = _(\"Task {0} could not be found.\")\n\n\nclass FailedTaskNotFound(NotFound):\n    msg_fmt = _(\"Failed task {0} could not be found.\")\n\n\nclass ConfigNotFound(NotFound):\n    msg_fmt = _(\"Could not find config at {0}.\")\n\n\nclass PasteAppNotFound(NotFound):\n    msg_fmt = _(\"Could not load paste app '{0}' from 
{1}.\")\n\n\nclass StorageBackendException(DelfinException):\n    msg_fmt = _(\"Exception from Storage Backend: {0}.\")\n\n\nclass SSHException(DelfinException):\n    msg_fmt = _(\"Exception in SSH protocol negotiation or logic. {0}\")\n\n\nclass SSHInjectionThreat(DelfinException):\n    msg_fmt = _(\"SSH command injection detected: {0}.\")\n\n\n# Tooz locking\nclass LockCreationFailed(DelfinException):\n    msg_fmt = _('Unable to create lock. Coordination backend not started.')\n\n\nclass LockAcquisitionFailed(DelfinException):\n    msg_fmt = _('Lock acquisition failed.')\n\n\nclass DuplicateExtension(DelfinException):\n    msg_fmt = _('Found duplicate extension: {0}.')\n\n\nclass ImproperIPVersion(DelfinException):\n    msg_fmt = _(\"Provided improper IP version {0}.\")\n\n\nclass ConnectTimeout(DelfinException):\n    msg_fmt = _(\"Connect timeout.\")\n    code = 500\n\n\nclass InvalidUsernameOrPassword(DelfinException):\n    msg_fmt = _(\"Invalid username or password.\")\n    code = 400\n\n\nclass BadResponse(Invalid):\n    msg_fmt = _('Bad response from server')\n    code = 500\n\n\nclass InvalidPrivateKey(DelfinException):\n    msg_fmt = _(\"not a valid RSA private key.\")\n    code = 400\n\n\nclass SSHConnectTimeout(DelfinException):\n    msg_fmt = _(\"SSH connect timeout.\")\n    code = 500\n\n\nclass SSHNotFoundKnownHosts(NotFound):\n    msg_fmt = _(\"{0} not found in known_hosts.\")\n    code = 400\n\n\nclass StorageClearAlertFailed(DelfinException):\n    msg_fmt = _(\"Failed to clear alert. Reason: {0}.\")\n\n\nclass StorageListAlertFailed(DelfinException):\n    msg_fmt = _(\"Failed to list alerts. Reason: {0}.\")\n\n\nclass HTTPConnectionTimeout(DelfinException):\n    msg_fmt = _(\"HTTP connection timeout: {0}.\")\n\n\nclass InvalidCAPath(DelfinException):\n    msg_fmt = _(\"Invalid CA path: {0}.\")\n\n\nclass StoragePerformanceCollectionFailed(DelfinException):\n    msg_fmt = _(\"Failed to collect performance metrics. Reason: {0}.\")\n\n\nclass SSLCertificateFailed(Invalid):\n    msg_fmt = _(\"SSL Certificate Failed.\")\n    code = 400\n\n\nclass SSLHandshakeFailed(Invalid):\n    msg_fmt = _(\"SSL handshake failure.\")\n\n\nclass StorageIsSyncing(Invalid):\n    msg_fmt = _(\"Storage {0} is syncing now, please try again later.\")\n\n\nclass InvalidIpOrPort(DelfinException):\n    msg_fmt = _(\"Invalid ip or port.\")\n    code = 400\n\n\nclass InvalidStorageCapability(Invalid):\n    msg_fmt = _(\"Invalid capability response: {0}\")\n    code = 500\n\n\nclass StorageCapabilityNotSupported(Invalid):\n    msg_fmt = _(\"Capability feature not supported by storage\")\n    code = 501\n\n\nclass EmptyResourceMetrics(DelfinException):\n    msg_fmt = _(\"Empty resource metric in capabilities\")\n    code = 501\n\n\nclass TelemetryTaskExecError(DelfinException):\n    msg_fmt = _(\"Failure in telemetry task execution\")\n\n\nclass ComponentNotFound(NotFound):\n    msg_fmt = _(\"Component {0} could not be found.\")\n\n\nclass IncompleteTrapInformation(DelfinException):\n    msg_fmt = _(\"Incomplete trap information.\"\n                \"Storage {0} alert information needs to be synchronized.\")\n\n\nclass StorageMaxUserCountException(DelfinException):\n    msg_fmt = _(\n        \"Exception from storage of users has reached the upper limit: {0}.\")\n"
  },
  {
    "path": "delfin/exporter/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/exporter/base_exporter.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nimport six\nfrom stevedore import extension\n\nfrom delfin import exception\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\nexporter_opts = [\n    cfg.ListOpt('alert_exporters',\n                default=['AlertExporterExample'],\n                help=\"Which exporters for alert push.\"),\n    cfg.ListOpt('performance_exporters',\n                default=['PerformanceExporterExample'],\n                help=\"Which exporters for performance push.\"),\n]\n\nCONF = cfg.CONF\nCONF.register_opts(exporter_opts)\n\n\nclass BaseExporter(object):\n    \"\"\"Base class for data exporter.\"\"\"\n\n    def dispatch(self, ctxt, data):\n        \"\"\"Dispatch data to the third platforms.\n            :param ctxt: delfin.RequestContext\n            :param data: The data to be pushed, it's a list with dict item.\n            :type data: list\n        \"\"\"\n        raise NotImplementedError()\n\n\nclass BaseManager(BaseExporter):\n    def __init__(self, namespace):\n        self.extension_manager = extension.ExtensionManager(namespace)\n        self.exporters = self._get_exporters()\n\n    def dispatch(self, ctxt, data):\n        if not isinstance(data, (list, tuple)):\n            data = [data]\n        for exporter in self.exporters:\n            try:\n                exporter.dispatch(ctxt, data)\n            except exception.DelfinException as e:\n                err_msg = _(\"Failed to export data (%s).\") % e.msg\n                LOG.exception(err_msg)\n            except Exception as e:\n                err_msg = six.text_type(e)\n                LOG.exception(err_msg)\n\n    def _get_exporters(self):\n        \"\"\"Get exporters from configuration file which\n        shall be supported in entry points.\n        \"\"\"\n        supported_exporters = self._get_supported_exporters()\n        configured_exporters = self._get_configured_exporters()\n        return [cls() for cls in supported_exporters\n                if cls.__name__ in configured_exporters]\n\n    def _get_supported_exporters(self):\n        \"\"\"Get all supported exporters from entry points file.\"\"\"\n        return [ext.plugin for ext in self.extension_manager]\n\n    def _get_configured_exporters(self):\n        \"\"\"Get exporters from configuration file.\"\"\"\n        raise NotImplementedError()\n\n\nclass AlertExporterManager(BaseManager):\n    NAMESPACE = 'delfin.alert.exporters'\n\n    def __init__(self):\n        super(AlertExporterManager, self).__init__(self.NAMESPACE)\n\n    def _get_configured_exporters(self):\n        return CONF.alert_exporters\n\n\nclass PerformanceExporterManager(BaseManager):\n    NAMESPACE = 'delfin.performance.exporters'\n\n    def __init__(self):\n        super(PerformanceExporterManager, self).__init__(self.NAMESPACE)\n\n    def _get_configured_exporters(self):\n        return CONF.performance_exporters\n"
  },
  {
    "path": "delfin/exporter/example.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_log import log\nfrom delfin.exporter import base_exporter\n\nLOG = log.getLogger(__name__)\n\n\nclass AlertExporterExample(base_exporter.BaseExporter):\n    def dispatch(self, ctxt, data):\n        LOG.debug(\"AlertExporterExample, report data: %s\" % data)\n\n\nclass PerformanceExporterExample(base_exporter.BaseExporter):\n    def dispatch(self, ctxt, data):\n        LOG.debug(\"PerformanceExporterExample, report data: %s\" % data)\n"
  },
  {
    "path": "delfin/exporter/kafka/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/exporter/kafka/exporter.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.exporter import base_exporter\nfrom delfin.exporter.kafka import kafka\n\n\nclass AlertExporterKafka(base_exporter.BaseExporter):\n    def dispatch(self, ctxt, data):\n        pass\n\n\nclass PerformanceExporterKafka(base_exporter.BaseExporter):\n    def dispatch(self, ctxt, data):\n        kafka_obj = kafka.KafkaExporter()\n        kafka_obj.push_to_kafka(data)\n"
  },
  {
    "path": "delfin/exporter/kafka/kafka.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom kafka import KafkaProducer\n\n\"\"\"\"\nThe metrics received from driver is should be in this format\n\nstorage_metrics = [Metric(name='response_time',\n     labels={'storage_id': '1', 'resource_type': 'array'},\n     values={16009988175: 74.10422968341392, 16009988180: 74.10422968341392}),\n     Metric(name='throughput',\n     labels={'storage_id': '1', 'resource_type': 'array'},\n     values={16009988188: 68.57886608255163, 16009988190: 68.57886608255163}),\n     Metric(name='read_throughput',\n     labels={'storage_id': '1', 'resource_type': 'array'},\n     values={1600998817585: 76.60140757331934}),\n     Metric(name='write_throughput',\n     labels={'storage_id': '1', 'resource_type': 'array'},\n     values={1600998817585: 20.264160223426305})]\n\n# metrics and its unit we do support\nunit_of_metric = {'response_time': 'ms', 'throughput': 'IOPS',\n                  'read_throughput': 'IOPS', 'write_throughput': 'IOPS',\n                  'bandwidth': 'MBps', 'read_bandwidth': 'MBps',\n                  'write_bandwidth': 'MBps'\n                  }\n\"\"\"\n\nLOG = log.getLogger(__name__)\nCONF = cfg.CONF\n\nkafka_opts = [\n    cfg.StrOpt('kafka_topic_name', default='delfin-kafka',\n               help='The topic of kafka'),\n    cfg.StrOpt('kafka_ip', default='localhost',\n               help='The kafka server IP'),\n    cfg.StrOpt('kafka_port', default='9092',\n               help='The kafka server port'),\n]\n\nCONF.register_opts(kafka_opts, \"KAFKA_EXPORTER\")\nkafka = CONF.KAFKA_EXPORTER\n\n\nclass KafkaExporter(object):\n\n    def push_to_kafka(self, data):\n        topic = kafka.kafka_topic_name\n        ip = kafka.kafka_ip\n        port = kafka.kafka_port\n        bootstrap_server = ip + ':' + port\n        producer = KafkaProducer(\n            bootstrap_servers=[bootstrap_server],\n            value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n\n        producer.send(topic, value=data)\n"
  },
  {
    "path": "delfin/exporter/prometheus/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/exporter/prometheus/alert_manager.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport requests\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nLOG = log.getLogger(__name__)\nCONF = cfg.CONF\nalert_mngr_opts = [\n\n    cfg.StrOpt('alert_manager_host', default='localhost',\n               help='The prometheus alert manager host'),\n    cfg.StrOpt('alert_manager_port', default='9093',\n               help='The prometheus alert manager port'),\n]\n\nCONF.register_opts(alert_mngr_opts, \"PROMETHEUS_ALERT_MANAGER_EXPORTER\")\nalert_cfg = CONF.PROMETHEUS_ALERT_MANAGER_EXPORTER\n\n\nclass PrometheusAlertExporter(object):\n    alerts = []\n    model_key = ['alert_id', 'alert_name', 'sequence_number', 'category',\n                 'severity', 'type', 'location', 'recovery_advice',\n                 'storage_id', 'storage_name', 'vendor',\n                 'model', 'serial_number', 'occur_time']\n\n    def push_prometheus_alert(self, alerts):\n\n        host = alert_cfg.alert_manager_host\n        port = alert_cfg.alert_manager_port\n        for alert in alerts:\n            dict = {}\n            dict[\"labels\"] = {}\n            dict[\"annotations\"] = {}\n            for key in self.model_key:\n                dict[\"labels\"][key] = str(alert.get(key))\n\n            dict[\"annotations\"][\"summary\"] = alert.get(\"description\")\n\n            self.alerts.append(dict)\n            try:\n                response = requests.post('http://' + host + \":\" + port +\n                                         '/api/v1/alerts',\n                                         json=self.alerts)\n                if response.status_code != 200:\n                    LOG.error(\"POST request failed for alert %s \",\n                              alert.get('alert_id'))\n            except Exception:\n                LOG.error(\"Exporting alert to alert manager has been failed \"\n                          \"for alert %s \", alert.get('alert_id'))\n"
  },
  {
    "path": "delfin/exporter/prometheus/exporter.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_log import log\nfrom delfin.exporter import base_exporter\nfrom delfin.exporter.prometheus import prometheus, alert_manager\n\nLOG = log.getLogger(__name__)\n\n\nclass AlertExporterPrometheus(base_exporter.BaseExporter):\n    def dispatch(self, ctxt, data):\n        alert_manager_obj = alert_manager.PrometheusAlertExporter()\n        alert_manager_obj.push_prometheus_alert(data)\n\n\nclass PerformanceExporterPrometheus(base_exporter.BaseExporter):\n    def dispatch(self, ctxt, data):\n        prometheus_obj = prometheus.PrometheusExporter()\n        prometheus_obj.push_to_prometheus(data)\n"
  },
  {
    "path": "delfin/exporter/prometheus/exporter_server.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\n\nimport six\nfrom flask import Flask\nfrom oslo_config import cfg\nimport sys\nfrom oslo_log import log\n\nLOG = log.getLogger(__name__)\n\napp = Flask(__name__)\n\ngrp = cfg.OptGroup('PROMETHEUS_EXPORTER')\nMETRICS_CACHE_DIR = '/var/lib/delfin/metrics'\nprometheus_opts = [\n    cfg.StrOpt('metric_server_ip', default='0.0.0.0',\n               help='The exporter server host  ip'),\n    cfg.IntOpt('metric_server_port', default=8195,\n               help='The exporter server port'),\n    cfg.StrOpt('metrics_dir', default=METRICS_CACHE_DIR,\n\n               help='The temp directory to keep incoming metrics'),\n]\ncfg.CONF.register_opts(prometheus_opts, group=grp)\ncfg.CONF(sys.argv[1:])\n\n\n@app.route(\"/metrics\", methods=['GET'])\ndef getfile():\n    \"\"\"Read the earliest metric file from the\n    available *.prom files\n    \"\"\"\n    try:\n        if not os.path.exists(cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir):\n            LOG.error('No metrics cache folder exists')\n            return ''\n        os.chdir(cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir)\n    except OSError as e:\n        LOG.error('Error opening metrics folder')\n        raise Exception(e)\n    try:\n        files = glob.glob(\"*.prom\")\n        data = ''\n        if files:\n            files.sort(key=os.path.getmtime)\n            # Read only earliest file in one scrape to /metrics\n            file_name = os.path.join(cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir,\n                                     files[0])\n\n            with open(file_name, \"r\") as f:\n                data = f.read()\n            # Remove a metric file after reading it\n            LOG.info('Metric file %s has been read', file_name)\n            os.remove(file_name)\n            LOG.info('Metric file %s has been deleted', file_name)\n    except Exception as e:\n        msg = six.text_type(e)\n        LOG.error('Error while reading metrics %s', msg)\n        return ''\n\n    return data\n\n\nif __name__ == '__main__':\n    app.run(host=cfg.CONF.PROMETHEUS_EXPORTER.metric_server_ip,\n            port=cfg.CONF.PROMETHEUS_EXPORTER.metric_server_port)\n"
  },
  {
    "path": "delfin/exporter/prometheus/prometheus.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport datetime\nimport glob\nimport os\nimport six\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nLOG = log.getLogger(__name__)\n\ngrp = cfg.OptGroup('PROMETHEUS_EXPORTER')\nMETRICS_CACHE_DIR = '/var/lib/delfin/metrics'\n# Metrics file retention time\nRETENTION_TIME_SEC = 3600\nprometheus_opts = [\n    cfg.StrOpt('metrics_dir', default=METRICS_CACHE_DIR,\n\n               help='The temp directory to keep incoming metrics'),\n    cfg.StrOpt('timezone',\n               default='local',\n               help='time zone of prometheus server '\n               ),\n]\ncfg.CONF.register_opts(prometheus_opts, group=grp)\n\n\"\"\"\"\nThe metrics received from driver is should be in this format\nstorage_metrics = [Metric(name='response_time',\n     labels={'storage_id': '1', 'resource_type': 'array'},\n     values={16009988175: 74.10422968341392, 16009988180: 74.10422968341392}),\n     Metric(name='throughput',\n     labels={'storage_id': '1', 'resource_type': 'array'},\n     values={16009988188: 68.57886608255163, 16009988190: 68.57886608255163}),\n     Metric(name='read_throughput',\n     labels={'storage_id': '1', 'resource_type': 'array'},\n     values={1600998817585: 76.60140757331934}),\n     Metric(name='write_throughput',\n     labels={'storage_id': '1', 'resource_type': 'array'},\n     values={1600998817585: 20.264160223426305})]\n\"\"\"\n\n\nclass PrometheusExporter(object):\n\n    def __init__(self):\n        self.metrics_dir = cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir\n\n    def check_metrics_dir_exists(self, directory):\n        try:\n            if not os.path.exists(directory):\n                os.makedirs(directory)\n            return True\n        except Exception as e:\n            msg = six.text_type(e)\n            LOG.error(\"Error while creating metrics directory. 
Reason: %s\",\n                      msg)\n            return False\n\n    # Print metrics in Prometheus format.\n    def _write_to_prometheus_format(self, f, metric,\n                                    labels, prom_labels, values):\n        f.write(\"# HELP %s  metric for resource %s and instance %s\\n\"\n                % (metric, labels.get('resource_type'),\n                   labels.get('resource_id')))\n        f.write(\"# TYPE %s gauge\\n\" % metric)\n\n        for timestamp, value in values.items():\n            f.write(\"%s{%s} %f %d\\n\" % (metric, prom_labels,\n                                        value, timestamp))\n\n    def get_file_age(self, path):\n        # Getting ctime of the file/folder\n        # Time will be in seconds\n        ctime = os.stat(path).st_ctime\n        # Returning the time\n        return ctime\n\n    def clean_old_metric_files(self, metrics_dir):\n        os.chdir(metrics_dir)\n        files = glob.glob(\"*.prom\")\n        for file in files:\n            file_age = datetime.datetime.now().timestamp() \\\n                - self.get_file_age(file)\n            if file_age >= RETENTION_TIME_SEC:\n                LOG.info(\"Removing metric file %s\"\n                         \" as it crossed the retention period\", file)\n                os.remove(file)\n\n    def push_to_prometheus(self, storage_metrics):\n        if not self.check_metrics_dir_exists(self.metrics_dir):\n            return\n        try:\n            self.clean_old_metric_files(self.metrics_dir)\n        except Exception:\n            LOG.error('Error while cleaning old metrics files')\n        time_stamp = str(datetime.datetime.now().timestamp())\n        temp_file_name = os.path.join(self.metrics_dir,\n                                      time_stamp + \".prom.temp\")\n        actual_file_name = os.path.join(self.metrics_dir,\n                                        time_stamp + \".prom\")\n        # make a temp  file with current timestamp\n        with open(temp_file_name, \"w\") as f:\n            for metric in storage_metrics:\n                name = metric.name\n                labels = metric.labels\n                values = metric.values\n                storage_id = labels.get('storage_id')\n                storage_name = labels.get('name')\n                storage_sn = labels.get('serial_number')\n                resource_type = labels.get('resource_type')\n                resource_id = labels.get('resource_id')\n                unit = labels.get('unit')\n                m_type = labels.get('type', 'RAW')\n                value_type = labels.get('value_type', 'gauge')\n                prom_labels = (\n                    \"storage_id=\\\"%s\\\",\"\n                    \"storage_name=\\\"%s\\\",\"\n                    \"storage_sn=\\\"%s\\\",\"\n                    \"resource_type=\\\"%s\\\",\"\n                    \"resource_id=\\\"%s\\\",\"\n                    \"type=\\\"%s\\\",\"\n                    \"unit=\\\"%s\\\",\"\n                    \"value_type=\\\"%s\\\"\" %\n                    (storage_id, storage_name, storage_sn, resource_type,\n                        resource_id,\n                        m_type, unit, value_type))\n                name = labels.get('resource_type') + '_' + name\n                self._write_to_prometheus_format(f, name, labels, prom_labels,\n                                                 values)\n        # this is done so that the exporter server never see an incomplete file\n        try:\n            f.close()\n            
os.renames(temp_file_name, actual_file_name)\n            LOG.info('A new metric file %s has been generated',\n                     actual_file_name)\n        except Exception:\n            LOG.error('Error while renaming the temporary metric file')\n"
  },
  {
    "path": "delfin/i18n.py",
    "content": "# Copyright 2014 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"oslo.i18n integration module.\n\nSee https://docs.openstack.org/oslo.i18n/latest/user/usage.html .\n\n\"\"\"\n\nimport oslo_i18n\n\nDOMAIN = 'delfin'\n\n_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)\n\n# The primary translation function using the well-known name \"_\"\n_ = _translators.primary\n\n\ndef translate(value, user_locale):\n    return oslo_i18n.translate(value, user_locale)\n\n\ndef get_available_languages():\n    return oslo_i18n.get_available_languages(DOMAIN)\n"
  },
  {
    "path": "delfin/leader_election/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/leader_election/distributor/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/leader_election/distributor/perf_job_manager.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom oslo_log import log\n\nfrom delfin import manager\nfrom delfin.leader_election.distributor import task_distributor\n\nLOG = log.getLogger(__name__)\n\n\nclass PerfJobManager(manager.Manager):\n    \"\"\"Generate job to job distributor\"\"\"\n\n    RPC_API_VERSION = '1.0'\n\n    def __init__(self, service_name=None, *args, **kwargs):\n        super(PerfJobManager, self).__init__(*args, **kwargs)\n\n    def add_new_job(self, context, task_id):\n        distributor = task_distributor.TaskDistributor(context)\n        distributor.distribute_new_job(task_id)\n"
  },
  {
    "path": "delfin/leader_election/distributor/task_distributor.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport six\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom delfin import db\nfrom delfin.coordination import ConsistentHashing\nfrom delfin.task_manager import metrics_rpcapi as task_rpcapi\n\nCONF = cfg.CONF\nLOG = log.getLogger(__name__)\n\n\nclass TaskDistributor(object):\n    def __init__(self, ctx):\n        self.ctx = ctx\n        self.task_rpcapi = task_rpcapi.TaskAPI()\n\n    def distribute_new_job(self, task_id):\n        partitioner = ConsistentHashing()\n        partitioner.start()\n        executor = partitioner.get_task_executor(task_id)\n        try:\n            db.task_update(self.ctx, task_id, {'executor': executor})\n            LOG.info('Distribute a new job, id: %s' % task_id)\n            self.task_rpcapi.assign_job(self.ctx, task_id, executor)\n        except Exception as e:\n            LOG.error('Failed to distribute the new job, reason: %s',\n                      six.text_type(e))\n            raise e\n\n    def distribute_failed_job(self, failed_task_id, executor):\n\n        try:\n            db.failed_task_update(self.ctx, failed_task_id,\n                                  {'executor': executor})\n            LOG.info('Distribute a failed job, id: %s' % failed_task_id)\n            self.task_rpcapi.assign_failed_job(self.ctx, failed_task_id,\n                                               executor)\n        except Exception as e:\n            LOG.error('Failed to distribute failed job, reason: %s',\n                      six.text_type(e))\n            raise e\n"
  },
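  {
    "path": "docs/examples/distribute_job_sketch.py",
    "content": "# Hypothetical usage sketch, added for illustration only; this file is\n# not part of the delfin source tree. It shows how a newly created\n# performance-collection task would be handed to TaskDistributor, which\n# picks an executor via consistent hashing, records it on the task row,\n# and casts an assign_job RPC to that executor's topic. It assumes the\n# tooz coordination backend and the message bus are reachable, and that\n# 'task_id' refers to an existing row in the task table.\nfrom delfin import context\nfrom delfin.leader_election.distributor import task_distributor\n\n\ndef distribute(task_id):\n    ctx = context.get_admin_context()\n    distributor = task_distributor.TaskDistributor(ctx)\n    distributor.distribute_new_job(task_id)\n"
  },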
  {
    "path": "delfin/leader_election/factory.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.leader_election.tooz.callback import ToozLeaderElectionCallback\nfrom delfin.leader_election.tooz.leader_elector import Elector\nfrom delfin.task_manager.scheduler.schedule_manager import SchedulerManager\n\nLEADER_ELECTION_KEY = \"delfin-performance-metric-collection\"\n\n\nclass LeaderElectionFactory:\n\n    @staticmethod\n    def construct_elector(plugin, leader_key=None):\n        \"\"\"\n        Construct leader election elector based on specified plugin\n\n        :param string plugin: required plugin for leader election\n        \"\"\"\n        # Maintain a unique key for metric collection leader election\n        leader_election_key = LEADER_ELECTION_KEY\n        if leader_key:\n            leader_election_key = leader_key\n\n        scheduler_mgr = SchedulerManager()\n\n        if plugin == \"tooz\":\n            scheduler_mgr.start()\n            # Create callback object\n            callback = ToozLeaderElectionCallback.register(\n                on_leading_callback=scheduler_mgr.schedule_boot_jobs,\n                on_stop_callback=scheduler_mgr.stop)\n\n            return Elector(callback, leader_election_key)\n        else:\n            raise ValueError(plugin)\n"
  },
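  {
    "path": "docs/examples/leader_election_factory_sketch.py",
    "content": "# Hypothetical usage sketch, added for illustration only; not part of\n# the delfin source tree. It shows the construct/run/cleanup cycle of\n# the tooz-based elector built by LeaderElectionFactory; run() blocks\n# and fires schedule_boot_jobs once this node is elected leader. It\n# assumes the coordination backend configuration is already loaded.\nfrom delfin.leader_election.factory import LeaderElectionFactory\n\nelector = LeaderElectionFactory.construct_elector('tooz')\ntry:\n    elector.run()\nfinally:\n    elector.cleanup()\n"
  },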
  {
    "path": "delfin/leader_election/interface.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Leader election interface defined\"\"\"\n\nimport six\nimport abc\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass LeaderCallback:\n\n    def __init__(self):\n        self.on_started_leading_callback = None\n        \"\"\"on_started_leading is called when elected as leader\"\"\"\n\n        self.on_stopped_leading_callback = None\n        \"\"\"on_stopped_leading is called when Leader give up its leadership\"\"\"\n\n    @abc.abstractmethod\n    def on_started_leading(self, *args, **kwargs):\n        pass\n\n    @abc.abstractmethod\n    def on_stopped_leading(self, *args, **kwargs):\n        pass\n\n    @classmethod\n    def register(cls, on_leading_callback, on_stop_callback):\n        callback = cls()\n        callback.on_started_leading_callback = on_leading_callback\n        callback.on_stopped_leading_callback = on_stop_callback\n        return callback\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass LeaderElector:\n\n    def __init__(self, callbacks, election_key):\n        self.callbacks = callbacks\n        self.election_key = election_key\n\n    @abc.abstractmethod\n    def run(self):\n        \"\"\"kick start leader election.\n        Invoke callback.on_started_leading callback once elected as leader\n        Invoke callback.on_stopped_leading callback once lose leadership\n\n        run returns once leader losses its leadership\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def cleanup(self):\n        \"\"\"Cleanup leader election residue\n        \"\"\"\n        pass\n"
  },
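  {
    "path": "docs/examples/leader_callback_sketch.py",
    "content": "# Hypothetical usage sketch, added for illustration only; not part of\n# the delfin source tree. It shows the register() pattern from\n# LeaderCallback: plain callables are wired into a concrete callback\n# class, exactly as factory.py does with ToozLeaderElectionCallback.\nfrom delfin.leader_election.tooz.callback import ToozLeaderElectionCallback\n\n\ndef on_leading():\n    print('elected leader: start scheduling jobs')\n\n\ndef on_stop():\n    print('lost leadership: stop scheduling jobs')\n\n\ncallback = ToozLeaderElectionCallback.register(\n    on_leading_callback=on_leading, on_stop_callback=on_stop)\n"
  },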
  {
    "path": "delfin/leader_election/tooz/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/leader_election/tooz/callback.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.leader_election.interface import LeaderCallback\n\n\nclass ToozLeaderElectionCallback(LeaderCallback):\n\n    def on_started_leading(self, *args, **kwargs):\n        return self.on_started_leading_callback()\n\n    def on_stopped_leading(self, *args, **kwargs):\n        return self.on_stopped_leading_callback()\n"
  },
  {
    "path": "delfin/leader_election/tooz/leader_elector.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Leader elector is leased based leader election\"\"\"\n\nimport threading\n\nfrom oslo_log import log\nfrom oslo_utils import timeutils\n\nfrom delfin.coordination import LeaderElectionCoordinator\nfrom delfin.leader_election.interface import LeaderElector\n\nLOG = log.getLogger(__name__)\n\n\nclass Elector(LeaderElector):\n\n    def __init__(self, callbacks, leader_election_key):\n        key = leader_election_key.encode('ascii')\n        super(Elector, self).__init__(callbacks, key)\n\n        self._coordinator = None\n        self.leader = False\n        self._stop = threading.Event()\n        self._runner = None\n\n    def run(self):\n        if self._coordinator:\n            return\n\n        self._stop.clear()\n\n        self._coordinator = LeaderElectionCoordinator()\n        self._coordinator.start()\n\n        self._coordinator.ensure_group(self.election_key)\n        self._coordinator.join_group()\n\n        self._coordinator. \\\n            register_on_start_leading_callback(self.\n                                               callbacks.on_started_leading)\n\n        # Register internal callback to notify being a leader\n        self._coordinator. \\\n            register_on_start_leading_callback(self.set_leader_callback)\n\n        while not self._stop.is_set():\n            with timeutils.StopWatch() as w:\n                LOG.debug(\"sending heartbeats for leader election\")\n                wait_until_next_beat = self._coordinator.send_heartbeat()\n\n            ran_for = w.elapsed()\n            has_to_sleep_for = wait_until_next_beat - ran_for\n            if has_to_sleep_for < 0:\n                LOG.warning(\n                    \"Heart beating took too long to execute (it ran for\"\n                    \" %0.2f seconds which is %0.2f seconds longer than\"\n                    \" the next heartbeat idle time). This may cause\"\n                    \" timeouts (in locks, leadership, ...) 
to\"\n                    \" happen (which will not end well).\", ran_for,\n                    ran_for - wait_until_next_beat)\n\n            # Check if coordinator is still a leader\n            if self.leader and not self._coordinator.is_still_leader():\n                self.on_stopped_leading()\n                self.leader = False\n                return\n            self._coordinator.start_leader_watch()\n\n            if self.leader:\n                # Adjust time for leader\n                has_to_sleep_for = has_to_sleep_for / 2\n\n            LOG.debug('resting after leader watch as leader=%(leader)s '\n                      'for heartbeat timeout of %(timeout)s sec',\n                      {'timeout': has_to_sleep_for, 'leader': self.leader})\n\n            self._stop.wait(has_to_sleep_for)\n\n    def set_leader_callback(self, *args, **kwargs):\n        self.leader = True\n\n    def cleanup(self):\n        if not self._stop.is_set():\n            self._stop.set()\n\n        if self.leader:\n            self.on_stopped_leading()\n            self.leader = False\n\n        if self._coordinator:\n            self._coordinator.stop()\n            self._coordinator = None\n\n    def on_stopped_leading(self):\n        self.callbacks.on_stopped_leading()\n"
  },
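  {
    "path": "docs/examples/heartbeat_pacing_sketch.py",
    "content": "# Hypothetical sketch, added for illustration only; not part of the\n# delfin source tree. It isolates the heartbeat pacing arithmetic from\n# Elector.run(): the loop sleeps for whatever remains of the heartbeat\n# interval after the beat itself, and halves the wait while leading so\n# leadership is re-checked more often. The numbers are assumed values.\nwait_until_next_beat = 5.0  # as returned by send_heartbeat()\nran_for = 0.2               # measured with timeutils.StopWatch\nleader = True\n\nhas_to_sleep_for = wait_until_next_beat - ran_for\nif has_to_sleep_for < 0:\n    print('heartbeat overran the interval; expect lock/lease timeouts')\nif leader:\n    has_to_sleep_for = has_to_sleep_for / 2\nprint('sleeping for %0.2f sec' % has_to_sleep_for)  # 2.40 sec here\n"
  },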
  {
    "path": "delfin/manager.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Base Manager class.\n\nManagers are responsible for a certain aspect of the system.  It is a logical\ngrouping of code relating to a portion of the system.  In general other\ncomponents should be using the manager to make changes to the components that\nit is responsible for.\n\nFor example, other components that need to deal with volumes in some way,\nshould do so by calling methods on the VolumeManager instead of directly\nchanging fields in the database.  This allows us to keep all of the code\nrelating to volumes in the same place.\n\nWe have adopted a basic strategy of Smart managers and dumb data, which means\nrather than attaching methods to data objects, components should call manager\nmethods that act on the data.\n\nMethods on managers that can be executed locally should be called directly. If\na particular method must execute on a remote host, this should be done via rpc\nto the service that wraps the manager\n\nManagers should be responsible for most of the db access, and\nnon-implementation specific data.  Anything implementation specific that can't\nbe generalized should be done by the Driver.\n\nIn general, we prefer to have one manager with multiple drivers for different\nimplementations, but sometimes it makes sense to have multiple managers.  
You\ncan think of it this way: Abstract different overall strategies at the manager\nlevel(FlatNetwork vs VlanNetwork), and different implementations at the driver\nlevel(LinuxNetDriver vs CiscoNetDriver).\n\nManagers will often provide methods for initial setup of a host or periodic\ntasks to a wrapping service.\n\nThis module provides Manager, a base class for managers.\n\n\"\"\"\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_service import periodic_task\n\nfrom delfin.db import base\nfrom delfin import version\n\nCONF = cfg.CONF\nLOG = log.getLogger(__name__)\n\n\nclass PeriodicTasks(periodic_task.PeriodicTasks):\n    def __init__(self):\n        super(PeriodicTasks, self).__init__(CONF)\n\n\nclass Manager(base.Base, PeriodicTasks):\n\n    @property\n    def RPC_API_VERSION(self):\n        \"\"\"Redefine this in child classes.\"\"\"\n        raise NotImplementedError\n\n    @property\n    def target(self):\n        \"\"\"This property is used by oslo_messaging.\n\n        https://wiki.openstack.org/wiki/Oslo/Messaging#API_Version_Negotiation\n        \"\"\"\n        if not hasattr(self, '_target'):\n            import oslo_messaging as messaging\n            self._target = messaging.Target(version=self.RPC_API_VERSION)\n        return self._target\n\n    def __init__(self, host=None, db_driver=None):\n        if not host:\n            host = CONF.host\n        self.host = host\n        self.additional_endpoints = []\n        super(Manager, self).__init__(db_driver)\n\n    def periodic_tasks(self, context, raise_on_error=False):\n        \"\"\"Tasks to be run at a periodic interval.\"\"\"\n        return self.run_periodic_tasks(context, raise_on_error=raise_on_error)\n\n    def init_host(self):\n        \"\"\"Handle initialization if this is a standalone service.\n\n        Child classes should override this method.\n\n        \"\"\"\n        pass\n\n    def service_version(self, context):\n        return version.version_string()\n\n    def service_config(self, context):\n        config = {}\n        for key in CONF:\n            config[key] = CONF.get(key, None)\n        return config\n\n    def is_service_ready(self):\n        \"\"\"Method indicating if service is ready.\n\n        This method should be overridden by subclasses which will return False\n        when the back end is not ready yet.\n\n        \"\"\"\n        return True\n"
  },
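  {
    "path": "docs/examples/manager_subclass_sketch.py",
    "content": "# Hypothetical sketch, added for illustration only; not part of the\n# delfin source tree. It shows the minimal shape of a Manager subclass\n# described by the delfin/manager.py docstring: redefine\n# RPC_API_VERSION (the base property raises NotImplementedError) and\n# let the wrapping Service expose the methods as RPC endpoints.\nfrom delfin import manager\n\n\nclass ExampleManager(manager.Manager):\n\n    RPC_API_VERSION = '1.0'\n\n    def __init__(self, service_name=None, *args, **kwargs):\n        super(ExampleManager, self).__init__(*args, **kwargs)\n\n    def ping(self, context):\n        # Becomes callable over RPC once the Service registers this\n        # manager as an oslo.messaging endpoint.\n        return 'pong'\n"
  },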
  {
    "path": "delfin/rpc.py",
    "content": "# Copyright 2013 Red Hat, Inc.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n__all__ = [\n    'init',\n    'cleanup',\n    'set_defaults',\n    'add_extra_exmods',\n    'clear_extra_exmods',\n    'get_allowed_exmods',\n    'RequestContextSerializer',\n    'get_client',\n    'get_server',\n    'get_notifier',\n]\n\nfrom oslo_config import cfg\nimport oslo_messaging as messaging\nfrom oslo_messaging.rpc import dispatcher\nfrom oslo_serialization import jsonutils\n\nimport delfin.context\nimport delfin.exception\nfrom delfin import utils\n\nCONF = cfg.CONF\nTRANSPORT = None\nNOTIFICATION_TRANSPORT = None\nNOTIFIER = None\n\nALLOWED_EXMODS = [\n    delfin.exception.__name__,\n]\nEXTRA_EXMODS = []\n\n\ndef init(conf):\n    global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER\n    exmods = get_allowed_exmods()\n    TRANSPORT = messaging.get_rpc_transport(conf,\n                                            allowed_remote_exmods=exmods)\n    NOTIFICATION_TRANSPORT = messaging.get_notification_transport(\n        conf,\n        allowed_remote_exmods=exmods)\n\n    if utils.notifications_enabled(conf):\n        json_serializer = messaging.JsonPayloadSerializer()\n        serializer = RequestContextSerializer(json_serializer)\n        NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,\n                                      serializer=serializer)\n    else:\n        NOTIFIER = utils.DO_NOTHING\n\n\ndef initialized():\n    return None not in [TRANSPORT, NOTIFIER]\n\n\ndef cleanup():\n    global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER\n    assert TRANSPORT is not None\n    assert NOTIFICATION_TRANSPORT is not None\n    assert NOTIFIER is not None\n    TRANSPORT.cleanup()\n    NOTIFICATION_TRANSPORT.cleanup()\n    TRANSPORT = NOTIFIER = NOTIFICATION_TRANSPORT = None\n\n\ndef set_defaults(control_exchange):\n    messaging.set_transport_defaults(control_exchange)\n\n\ndef add_extra_exmods(*args):\n    EXTRA_EXMODS.extend(args)\n\n\ndef clear_extra_exmods():\n    del EXTRA_EXMODS[:]\n\n\ndef get_allowed_exmods():\n    return ALLOWED_EXMODS + EXTRA_EXMODS\n\n\nclass JsonPayloadSerializer(messaging.NoOpSerializer):\n    @staticmethod\n    def serialize_entity(context, entity):\n        return jsonutils.to_primitive(entity, convert_instances=True)\n\n\nclass RequestContextSerializer(messaging.Serializer):\n\n    def __init__(self, base):\n        self._base = base\n\n    def serialize_entity(self, context, entity):\n        if not self._base:\n            return entity\n        return self._base.serialize_entity(context, entity)\n\n    def deserialize_entity(self, context, entity):\n        if not self._base:\n            return entity\n        return self._base.deserialize_entity(context, entity)\n\n    def serialize_context(self, context):\n        return context.to_dict()\n\n    def deserialize_context(self, context):\n        return delfin.context.RequestContext.from_dict(context)\n\n\ndef get_transport_url(url_str=None):\n    return 
messaging.TransportURL.parse(CONF, url_str)\n\n\ndef get_client(target, version_cap=None, serializer=None):\n    assert TRANSPORT is not None\n    serializer = RequestContextSerializer(serializer)\n    return messaging.RPCClient(TRANSPORT,\n                               target,\n                               version_cap=version_cap,\n                               serializer=serializer)\n\n\ndef get_server(target, endpoints, serializer=None):\n    assert TRANSPORT is not None\n    access_policy = dispatcher.DefaultRPCAccessPolicy\n    serializer = RequestContextSerializer(serializer)\n    return messaging.get_rpc_server(TRANSPORT,\n                                    target,\n                                    endpoints,\n                                    executor='eventlet',\n                                    serializer=serializer,\n                                    access_policy=access_policy)\n\n\n@utils.if_notifications_enabled\ndef get_notifier(service=None, host=None, publisher_id=None):\n    assert NOTIFIER is not None\n    if not publisher_id:\n        publisher_id = \"%s.%s\" % (service, host or CONF.host)\n    return NOTIFIER.prepare(publisher_id=publisher_id)\n"
  },
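  {
    "path": "docs/examples/rpc_client_sketch.py",
    "content": "# Hypothetical sketch, added for illustration only; not part of the\n# delfin source tree. It shows the client side of the delfin/rpc.py\n# wrappers: rpc.init(CONF) must have been called by the hosting\n# process, and 'example-topic' is an assumed topic name. cast() is\n# fire-and-forget; call() would block for a return value.\nimport oslo_messaging as messaging\n\nfrom delfin import context\nfrom delfin import rpc\n\nctx = context.get_admin_context()\ntarget = messaging.Target(topic='example-topic', version='1.0')\nclient = rpc.get_client(target, version_cap='1.0')\nclient.prepare(version='1.0').cast(ctx, 'ping')\n"
  },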
  {
    "path": "delfin/service.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# Copyright 2011 Justin Santa Barbara\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Generic Node base class for all workers that run on hosts.\"\"\"\n\nimport inspect\nimport os\nimport random\n\nimport eventlet\nimport oslo_messaging as messaging\nfrom eventlet import event\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_service import loopingcall\nfrom oslo_service import service\nfrom oslo_service import threadgroup\nfrom oslo_service import wsgi\nfrom oslo_utils import importutils\n\nfrom delfin import context\nfrom delfin import coordination\nfrom delfin import rpc\nfrom delfin.leader_election.factory import LeaderElectionFactory\n\nLOG = log.getLogger(__name__)\n\nservice_opts = [\n    cfg.BoolOpt('periodic_enable',\n                default=True,\n                help='If enable periodic task.'),\n    cfg.IntOpt('periodic_interval',\n               default=60,\n               help='Seconds between running periodic tasks.'),\n    cfg.IntOpt('periodic_fuzzy_delay',\n               default=60,\n               help='Range of seconds to randomly delay when starting the '\n                    'periodic task scheduler to reduce stampeding. '\n                    '(Disable by setting to 0)'),\n    cfg.HostAddressOpt('delfin_listen',\n                       default=\"::\",\n                       help='IP address for Delfin API to listen '\n                            'on.'),\n    cfg.PortOpt('delfin_listen_port',\n                default=8190,\n                help='Port for Delfin API to listen on.'),\n    cfg.IntOpt('delfin_workers',\n               default=1,\n               help='Number of workers for Delfin API service.'),\n    cfg.BoolOpt('delfin_use_ssl',\n                default=False,\n                help='Wraps the socket in a SSL context if True is set. '\n                     'A certificate file and key file must be specified.'),\n    cfg.HostAddressOpt('trap_receiver_address',\n                       default=\"0.0.0.0\",\n                       help='IP address at which trap receiver listens.'),\n    cfg.PortOpt('trap_receiver_port',\n                default=162,\n                help='Port at which trap receiver listens.'),\n    cfg.StrOpt('leader_election_plugin',\n               default=\"tooz\",\n               help='Supported plugin for leader election. Options: '\n                    'tooz(Default)'),\n]\n\nCONF = cfg.CONF\nCONF.register_opts(service_opts)\n\n\nclass Service(service.Service):\n    \"\"\"Service object for binaries running on hosts.\n\n    A service takes a manager and enables rpc by listening to queues based\n    on topic. 
It also periodically runs tasks on the manager and reports\n    it state to the database services table.\n    \"\"\"\n\n    def __init__(self, host, binary, topic, manager, periodic_enable=None,\n                 periodic_interval=None, periodic_fuzzy_delay=None,\n                 service_name=None, coordination=False, *args, **kwargs):\n        super(Service, self).__init__()\n        if not rpc.initialized():\n            rpc.init(CONF)\n        self.host = host\n        self.binary = binary\n        self.topic = topic\n        self.manager_class_name = manager\n        manager_class = importutils.import_class(self.manager_class_name)\n        self.manager = manager_class(host=self.host,\n                                     service_name=service_name,\n                                     *args, **kwargs)\n        self.periodic_enable = periodic_enable\n        self.periodic_interval = periodic_interval\n        self.periodic_fuzzy_delay = periodic_fuzzy_delay\n        self.saved_args, self.saved_kwargs = args, kwargs\n        self.timers = []\n        self.coordinator = coordination\n\n    def start(self):\n        if self.coordinator:\n            coordination.LOCK_COORDINATOR.start()\n\n        LOG.info('Starting %(topic)s node.', {'topic': self.topic})\n        LOG.debug(\"Creating RPC server for service %s.\", self.topic)\n\n        target = messaging.Target(topic=self.topic, server=self.host)\n        endpoints = [self.manager]\n        endpoints.extend(self.manager.additional_endpoints)\n        self.rpcserver = rpc.get_server(target, endpoints)\n        self.rpcserver.start()\n\n        self.manager.init_host()\n\n        if self.periodic_interval:\n            if self.periodic_fuzzy_delay:\n                initial_delay = random.randint(0, self.periodic_fuzzy_delay)\n            else:\n                initial_delay = None\n\n            periodic = loopingcall.FixedIntervalLoopingCall(\n                self.periodic_tasks)\n            periodic.start(interval=self.periodic_interval,\n                           initial_delay=initial_delay)\n            self.timers.append(periodic)\n\n    def __getattr__(self, key):\n        manager = self.__dict__.get('manager', None)\n        return getattr(manager, key)\n\n    @classmethod\n    def create(cls, host=None, binary=None, topic=None, manager=None,\n               periodic_enable=None, periodic_interval=None,\n               periodic_fuzzy_delay=None, service_name=None,\n               coordination=False, *args, **kwargs):\n        \"\"\"Instantiates class and passes back application object.\n\n        :param host: defaults to CONF.host\n        :param binary: defaults to basename of executable\n        :param topic: defaults to bin_name - 'delfin-' part\n        :param manager: defaults to CONF.<topic>_manager\n        :param periodic_enable: defaults to CONF.periodic_enable\n        :param periodic_interval: defaults to CONF.periodic_interval\n        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay\n\n        \"\"\"\n        if not host:\n            host = CONF.host\n        if not binary:\n            binary = os.path.basename(inspect.stack()[-1][1])\n        if not topic:\n            topic = binary\n        if not manager:\n            subtopic = topic.rpartition('delfin-')[2]\n            manager = CONF.get('%s_manager' % subtopic, None)\n        if periodic_enable is None:\n            periodic_enable = CONF.periodic_enable\n        if periodic_interval is None:\n            periodic_interval = 
CONF.periodic_interval\n        if periodic_fuzzy_delay is None:\n            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay\n        service_obj = cls(host, binary, topic, manager,\n                          periodic_enable=periodic_enable,\n                          periodic_interval=periodic_interval,\n                          periodic_fuzzy_delay=periodic_fuzzy_delay,\n                          service_name=service_name,\n                          coordination=coordination,\n                          *args, **kwargs)\n\n        return service_obj\n\n    def kill(self):\n        \"\"\"Destroy the service object in the datastore.\"\"\"\n        self.stop()\n\n    def stop(self, graceful=False):\n        # Try to shut the connection down, but if we get any sort of\n        # errors, go ahead and ignore them.. as we're shutting down anyway\n        try:\n            if hasattr(self, 'rpcserver'):\n                self.rpcserver.stop()\n        except Exception as e:\n            LOG.error('Stop the rpc server failed, the reason is %s.', e)\n        for x in self.timers:\n            try:\n                x.stop()\n            except Exception as e:\n                LOG.error('Stop the timers failed, the reason is %s.', e)\n        if self.coordinator:\n            try:\n                coordination.LOCK_COORDINATOR.stop()\n            except Exception:\n                LOG.exception(\"Unable to stop the Tooz Locking \"\n                              \"Coordinator.\")\n\n        self.timers = []\n\n        super(Service, self).stop(graceful)\n\n    def wait(self):\n        for x in self.timers:\n            try:\n                x.wait()\n            except Exception:\n                pass\n\n    def periodic_tasks(self, raise_on_error=False):\n        \"\"\"Tasks to be run at a periodic interval.\"\"\"\n        ctxt = context.get_admin_context()\n        self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)\n\n\nclass AlertService(Service):\n    \"\"\"Service object for triggering trap receiver functionalities.\n        \"\"\"\n\n    @classmethod\n    def create(cls, host=None, binary=None, topic=None,\n               manager=None, periodic_interval=None,\n               periodic_fuzzy_delay=None, service_name=None,\n               coordination=False, *args, **kwargs):\n        kwargs['trap_receiver_address'] = CONF.trap_receiver_address\n        kwargs['trap_receiver_port'] = CONF.trap_receiver_port\n\n        service_obj = super(AlertService, cls).create(\n            host=host, binary=binary, topic=topic, manager=manager,\n            periodic_interval=periodic_interval,\n            periodic_fuzzy_delay=periodic_fuzzy_delay,\n            service_name=service_name,\n            coordination=coordination, *args, **kwargs)\n\n        return service_obj\n\n    def start(self):\n        super(AlertService, self).start()\n        self.manager.start()\n\n    def stop(self):\n        try:\n            self.manager.stop()\n        except Exception:\n            pass\n        super(AlertService, self).stop()\n\n\nclass TaskService(Service):\n    \"\"\"Service object for triggering task manager functionalities.\n        \"\"\"\n\n    @classmethod\n    def create(cls, host=None, binary=None, topic=None,\n               manager=None, periodic_interval=None,\n               periodic_fuzzy_delay=None, service_name=None,\n               coordination=False, *args, **kwargs):\n        service_obj = super(TaskService, cls).create(\n            host=host, binary=binary, topic=topic, 
manager=manager,\n            periodic_interval=periodic_interval,\n            periodic_fuzzy_delay=periodic_fuzzy_delay,\n            service_name=service_name,\n            coordination=coordination, *args, **kwargs)\n\n        return service_obj\n\n    def start(self):\n        super(TaskService, self).start()\n\n\nclass MetricsService(Service):\n    \"\"\"Service object for triggering metrics manager functionalities.\n        \"\"\"\n\n    @classmethod\n    def create(cls, host=None, binary=None, topic=None,\n               manager=None, periodic_interval=None,\n               periodic_fuzzy_delay=None, service_name=None,\n               coordination=False, *args, **kwargs):\n        service_obj = super(MetricsService, cls).create(\n            host=host, binary=binary, topic=topic, manager=manager,\n            periodic_interval=periodic_interval,\n            periodic_fuzzy_delay=periodic_fuzzy_delay,\n            service_name=service_name,\n            coordination=coordination, *args, **kwargs)\n\n        return service_obj\n\n    def start(self):\n        super(MetricsService, self).start()\n        self.manager.init_scheduler(self.topic, self.host)\n\n\nclass LeaderElectionService(service.Service):\n    \"\"\"Leader election service for distributed system\n\n    The service takes callback functions and leader election unique\n    key to synchronize leaders in distributed environment\n    \"\"\"\n\n    def __init__(self, leader_elector, *args, **kwargs):\n        super(LeaderElectionService, self).__init__()\n        self.leader_elector = leader_elector\n\n        self._tg = threadgroup.ThreadGroup()\n        self._stop = event.Event()\n\n    def start(self):\n        \"\"\"Start leader election service\n        \"\"\"\n\n        def run_leader_service(stop):\n            while not stop.ready():\n                try:\n                    # Start/restart participating in leader election\n                    LOG.info(\"Starting leader election service\")\n                    self.leader_elector.run()\n                except Exception as e:\n                    LOG.error(\"Exception in leader election run [%s]\" % e)\n\n                try:\n                    # Cleanup and again start participating for leadership\n                    LOG.info(\"Cleaning leader election residue\")\n                    self.leader_elector.cleanup()\n                except Exception as e:\n                    LOG.error(\"Exception in leader election cleanup [%s]\" % e)\n\n                # Wait for grace period\n                LOG.info(\n                    \"Waiting till grace period[%s] to restart leader \"\n                    \"election\" % CONF.coordination.lease_timeout)\n                eventlet.greenthread.sleep(CONF.coordination.lease_timeout)\n\n        self._tg.add_thread(run_leader_service, self._stop)\n\n    def __getattr__(self, key):\n        leader = self.__dict__.get('leader', None)\n        return getattr(leader, key)\n\n    @classmethod\n    def create(cls, *args, **kwargs):\n        \"\"\"Instantiates class and passes back application object.\n        \"\"\"\n        leader_elector = LeaderElectionFactory.construct_elector(\n            CONF.leader_election_plugin)\n\n        service_obj = cls(leader_elector, *args, **kwargs)\n\n        return service_obj\n\n    def kill(self):\n        self.stop()\n\n    def stop(self, graceful=False):\n        # Stop leader election service\n        if not self._stop.ready():\n            self._stop.send()\n\n        try:\n            # cleanup 
after stop\n            if self.leader_elector:\n                self.leader_elector.cleanup()\n        except Exception as e:\n            LOG.warning(\"Exception in leader election cleanup [%s]\" % e)\n\n        # Reap thread group:\n        self.tg.stop(graceful)\n\n        super(LeaderElectionService, self).stop(graceful)\n\n    def wait(self):\n        self._tg.wait()\n\n\nclass WSGIService(service.ServiceBase):\n    \"\"\"Provides ability to launch API from a 'paste' configuration.\"\"\"\n\n    def __init__(self, name, loader=None, coordination=False):\n        \"\"\"Initialize, but do not start the WSGI server.\n\n        :param name: The name of the WSGI server given to the loader.\n        :param loader: Loads the WSGI application using the given name.\n        :returns: None\n\n        \"\"\"\n        self.name = name\n        self.manager = self._get_manager()\n        self.loader = loader or wsgi.Loader(CONF)\n        if not rpc.initialized():\n            rpc.init(CONF)\n        self.app = self.loader.load_app(name)\n        self.host = getattr(CONF, '%s_listen' % name, \"0.0.0.0\")\n        self.port = getattr(CONF, '%s_listen_port' % name, 0)\n        self.workers = getattr(CONF, '%s_workers' % name, None)\n        self.use_ssl = getattr(CONF, '%s_use_ssl' % name, False)\n        if self.workers is not None and self.workers < 1:\n            LOG.warning(\n                \"Value of config option %(name)s_workers must be integer \"\n                \"greater than 1.  Input value ignored.\", {'name': name})\n            # Reset workers to default\n            self.workers = None\n        self.server = wsgi.Server(\n            CONF,\n            name,\n            self.app,\n            host=self.host,\n            port=self.port,\n            use_ssl=self.use_ssl\n        )\n        self.coordinator = coordination\n\n    def _get_manager(self):\n        \"\"\"Initialize a Manager object appropriate for this service.\n\n        Use the service name to look up a Manager subclass from the\n        configuration and initialize an instance. 
If no class name\n        is configured, just return None.\n\n        :returns: a Manager instance, or None.\n\n        \"\"\"\n        fl = '%s_manager' % self.name\n        if fl not in CONF:\n            return None\n\n        manager_class_name = CONF.get(fl, None)\n        if not manager_class_name:\n            return None\n\n        manager_class = importutils.import_class(manager_class_name)\n        return manager_class()\n\n    def start(self):\n        \"\"\"Start serving this service using loaded configuration.\n\n        Also, retrieve updated port number in case '0' was passed in, which\n        indicates a random port should be used.\n\n        :returns: None\n\n        \"\"\"\n        if self.coordinator:\n            coordination.LOCK_COORDINATOR.start()\n        if self.manager:\n            self.manager.init_host()\n        self.server.start()\n        self.port = self.server.port\n\n    def stop(self):\n        \"\"\"Stop serving this API.\n\n        :returns: None\n\n        \"\"\"\n        try:\n            self.server.stop()\n        except Exception:\n            pass\n\n        self._stop_coordinator()\n\n    def wait(self):\n        \"\"\"Wait for the service to stop serving this API.\n\n        :returns: None\n\n        \"\"\"\n        self.server.wait()\n        self._stop_coordinator()\n\n    def reset(self):\n        \"\"\"Reset server greenpool size to default.\n\n        :returns: None\n        \"\"\"\n        self.server.reset()\n\n    def _stop_coordinator(self):\n        if self.coordinator:\n            try:\n                coordination.LOCK_COORDINATOR.stop()\n            except Exception:\n                LOG.exception(\"Unable to stop the Tooz Locking \"\n                              \"Coordinator.\")\n\n\ndef process_launcher():\n    return service.ServiceLauncher(CONF, restart_method='reload')\n\n\n# NOTE(vish): the global launcher is to maintain the existing\n#             functionality of calling service.serve +\n#             service.wait\n_launcher = None\n\n\ndef serve(server, workers=None):\n    global _launcher\n    if not _launcher:\n        _launcher = service.Launcher(CONF, restart_method='mutate')\n\n    _launcher.launch_service(server, workers=workers)\n\n\ndef wait():\n    CONF.log_opt_values(LOG, log.DEBUG)\n    try:\n        _launcher.wait()\n    except KeyboardInterrupt:\n        _launcher.stop()\n    rpc.cleanup()\n"
  },
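  {
    "path": "docs/examples/service_entrypoint_sketch.py",
    "content": "# Hypothetical sketch, added for illustration only; not part of the\n# delfin source tree. It shows the create()/serve()/wait() pattern the\n# delfin/cmd entry points use around delfin/service.py. With\n# binary='delfin-task' the topic defaults to the binary name and the\n# manager class is resolved from CONF.task_manager; the wiring here is\n# illustrative, not a verbatim copy of any cmd module.\nfrom delfin import service\n\nserver = service.TaskService.create(\n    binary='delfin-task',\n    coordination=True)\nservice.serve(server)\nservice.wait()\n"
  },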
  {
    "path": "delfin/ssl_utils.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport requests\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom urllib3 import PoolManager\nfrom OpenSSL.crypto import load_certificate, FILETYPE_PEM\n\nfrom delfin import exception\n\nLOG = log.getLogger(__name__)\nCONF = cfg.CONF\nFILE = 'configs.json'\n\n\ndef get_storage_ca_path():\n    return CONF.storage_driver.ca_path\n\n\ndef verify_ca_path(ca_path):\n    \"\"\"\n    Checking the ca_path exists\n    \"\"\"\n    if not os.path.exists(ca_path):\n        LOG.error(\"Directory {0} could not be found.\".format(ca_path))\n        raise exception.InvalidCAPath(ca_path)\n\n\ndef _load_cert(fpath, file, ca_path):\n    with open(fpath, \"rb\") as f:\n        cert_content = f.read()\n        cert = load_certificate(FILETYPE_PEM,\n                                cert_content)\n        hash_val = cert.subject_name_hash()\n        hash_hex = hex(hash_val).strip('0x') + \".0\"\n        linkfile = ca_path + hash_hex\n        if os.path.exists(linkfile):\n            LOG.debug(\"Link for {0} already exist.\".\n                      format(file))\n        else:\n            LOG.info(\"Create link file {0} for {1}.\".\n                     format(linkfile, fpath))\n            os.symlink(fpath, linkfile)\n\n\ndef reload_certificate(ca_path):\n    \"\"\"\n    Checking the driver security config validation.\n    As required by requests, ca_path must be a directory prepared using\n    the c_rehash tool included with OpenSSL.\n    Once new certificate added, this function can be called for update.\n    If there is a CA certificate chain, all CA certificates along this\n    chain should be included in a single file.\n    \"\"\"\n\n    suffixes = ['.pem', '.cer', '.crt', '.crl']\n    files = os.listdir(ca_path)\n    for file in files:\n        if not os.path.isdir(file):\n            suf = os.path.splitext(file)[1]\n            if suf in suffixes:\n                fpath = ca_path + file\n                _load_cert(fpath, file, ca_path)\n\n\ndef get_host_name_ignore_adapter():\n    return HostNameIgnoreAdapter()\n\n\nclass HostNameIgnoreAdapter(requests.adapters.HTTPAdapter):\n    def cert_verify(self, conn, url, verify, cert):\n        conn.assert_hostname = False\n        return super(HostNameIgnoreAdapter, self).cert_verify(\n            conn, url, verify, cert)\n\n    def init_poolmanager(self, connections, maxsize, block=False,\n                         **pool_kwargs):\n        self._pool_connections = connections\n        self._pool_maxsize = maxsize\n        self._pool_block = block\n        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,\n                                       block=block, strict=True, **pool_kwargs)\n"
  },
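  {
    "path": "docs/examples/ca_rehash_sketch.py",
    "content": "# Hypothetical sketch, added for illustration only; not part of the\n# delfin source tree. It walks one certificate through what\n# reload_certificate()/_load_cert() do: compute the OpenSSL subject\n# name hash and create the c_rehash-style '<8-hex-digit-hash>.0'\n# symlink that requests expects inside a CA directory. The paths are\n# assumed values.\nimport os\n\nfrom OpenSSL.crypto import load_certificate, FILETYPE_PEM\n\nca_path = '/etc/delfin/certs/'  # assumed CA directory (trailing slash)\nfpath = ca_path + 'ca.pem'      # assumed certificate file\n\nwith open(fpath, 'rb') as f:\n    cert = load_certificate(FILETYPE_PEM, f.read())\n\nlinkfile = ca_path + format(cert.subject_name_hash(), '08x') + '.0'\nif not os.path.exists(linkfile):\n    os.symlink(fpath, linkfile)\n"
  },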
  {
    "path": "delfin/task_manager/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/task_manager/manager.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n\n**periodical task manager**\n\n\"\"\"\nfrom oslo_log import log\nfrom oslo_utils import importutils\n\nfrom delfin import manager\nfrom delfin.drivers import manager as driver_manager\nfrom delfin.drivers import api as driver_api\nfrom delfin.task_manager.tasks import alerts, telemetry\n\nLOG = log.getLogger(__name__)\n\n\nclass TaskManager(manager.Manager):\n    \"\"\"manage periodical tasks\"\"\"\n\n    RPC_API_VERSION = '1.0'\n\n    def __init__(self, service_name=None, *args, **kwargs):\n        self.alert_task = alerts.AlertSyncTask()\n        self.telemetry_task = telemetry.TelemetryTask()\n        super(TaskManager, self).__init__(*args, **kwargs)\n\n    def sync_storage_resource(self, context, storage_id, resource_task):\n        LOG.debug(\"Received the sync_storage task: {0} request for storage\"\n                  \" id:{1}\".format(resource_task, storage_id))\n        cls = importutils.import_class(resource_task)\n        device_obj = cls(context, storage_id)\n        device_obj.sync()\n\n    def remove_storage_resource(self, context, storage_id, resource_task):\n        cls = importutils.import_class(resource_task)\n        device_obj = cls(context, storage_id)\n        device_obj.remove()\n\n    def remove_storage_in_cache(self, context, storage_id):\n        LOG.info('Remove storage device in memory for storage id:{0}'\n                 .format(storage_id))\n        driver_api.API().remove_storage(context, storage_id)\n        drivers = driver_manager.DriverManager()\n        drivers.remove_driver(storage_id)\n\n    def sync_storage_alerts(self, context, storage_id, query_para):\n        LOG.info('Alert sync called for storage id:{0}'\n                 .format(storage_id))\n        self.alert_task.sync_alerts(context, storage_id, query_para)\n\n    def clear_storage_alerts(self, context, storage_id, sequence_number_list):\n        LOG.info('Clear alerts called for storage id: {0}'\n                 .format(storage_id))\n        return self.alert_task.clear_alerts(context,\n                                            storage_id,\n                                            sequence_number_list)\n"
  },
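  {
    "path": "docs/examples/resource_task_import_sketch.py",
    "content": "# Hypothetical sketch, added for illustration only; not part of the\n# delfin source tree. It shows how TaskManager.sync_storage_resource\n# turns a dotted-path string into a resource-sync object with\n# oslo.utils importutils; the class path and storage id below are\n# assumed for illustration.\nfrom oslo_utils import importutils\n\nfrom delfin import context\n\nresource_task = 'delfin.task_manager.tasks.resources.StorageDeviceTask'\nstorage_id = 'storage-uuid-1'\n\nctx = context.get_admin_context()\ncls = importutils.import_class(resource_task)\ndevice_obj = cls(ctx, storage_id)\ndevice_obj.sync()\n"
  },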
  {
    "path": "delfin/task_manager/metrics_manager.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nperiodical task manager for metric collection tasks**\n\"\"\"\nfrom apscheduler.schedulers.background import BackgroundScheduler\nimport datetime\nimport six\n\nfrom oslo_log import log\nfrom oslo_config import cfg\nfrom oslo_utils import uuidutils\nfrom oslo_service import service as oslo_ser\n\nfrom delfin import context as ctxt\nfrom delfin.coordination import ConsistentHashing, GroupMembership\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import manager\nfrom delfin import service\nfrom delfin.task_manager.scheduler import schedule_manager\nfrom delfin.task_manager import subprocess_rpcapi as rpcapi\nfrom delfin.task_manager.scheduler.schedulers.telemetry.job_handler \\\n    import FailedJobHandler\nfrom delfin.task_manager.scheduler.schedulers.telemetry.job_handler \\\n    import JobHandler\n\nLOG = log.getLogger(__name__)\nCONF = cfg.CONF\n\n\nclass MetricsTaskManager(manager.Manager):\n    \"\"\"manage periodical tasks\"\"\"\n\n    RPC_API_VERSION = '1.0'\n\n    def __init__(self, service_name=None, *args, **kwargs):\n        super(MetricsTaskManager, self).__init__(*args, **kwargs)\n        scheduler = schedule_manager.SchedulerManager()\n        scheduler.start()\n        partitioner = ConsistentHashing()\n        partitioner.start()\n        partitioner.join_group()\n        self.watch_job_id = None\n        self.cleanup_job_id = None\n        self.group = None\n        self.watcher = None\n        self.scheduler = None\n        self.rpcapi = rpcapi.SubprocessAPI()\n        self.executor_map = {}\n        self.enable_sub_process = CONF.telemetry.enable_dynamic_subprocess\n        if self.enable_sub_process:\n            self.scheduler = BackgroundScheduler()\n            self.scheduler.start()\n        self.schedule_boot_jobs(self.host)\n\n    def assign_job(self, context, task_id, executor):\n        if not self.enable_sub_process:\n            instance = JobHandler.get_instance(context, task_id)\n            instance.schedule_job(task_id)\n        else:\n            if not self.watch_job_id:\n                self.init_watchers(executor)\n            local_executor = self.get_local_executor(\n                context, task_id, None, executor)\n            self.rpcapi.assign_job_local(context, task_id, local_executor)\n\n    def remove_job(self, context, task_id, executor):\n        if not self.enable_sub_process:\n            instance = JobHandler.get_instance(context, task_id)\n            instance.remove_job(task_id)\n        else:\n            job = db.task_get(context, task_id)\n            storage_id = job['storage_id']\n            for name in self.executor_map.keys():\n                if storage_id in self.executor_map[name][\"storages\"]:\n                    local_executor = \"{0}:{1}\".format(executor, name)\n                    self.rpcapi.remove_job_local(\n                        context, task_id, local_executor)\n                    
tasks, failed_tasks = self.get_all_tasks(storage_id)\n                    if len(failed_tasks) == 0 and len(tasks) == 0:\n                        self.stop_executor(name, local_executor, storage_id)\n\n    def assign_failed_job(self, context, failed_task_id, executor):\n        if not self.enable_sub_process:\n            instance = FailedJobHandler.get_instance(context, failed_task_id)\n            instance.schedule_failed_job(failed_task_id)\n        else:\n            if not self.watch_job_id:\n                self.init_watchers(executor)\n\n            local_executor = self.get_local_executor(\n                context, None, failed_task_id, executor)\n            self.rpcapi.assign_failed_job_local(\n                context, failed_task_id, local_executor)\n\n    def remove_failed_job(self, context, failed_task_id, executor):\n        if not self.enable_sub_process:\n            instance = FailedJobHandler.get_instance(context, failed_task_id)\n            instance.remove_failed_job(failed_task_id)\n        else:\n            job = db.failed_task_get(context, failed_task_id)\n            storage_id = job['storage_id']\n            for name in self.executor_map.keys():\n                if storage_id in self.executor_map[name][\"storages\"]:\n                    local_executor = \"{0}:{1}\".format(executor, name)\n                    self.rpcapi.remove_failed_job_local(\n                        context, failed_task_id, local_executor)\n                    tasks, failed_tasks = self.get_all_tasks(storage_id)\n                    if len(failed_tasks) == 0 and len(tasks) == 0:\n                        self.stop_executor(name, local_executor, storage_id)\n\n    def schedule_boot_jobs(self, executor):\n        \"\"\"Schedule periodic collection if any task is currently assigned to\n        this executor \"\"\"\n        try:\n            filters = {'executor': executor,\n                       'deleted': False}\n            context = ctxt.get_admin_context()\n            tasks = db.task_get_all(context, filters=filters)\n            failed_tasks = db.failed_task_get_all(context, filters=filters)\n            LOG.info(\"Scheduling boot time jobs for this executor: total \"\n                     \"jobs to be handled :%s\" % len(tasks))\n            for task in tasks:\n                self.assign_job(context, task['id'], executor)\n                LOG.debug('Periodic collection job assigned for id: '\n                          '%s ' % task['id'])\n            for failed_task in failed_tasks:\n                self.assign_failed_job(context, failed_task['id'], executor)\n                LOG.debug('Failed job assigned for id: '\n                          '%s ' % failed_task['id'])\n\n        except Exception as e:\n            LOG.error(\"Failed to schedule boot jobs for this executor \"\n                      \"reason: %s.\",\n                      six.text_type(e))\n        else:\n            LOG.debug(\"Boot job scheduling completed.\")\n\n    def init_watchers(self, group):\n        watcher = GroupMembership(agent_id=group)\n        watcher.start()\n        watcher.create_group(group)\n        LOG.info('Created child process membership group {0}.'\n                 'Initial members of group: {1}'\n                 .format(group, watcher.get_members(group)))\n\n        watcher.register_watcher_func(group,\n                                      self.on_process_join,\n                                      self.on_process_leave)\n        self.group = group\n        self.watcher = watcher\n        
self.watch_job_id = uuidutils.generate_uuid()\n        self.scheduler.add_job(watcher.watch_group_change, 'interval',\n                               seconds=CONF.telemetry.\n                               group_change_detect_interval,\n                               next_run_time=datetime.datetime.now(),\n                               id=self.watch_job_id)\n        LOG.info('Created watch for group membership change for group {0}.'\n                 .format(group))\n        self.cleanup_job_id = uuidutils.generate_uuid()\n        self.scheduler.add_job(self.process_cleanup, 'interval',\n                               seconds=CONF.telemetry.process_cleanup_interval,\n                               next_run_time=datetime.datetime.now(),\n                               id=self.cleanup_job_id)\n        LOG.info('Created process cleanup background job for group {0}.'\n                 .format(group))\n\n    def on_process_join(self, event):\n        LOG.info('Member %s joined the group %s' % (event.member_id,\n                                                    event.group_id))\n        host = event.group_id.decode('utf-8')\n        if self.watcher:\n            LOG.info('Processes in current node {0}'\n                     .format(self.watcher.get_members(host)))\n\n    def on_process_leave(self, event):\n        LOG.info('Member %s left the group %s' % (event.member_id,\n                                                  event.group_id))\n        executor_topic = event.member_id.decode('utf-8')\n        name = executor_topic.split(':')[1]\n        if name in self.executor_map.keys():\n            host = event.group_id.decode('utf-8')\n            LOG.info(\"Re-create process {0} in {1} that is handling tasks\"\n                     .format(executor_topic, host))\n            launcher = self.create_process(executor_topic, host)\n            self.executor_map[name][\"launcher\"] = launcher\n            context = ctxt.get_admin_context()\n            for storage_id in self.executor_map[name][\"storages\"]:\n                tasks, failed_tasks = self.get_all_tasks(storage_id)\n                for task in tasks:\n                    LOG.info(\"Re-scheduling task {0} of storage {1}\"\n                             .format(task['id'], storage_id))\n                    self.rpcapi.assign_job_local(\n                        context, task['id'], executor_topic)\n\n                for f_task in failed_tasks:\n                    LOG.info(\"Re-scheduling failed failed task {0},\"\n                             \" of storage {1}\"\n                             .format(f_task['id'], storage_id))\n                    self.rpcapi.assign_failed_job_local(\n                        context, f_task['id'], executor_topic)\n\n    def process_cleanup(self):\n        LOG.info('Periodic process cleanup called')\n        executor_names = self.executor_map.keys()\n\n        # Collect all names to delete\n        names_to_delete = []\n        for name in executor_names:\n            if len(self.executor_map[name][\"storages\"]) == 0:\n                delay = self.executor_map[name][\"cleanup_delay\"]\n                if delay < 0:\n                    LOG.info(\"Cleanup delay for local executor {0} expired\"\n                             .format(name))\n                    names_to_delete.append(name)\n                else:\n                    LOG.info(\"Delay cleanup for local executor {0} for {1}\"\n                             .format(name, delay))\n                    delay = delay - 
CONF.telemetry.process_cleanup_interval\n                    self.executor_map[name][\"cleanup_delay\"] = delay\n        # Delete names\n        for name in names_to_delete:\n            self.executor_map[name][\"launcher\"].stop()\n            self.executor_map.pop(name)\n\n    def create_process(self, topic=None, host=None):\n        metrics_task_server = service. \\\n            MetricsService.create(binary='delfin-task',\n                                  topic=topic,\n                                  host=host,\n                                  manager='delfin.'\n                                          'task_manager.'\n                                          'subprocess_manager.'\n                                          'SubprocessManager',\n                                  coordination=False)\n        launcher = oslo_ser.ProcessLauncher(CONF)\n        launcher.launch_service(metrics_task_server, workers=1)\n        return launcher\n\n    def get_local_executor(self, context, task_id, failed_task_id, executor):\n        executor_names = self.executor_map.keys()\n        storage_id = None\n        if task_id:\n            job = db.task_get(context, task_id)\n            storage_id = job['storage_id']\n        elif failed_task_id:\n            job = db.failed_task_get(context, failed_task_id)\n            storage_id = job['storage_id']\n        else:\n            raise exception.InvalidInput(\"Missing task id\")\n\n        # Storage already exists\n        for name in executor_names:\n            executor_topic = \"{0}:{1}\".format(executor, name)\n            if storage_id in self.executor_map[name][\"storages\"]:\n                return executor_topic\n\n        # Return existing executor_topic\n        for name in executor_names:\n            no_of_storages = len(self.executor_map[name][\"storages\"])\n            if no_of_storages and (no_of_storages <\n                                   CONF.telemetry.max_storages_in_child):\n                executor_topic = \"{0}:{1}\".format(executor, name)\n                LOG.info(\"Selecting existing local executor {0} for {1}\"\n                         .format(executor_topic, storage_id))\n                self.executor_map[name][\"storages\"].append(storage_id)\n                return executor_topic\n\n        # Return executor_topic after creating one\n        for index in range(CONF.telemetry.max_childs_in_node):\n            name = \"executor_{0}\".format(index + 1)\n            if name not in executor_names:\n                executor_topic = \"{0}:{1}\".format(executor, name)\n                LOG.info(\"Create a new local executor {0} for {1}\"\n                         .format(executor_topic, storage_id))\n                launcher = self.create_process(\n                    topic=executor_topic, host=executor)\n                self.executor_map[name] = {\n                    \"storages\": [storage_id],\n                    \"launcher\": launcher,\n                    \"cleanup_delay\": 0\n                }\n                return executor_topic\n\n        msg = \"Reached maximum number of ({0}) local executors\". 
\\\n            format(CONF.telemetry.max_childs_in_node)\n        LOG.error(msg)\n        raise RuntimeError(msg)\n\n    def get_all_tasks(self, storage_id):\n        filters = {'storage_id': storage_id,\n                   'deleted': False}\n        context = ctxt.get_admin_context()\n        tasks = db.task_get_all(context, filters=filters)\n        failed_tasks = db.failed_task_get_all(context, filters=filters)\n        return tasks, failed_tasks\n\n    def stop_executor(self, name, local_executor, storage_id):\n        LOG.info(\"Stop and remove local executor {0}\"\n                 .format(local_executor))\n        if storage_id in self.executor_map[name][\"storages\"]:\n            self.executor_map[name][\"storages\"].remove(storage_id)\n        self.executor_map[name][\"cleanup_delay\"] = \\\n            CONF.telemetry.task_cleanup_delay\n\n    def stop(self):\n        \"\"\"Cleanup periodic jobs\"\"\"\n        if self.watch_job_id:\n            self.scheduler.remove_job(self.watch_job_id)\n        if self.cleanup_job_id:\n            self.scheduler.remove_job(self.cleanup_job_id)\n        if self.group and self.watcher:\n            self.watcher.delete_group(self.group)\n        if self.watcher:\n            self.watcher.stop()\n        if self.scheduler:\n            self.scheduler.shutdown()\n        self.watch_job_id = None\n        self.cleanup_job_id = None\n        self.group = None\n        self.watcher = None\n"
  },
  {
    "path": "delfin/task_manager/metrics_rpcapi.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nClient side of the metrics task manager RPC API.\n\"\"\"\n\nimport oslo_messaging as messaging\nfrom oslo_config import cfg\n\nfrom delfin import rpc\n\nCONF = cfg.CONF\n\n\nclass TaskAPI(object):\n    \"\"\"Client side of the metrics task rpc API.\n\n    API version history:\n\n        1.0 - Initial version.\n    \"\"\"\n\n    RPC_API_VERSION = '1.0'\n\n    def __init__(self):\n        super(TaskAPI, self).__init__()\n        self.target = messaging.Target(topic=CONF.host,\n                                       version=self.RPC_API_VERSION)\n        self.client = rpc.get_client(self.target,\n                                     version_cap=self.RPC_API_VERSION)\n\n    def get_client(self, topic):\n        target = messaging.Target(topic=topic,\n                                  version=self.RPC_API_VERSION)\n        return rpc.get_client(target, version_cap=self.RPC_API_VERSION)\n\n    def assign_job(self, context, task_id, executor):\n        rpc_client = self.get_client(str(executor))\n        call_context = rpc_client.prepare(topic=str(executor), version='1.0',\n                                          fanout=True)\n        return call_context.cast(context, 'assign_job',\n                                 task_id=task_id, executor=executor)\n\n    def remove_job(self, context, task_id, executor):\n        rpc_client = self.get_client(str(executor))\n        call_context = rpc_client.prepare(topic=str(executor), version='1.0',\n                                          fanout=True)\n        return call_context.cast(context, 'remove_job',\n                                 task_id=task_id, executor=executor)\n\n    def assign_failed_job(self, context, failed_task_id, executor):\n        rpc_client = self.get_client(str(executor))\n        call_context = rpc_client.prepare(topic=str(executor), version='1.0',\n                                          fanout=True)\n        return call_context.cast(context, 'assign_failed_job',\n                                 failed_task_id=failed_task_id,\n                                 executor=executor)\n\n    def remove_failed_job(self, context, failed_task_id, executor):\n        rpc_client = self.get_client(str(executor))\n        call_context = rpc_client.prepare(topic=str(executor), version='1.0',\n                                          fanout=True)\n        return call_context.cast(context, 'remove_failed_job',\n                                 failed_task_id=failed_task_id,\n                                 executor=executor)\n\n    def create_perf_job(self, context, task_id):\n        rpc_client = self.get_client('JobGenerator')\n        call_context = rpc_client.prepare(topic='JobGenerator', version='1.0')\n        return call_context.cast(context, 'add_new_job',\n                                 task_id=task_id)\n"
  },
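  {
    "path": "delfin/task_manager/metrics_rpcapi_usage_sketch.py",
    "content": "# Hypothetical usage sketch, illustration only -- this module is not part\n# of the delfin source tree. It shows how the metrics TaskAPI above is\n# typically driven: create_perf_job casts 'add_new_job' on the shared\n# 'JobGenerator' topic, while assign_job fans a task out to an executor\n# topic. All names follow delfin/task_manager/metrics_rpcapi.py.\n\nfrom delfin import context as ctxt\nfrom delfin.task_manager import metrics_rpcapi\n\n\ndef enqueue_perf_job(task_id, executor=None):\n    # Casts are asynchronous; nothing is returned on success.\n    admin_context = ctxt.get_admin_context()\n    rpcapi = metrics_rpcapi.TaskAPI()\n    # Hand the task to the leader-side job generator for distribution.\n    rpcapi.create_perf_job(admin_context, task_id)\n    if executor:\n        # Or target a specific executor topic directly.\n        rpcapi.assign_job(admin_context, task_id, executor)\n"
  },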
  {
    "path": "delfin/task_manager/perf_job_controller.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nClient side of the metrics task manager RPC API.\n\"\"\"\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom delfin import db\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.task_manager import metrics_rpcapi\n\nLOG = log.getLogger(__name__)\nCONF = cfg.CONF\n\n\ndef create_perf_job(context, storage_id, capabilities):\n    # Add it to db\n    # Check resource_metric attribute availability and\n    # check if resource_metric is empty\n    if 'resource_metrics' not in capabilities \\\n            or not bool(capabilities.get('resource_metrics')):\n        raise exception.EmptyResourceMetrics()\n\n    task = dict()\n    task.update(storage_id=storage_id)\n    task.update(args=capabilities.get('resource_metrics'))\n    task.update(interval=capabilities.get('collect_interval')\n                if capabilities.get('collect_interval')\n                else CONF.telemetry.performance_collection_interval)\n    task.update(method=constants.TelemetryCollection.PERFORMANCE_TASK_METHOD)\n    db.task_create(context=context, values=task)\n    # Add it to RabbitMQ\n    filters = {'storage_id': storage_id}\n    task_id = db.task_get_all(context, filters=filters)[0].get('id')\n    metrics_rpcapi.TaskAPI().create_perf_job(context, task_id)\n\n\ndef delete_perf_job(context, storage_id):\n    # Delete it from scheduler\n    filters = {'storage_id': storage_id}\n    tasks = db.task_get_all(context, filters=filters)\n    failed_tasks = db.failed_task_get_all(context, filters=filters)\n    for task in tasks:\n        metrics_rpcapi.TaskAPI().remove_job(context, task.get('id'),\n                                            task.get('executor'))\n    for failed_task in failed_tasks:\n        metrics_rpcapi.TaskAPI().remove_failed_job(context,\n                                                   failed_task.get('id'),\n                                                   failed_task.get('executor'))\n\n    # Soft delete tasks\n    db.task_delete_by_storage(context, storage_id)\n    db.failed_task_delete_by_storage(context, storage_id)\n"
  },
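  {
    "path": "delfin/task_manager/perf_job_controller_usage_sketch.py",
    "content": "# Hypothetical usage sketch, illustration only -- this module is not part\n# of the delfin source tree. It sketches the perf_job_controller entry\n# points: a capabilities dict with a non-empty 'resource_metrics' key is\n# required, otherwise create_perf_job raises EmptyResourceMetrics. The\n# capabilities shape below is an assumption for illustration.\n\nfrom delfin import context as ctxt\nfrom delfin.task_manager import perf_job_controller\n\n\ndef register_and_unregister(storage_id):\n    admin_context = ctxt.get_admin_context()\n    capabilities = {\n        # Assumed illustrative shape: resources mapped to metric names.\n        'resource_metrics': {'storage': ['iops', 'throughput']},\n        # Optional; falls back to the configured collection interval.\n        'collect_interval': 900,\n    }\n    # Persists a task row and casts it to the 'JobGenerator' topic.\n    perf_job_controller.create_perf_job(admin_context, storage_id,\n                                        capabilities)\n    # Removes scheduled jobs and soft-deletes the task rows.\n    perf_job_controller.delete_perf_job(admin_context, storage_id)\n"
  },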
  {
    "path": "delfin/task_manager/rpcapi.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nClient side of the task manager RPC API.\n\"\"\"\n\nimport oslo_messaging as messaging\nfrom oslo_config import cfg\n\nfrom delfin import rpc\n\nCONF = cfg.CONF\n\n\nclass TaskAPI(object):\n    \"\"\"Client side of the task rpc API.\n\n    API version history:\n\n        1.0 - Initial version.\n    \"\"\"\n\n    RPC_API_VERSION = '1.0'\n\n    def __init__(self):\n        super(TaskAPI, self).__init__()\n        target = messaging.Target(topic=CONF.delfin_task_topic,\n                                  version=self.RPC_API_VERSION)\n        self.client = rpc.get_client(target, version_cap=self.RPC_API_VERSION)\n\n    def sync_storage_resource(self, context, storage_id, resource_task):\n        call_context = self.client.prepare(version='1.0')\n        return call_context.cast(context,\n                                 'sync_storage_resource',\n                                 storage_id=storage_id,\n                                 resource_task=resource_task)\n\n    def collect_telemetry(self, context, storage_id, telemetry_task, args,\n                          start_time, end_time):\n        call_context = self.client.prepare(version='1.0')\n        return call_context.call(context,\n                                 'collect_telemetry',\n                                 storage_id=storage_id,\n                                 telemetry_task=telemetry_task,\n                                 args=args,\n                                 start_time=start_time,\n                                 end_time=end_time)\n\n    def remove_storage_resource(self, context, storage_id, resource_task):\n        call_context = self.client.prepare(version='1.0')\n        return call_context.cast(context,\n                                 'remove_storage_resource',\n                                 storage_id=storage_id,\n                                 resource_task=resource_task)\n\n    def remove_storage_in_cache(self, context, storage_id):\n        call_context = self.client.prepare(version='1.0', fanout=True)\n        return call_context.cast(context,\n                                 'remove_storage_in_cache',\n                                 storage_id=storage_id)\n\n    def remove_telemetry_instances(self, context, storage_id, telemetry_task):\n        call_context = self.client.prepare(version='1.0', fanout=True)\n        return call_context.cast(context,\n                                 'remove_telemetry_instances',\n                                 storage_id=storage_id,\n                                 telemetry_task=telemetry_task)\n\n    def sync_storage_alerts(self, context, storage_id, query_para):\n        call_context = self.client.prepare(version='1.0')\n        return call_context.cast(context,\n                                 'sync_storage_alerts',\n                                 storage_id=storage_id,\n                                 query_para=query_para)\n\n    def 
clear_storage_alerts(self, context, storage_id, sequence_number_list):\n        call_context = self.client.prepare(version='1.0')\n        return call_context.call(context,\n                                 'clear_storage_alerts',\n                                 storage_id=storage_id,\n                                 sequence_number_list=sequence_number_list)\n"
  },
  {
    "path": "delfin/task_manager/scheduler/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/task_manager/scheduler/schedule_manager.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import datetime\n\nimport six\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom oslo_log import log\nfrom oslo_utils import uuidutils\n\nfrom delfin import context\nfrom delfin import db\nfrom delfin import service\nfrom delfin import utils\nfrom delfin.coordination import ConsistentHashing\nfrom delfin.leader_election.distributor.task_distributor \\\n    import TaskDistributor\nfrom delfin.task_manager import metrics_rpcapi as task_rpcapi\n\nLOG = log.getLogger(__name__)\n\n\n@six.add_metaclass(utils.Singleton)\nclass SchedulerManager(object):\n\n    GROUP_CHANGE_DETECT_INTERVAL_SEC = 30\n\n    def __init__(self, scheduler=None):\n        if not scheduler:\n            scheduler = BackgroundScheduler()\n        self.scheduler = scheduler\n        self.scheduler_started = False\n        self.ctx = context.get_admin_context()\n        self.task_rpcapi = task_rpcapi.TaskAPI()\n        self.watch_job_id = None\n\n    def start(self):\n        \"\"\" Initialise the schedulers for periodic job creation\n        \"\"\"\n        if not self.scheduler_started:\n            self.scheduler.start()\n            self.scheduler_started = True\n\n    def on_node_join(self, event):\n        # A new node joined the group, all the job would be re-distributed.\n        # If the job is already on the node, it would be ignore and would\n        # not be scheduled again\n        LOG.info('Member %s joined the group %s' % (event.member_id,\n                                                    event.group_id))\n        # Get all the jobs\n        filters = {'deleted': False}\n        tasks = db.task_get_all(self.ctx, filters=filters)\n        distributor = TaskDistributor(self.ctx)\n        partitioner = ConsistentHashing()\n        partitioner.start()\n        for task in tasks:\n            # Get the specific executor\n            origin_executor = task['executor']\n            # If the target executor is different from current executor,\n            # remove the job from old executor and add it to new executor\n            new_executor = partitioner.get_task_executor(task['id'])\n            if new_executor != origin_executor:\n                LOG.info('Re-distribute job %s from %s to %s' %\n                         (task['id'], origin_executor, new_executor))\n                self.task_rpcapi.remove_job(self.ctx, task['id'],\n                                            task['executor'])\n            distributor.distribute_new_job(task['id'])\n        failed_tasks = db.failed_task_get_all(self.ctx, filters=filters)\n        for failed_task in failed_tasks:\n            # Get the parent task executor\n            task = db.task_get(self.ctx, failed_task['task_id'])\n            origin_executor = failed_task['executor']\n            new_executor = task['executor']\n            # If the target executor is different from current executor,\n            # remove the job from 
old executor and add it to new executor\n            if new_executor != origin_executor:\n                LOG.info('Re-distribute failed_job %s from %s to %s' %\n                         (failed_task['id'], origin_executor, new_executor))\n                self.task_rpcapi.remove_failed_job(\n                    self.ctx, failed_task['id'], failed_task['executor'])\n            distributor.distribute_failed_job(failed_task['id'],\n                                              task['executor'])\n        partitioner.stop()\n\n    def on_node_leave(self, event):\n        LOG.info('Member %s left the group %s' % (event.member_id,\n                                                  event.group_id))\n        filters = {'executor': event.member_id.decode('utf-8'),\n                   'deleted': False}\n        re_distribute_tasks = db.task_get_all(self.ctx, filters=filters)\n        distributor = TaskDistributor(self.ctx)\n        for task in re_distribute_tasks:\n            distributor.distribute_new_job(task['id'])\n\n        re_distribute_failed_tasks = db.failed_task_get_all(self.ctx,\n                                                            filters=filters)\n        for failed_task in re_distribute_failed_tasks:\n            task = db.task_get(self.ctx, failed_task['task_id'])\n            executor = task['executor']\n            distributor.distribute_failed_job(failed_task['id'], executor)\n\n    def schedule_boot_jobs(self):\n        # Recover the job in db\n        self.recover_job()\n        self.recover_failed_job()\n        # Start the consumer of job creation message\n        job_generator = service. \\\n            TaskService.create(binary='delfin-task',\n                               topic='JobGenerator',\n                               manager='delfin.'\n                                       'leader_election.'\n                                       'distributor.'\n                                       'perf_job_manager.'\n                                       'PerfJobManager',\n                               coordination=True)\n        service.serve(job_generator)\n        partitioner = ConsistentHashing()\n        partitioner.start()\n        partitioner.register_watcher_func(self.on_node_join,\n                                          self.on_node_leave)\n        self.watch_job_id = uuidutils.generate_uuid()\n        self.scheduler.add_job(partitioner.watch_group_change, 'interval',\n                               seconds=self.GROUP_CHANGE_DETECT_INTERVAL_SEC,\n                               next_run_time=datetime.now(),\n                               id=self.watch_job_id)\n\n    def stop(self):\n        \"\"\"Cleanup periodic jobs\"\"\"\n        if self.watch_job_id:\n            self.scheduler.remove_job(self.watch_job_id)\n\n    def get_scheduler(self):\n        return self.scheduler\n\n    def recover_job(self):\n        filters = {'deleted': False}\n        all_tasks = db.task_get_all(self.ctx, filters=filters)\n        distributor = TaskDistributor(self.ctx)\n        for task in all_tasks:\n            distributor.distribute_new_job(task['id'])\n\n    def recover_failed_job(self):\n        filters = {'deleted': False}\n        all_failed_tasks = db.failed_task_get_all(self.ctx, filters=filters)\n        distributor = TaskDistributor(self.ctx)\n        for failed_task in all_failed_tasks:\n            task = db.task_get(self.ctx, failed_task['task_id'])\n            executor = task['executor']\n            distributor.distribute_failed_job(failed_task['id'], 
executor)\n"
  },
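  {
    "path": "delfin/task_manager/scheduler/schedule_manager_usage_sketch.py",
    "content": "# Hypothetical usage sketch, illustration only -- this module is not part\n# of the delfin source tree. SchedulerManager is a singleton wrapping an\n# APScheduler BackgroundScheduler; schedule_boot_jobs() re-distributes the\n# tasks and failed tasks found in the database and starts watching for\n# group membership changes through ConsistentHashing.\n\nfrom delfin.task_manager.scheduler import schedule_manager\n\n\ndef start_scheduling():\n    manager = schedule_manager.SchedulerManager()\n    # Idempotent: the underlying scheduler is started only once.\n    manager.start()\n    # Recover jobs from the database and begin watching node join/leave.\n    manager.schedule_boot_jobs()\n    return manager.get_scheduler()\n"
  },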
  {
    "path": "delfin/task_manager/scheduler/schedulers/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/task_manager/scheduler/schedulers/telemetry/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/task_manager/scheduler/schedulers/telemetry/failed_performance_collection_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport six\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom delfin import db\nfrom delfin import exception\nfrom delfin.common.constants import TelemetryJobStatus, TelemetryCollection\nfrom delfin.db.sqlalchemy.models import FailedTask\nfrom delfin.db.sqlalchemy.models import Task\nfrom delfin.i18n import _\nfrom delfin.task_manager.scheduler import schedule_manager\nfrom delfin.task_manager.tasks.telemetry import PerformanceCollectionTask\nfrom delfin.task_manager import metrics_rpcapi as metrics_task_rpcapi\n\nLOG = log.getLogger(__name__)\nCONF = cfg.CONF\n\n\nclass FailedPerformanceCollectionHandler(object):\n    def __init__(self, ctx, failed_task_id, storage_id, args, job_id,\n                 retry_count, start_time, end_time, executor):\n        self.ctx = ctx\n        self.failed_task_id = failed_task_id\n        self.retry_count = retry_count\n        self.storage_id = storage_id\n        self.job_id = job_id\n        self.args = args\n        self.start_time = start_time\n        self.end_time = end_time\n        self.metrics_task_rpcapi = metrics_task_rpcapi.TaskAPI()\n        self.scheduler_instance = \\\n            schedule_manager.SchedulerManager().get_scheduler()\n        self.result = TelemetryJobStatus.FAILED_JOB_STATUS_INIT\n        self.executor = executor\n\n    @staticmethod\n    def get_instance(ctx, failed_task_id):\n        failed_task = db.failed_task_get(ctx, failed_task_id)\n        task = db.task_get(ctx, failed_task[FailedTask.task_id.name])\n        return FailedPerformanceCollectionHandler(\n            ctx,\n            failed_task[FailedTask.id.name],\n            task[Task.storage_id.name],\n            task[Task.args.name],\n            failed_task[FailedTask.job_id.name],\n            failed_task[FailedTask.retry_count.name],\n            failed_task[FailedTask.start_time.name],\n            failed_task[FailedTask.end_time.name],\n            failed_task[FailedTask.executor.name],\n        )\n\n    def __call__(self):\n        # Upon periodic job callback, if storage is already deleted or soft\n        # deleted,do not proceed with failed performance collection flow\n        try:\n            failed_task = db.failed_task_get(self.ctx, self.failed_task_id)\n            if failed_task[\"deleted\"]:\n                LOG.debug('Storage %s getting deleted, ignoring '\n                          'performance collection cycle for failed task id %s.'\n                          % (self.storage_id, self.failed_task_id))\n                return\n        except exception.FailedTaskNotFound:\n            LOG.debug('Storage %s already deleted, ignoring '\n                      'performance collection cycle for failed task id %s.'\n                      % (self.storage_id, self.failed_task_id))\n            return\n\n        self.retry_count = self.retry_count + 1\n        try:\n            telemetry = PerformanceCollectionTask()\n            status 
= telemetry.collect(self.ctx, self.storage_id, self.args,\n                                       self.start_time, self.end_time)\n\n            if not status:\n                raise exception.TelemetryTaskExecError()\n        except Exception as e:\n            LOG.error(e)\n            msg = _(\"Failed to collect performance metrics for storage \"\n                    \"id:{0}, reason:{1}\".format(self.storage_id,\n                                                six.text_type(e)))\n            LOG.error(msg)\n        else:\n            LOG.info(\"Successfully completed Performance metrics collection \"\n                     \"for storage id :{0} \".format(self.storage_id))\n            self.result = TelemetryJobStatus.FAILED_JOB_STATUS_SUCCESS\n            self._stop_task()\n            return\n\n        if self.retry_count >= TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT:\n            msg = _(\n                \"Failed to collect performance metrics of task instance \"\n                \"id:{0} for start time:{1} and end time:{2} with \"\n                \"maximum retry. Giving up on \"\n                \"retry\".format(self.failed_task_id, self.start_time,\n                               self.end_time))\n            LOG.error(msg)\n            self._stop_task()\n            return\n\n        self.result = TelemetryJobStatus.FAILED_JOB_STATUS_RETRYING\n        db.failed_task_update(self.ctx, self.failed_task_id,\n                              {FailedTask.retry_count.name: self.retry_count,\n                               FailedTask.result.name: self.result})\n\n    def _stop_task(self):\n        db.failed_task_update(self.ctx, self.failed_task_id,\n                              {FailedTask.retry_count.name: self.retry_count,\n                               FailedTask.result.name: self.result})\n        self.metrics_task_rpcapi.remove_failed_job(self.ctx,\n                                                   self.failed_task_id,\n                                                   self.executor)\n"
  },
  {
    "path": "delfin/task_manager/scheduler/schedulers/telemetry/job_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import datetime\n\nimport six\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_utils import uuidutils, importutils\n\nfrom delfin import db\nfrom delfin.common.constants import TelemetryCollection, TelemetryJobStatus\nfrom delfin.exception import TaskNotFound\nfrom delfin.i18n import _\nfrom delfin.task_manager import rpcapi as task_rpcapi\nfrom delfin.task_manager.scheduler import schedule_manager\nfrom delfin.task_manager.tasks.telemetry import PerformanceCollectionTask\n\nCONF = cfg.CONF\nLOG = log.getLogger(__name__)\n\n\nclass JobHandler(object):\n    def __init__(self, ctx, task_id, storage_id, args, interval):\n        # create an object of periodic task scheduler\n        self.ctx = ctx\n        self.task_id = task_id\n        self.storage_id = storage_id\n        self.args = args\n        self.interval = interval\n        self.task_rpcapi = task_rpcapi.TaskAPI()\n        self.scheduler = schedule_manager.SchedulerManager().get_scheduler()\n        self.stopped = False\n        self.job_ids = set()\n\n    @staticmethod\n    def get_instance(ctx, task_id):\n        task = db.task_get(ctx, task_id)\n        return JobHandler(ctx, task_id, task['storage_id'],\n                          task['args'], task['interval'])\n\n    def perform_history_collection(self, start_time, end_time, last_run_time):\n        # Trigger one historic collection to make sure we do not\n        # miss any Data points due to reschedule\n        LOG.debug('Triggering one historic collection for task %s',\n                  self.task_id)\n        try:\n            telemetry = PerformanceCollectionTask()\n            ret = telemetry.collect(self.ctx, self.storage_id, self.args,\n                                    start_time, end_time)\n            LOG.debug('Historic collection performed for task %s with '\n                      'result %s' % (self.task_id, ret))\n            db.task_update(self.ctx, self.task_id,\n                           {'last_run_time': last_run_time})\n        except Exception as e:\n            msg = _(\"Failed to collect performance metrics during history \"\n                    \"collection for storage id:{0}, reason:{1}\"\n                    .format(self.storage_id, six.text_type(e)))\n            LOG.error(msg)\n\n    def schedule_job(self, task_id):\n\n        if self.stopped:\n            # If Job is stopped return immediately\n            return\n\n        LOG.info(\"JobHandler received A job %s to schedule\" % task_id)\n        job = db.task_get(self.ctx, task_id)\n        # Check delete status of the task\n        deleted = job['deleted']\n        if deleted:\n            return\n        collection_class = importutils.import_class(\n            job['method'])\n        instance = collection_class.get_instance(self.ctx, self.task_id)\n        current_time = int(datetime.now().timestamp())\n        last_run_time = current_time\n        
next_collection_time = last_run_time + job['interval']\n        job_id = uuidutils.generate_uuid()\n        next_collection_time = datetime \\\n            .fromtimestamp(next_collection_time) \\\n            .strftime('%Y-%m-%d %H:%M:%S')\n\n        existing_job_id = job['job_id']\n\n        scheduler_job = self.scheduler.get_job(existing_job_id)\n\n        if not (existing_job_id and scheduler_job):\n            LOG.info('JobHandler scheduling a new job')\n            self.scheduler.add_job(\n                instance, 'interval', seconds=job['interval'],\n                next_run_time=next_collection_time, id=job_id,\n                misfire_grace_time=int(job['interval'] / 2))\n\n            update_task_dict = {'job_id': job_id}\n            db.task_update(self.ctx, self.task_id, update_task_dict)\n            self.job_ids.add(job_id)\n            LOG.info('Periodic collection tasks scheduled for for job id: '\n                     '%s ' % self.task_id)\n\n            # Check if historic collection is needed for this task.\n            # If the last run time is already set, adjust start_time based on\n            # last run time or history_on_reschedule which is smaller\n            # If jod id is created but last run time is not yet set, then\n            # adjust start_time based on interval or history_on_reschedule\n            # whichever is smaller\n\n            end_time = current_time * 1000\n            # Maximum supported history duration on restart\n            history_on_reschedule = CONF.telemetry. \\\n                performance_history_on_reschedule\n            if job['last_run_time']:\n                start_time = job['last_run_time'] * 1000 \\\n                    if current_time - job['last_run_time'] < \\\n                    history_on_reschedule \\\n                    else (end_time - history_on_reschedule * 1000)\n                self.perform_history_collection(start_time, end_time,\n                                                last_run_time)\n            elif existing_job_id:\n                interval_in_sec = job['interval']\n                start_time = (end_time - interval_in_sec * 1000) \\\n                    if interval_in_sec < history_on_reschedule \\\n                    else (end_time - history_on_reschedule * 1000)\n                self.perform_history_collection(start_time, end_time,\n                                                last_run_time)\n        else:\n            LOG.info('Job already exists with this scheduler')\n\n    def stop(self):\n        self.stopped = True\n        for job_id in self.job_ids.copy():\n            self.remove_scheduled_job(job_id)\n        LOG.info(\"Stopping telemetry jobs\")\n\n    def remove_scheduled_job(self, job_id):\n        if job_id in self.job_ids:\n            self.job_ids.remove(job_id)\n        if job_id and self.scheduler.get_job(job_id):\n            self.scheduler.remove_job(job_id)\n\n    def remove_job(self, task_id):\n        try:\n            LOG.info(\"Received job %s to remove\", task_id)\n            job = db.task_get(self.ctx, task_id)\n            job_id = job['job_id']\n            self.remove_scheduled_job(job_id)\n        except Exception as e:\n            LOG.error(\"Failed to remove periodic scheduling job , reason: %s.\",\n                      six.text_type(e))\n\n\nclass FailedJobHandler(object):\n    def __init__(self, ctx):\n        # create an object of periodic failed task scheduler\n        self.scheduler = schedule_manager.SchedulerManager().get_scheduler()\n        self.ctx 
= ctx\n        self.stopped = False\n        self.job_ids = set()\n\n    @staticmethod\n    def get_instance(ctx, failed_task_id):\n        return FailedJobHandler(ctx)\n\n    def schedule_failed_job(self, failed_task_id):\n\n        if self.stopped:\n            return\n\n        try:\n            job = db.failed_task_get(self.ctx, failed_task_id)\n            retry_count = job['retry_count']\n            result = job['result']\n            job_id = job['job_id']\n            if retry_count >= \\\n                    TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT or \\\n                    result == TelemetryJobStatus.FAILED_JOB_STATUS_SUCCESS:\n                LOG.info(\"Exiting Failure task processing for task [%d] \"\n                         \"with result [%s] and retry count [%d] \"\n                         % (job['id'], result, retry_count))\n                self._teardown_task(self.ctx, job['id'], job_id)\n                return\n            # If job already scheduled, skip\n            if job_id and self.scheduler.get_job(job_id):\n                return\n\n            try:\n                db.task_get(self.ctx, job['task_id'])\n            except TaskNotFound as e:\n                LOG.info(\"Removing failed telemetry job as parent job \"\n                         \"do not exist: %s\", six.text_type(e))\n                # tear down if original task is not available\n                self._teardown_task(self.ctx, job['id'],\n                                    job_id)\n                return\n\n            if not (job_id and self.scheduler.get_job(job_id)):\n                job_id = uuidutils.generate_uuid()\n                db.failed_task_update(self.ctx, job['id'],\n                                      {'job_id': job_id})\n\n                collection_class = importutils.import_class(\n                    job['method'])\n                instance = \\\n                    collection_class.get_instance(self.ctx, job['id'])\n                self.scheduler.add_job(\n                    instance, 'interval',\n                    seconds=job['interval'],\n                    next_run_time=datetime.now(), id=job_id,\n                    misfire_grace_time=int(job['interval'] / 2))\n                self.job_ids.add(job_id)\n\n        except Exception as e:\n            LOG.error(\"Failed to schedule retry tasks for performance \"\n                      \"collection, reason: %s\", six.text_type(e))\n        else:\n            LOG.info(\"Schedule collection completed\")\n\n    def _teardown_task(self, ctx, failed_task_id, job_id):\n        db.failed_task_delete(ctx, failed_task_id)\n        self.remove_scheduled_job(job_id)\n\n    def remove_scheduled_job(self, job_id):\n        if job_id in self.job_ids:\n            self.job_ids.remove(job_id)\n        if job_id and self.scheduler.get_job(job_id):\n            self.scheduler.remove_job(job_id)\n\n    def stop(self):\n        self.stopped = True\n        for job_id in self.job_ids.copy():\n            self.remove_scheduled_job(job_id)\n\n    def remove_failed_job(self, failed_task_id):\n        try:\n            LOG.info(\"Received failed job %s to remove\", failed_task_id)\n            job = db.failed_task_get(self.ctx, failed_task_id)\n            job_id = job['job_id']\n            self.remove_scheduled_job(job_id)\n            db.failed_task_delete(self.ctx, job['id'])\n            LOG.info(\"Removed failed_task entry  %s \", job['id'])\n        except Exception as e:\n            LOG.error(\"Failed to remove periodic scheduling job 
, reason: %s.\",\n                      six.text_type(e))\n\n    @classmethod\n    def job_interval(cls):\n        return TelemetryCollection.FAILED_JOB_SCHEDULE_INTERVAL\n"
  },
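  {
    "path": "delfin/task_manager/scheduler/schedulers/telemetry/job_handler_usage_sketch.py",
    "content": "# Hypothetical usage sketch, illustration only -- this module is not part\n# of the delfin source tree. JobHandler/FailedJobHandler are looked up per\n# task through get_instance() and then asked to (un)schedule the periodic\n# APScheduler job for that task, which is what the SubprocessManager RPC\n# endpoints do with them.\n\nfrom delfin import context as ctxt\nfrom delfin.task_manager.scheduler.schedulers.telemetry.job_handler \\\n    import FailedJobHandler, JobHandler\n\n\ndef schedule(task_id):\n    admin_context = ctxt.get_admin_context()\n    JobHandler.get_instance(admin_context, task_id).schedule_job(task_id)\n\n\ndef retry(failed_task_id):\n    admin_context = ctxt.get_admin_context()\n    handler = FailedJobHandler.get_instance(admin_context, failed_task_id)\n    handler.schedule_failed_job(failed_task_id)\n"
  },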
  {
    "path": "delfin/task_manager/scheduler/schedulers/telemetry/performance_collection_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import datetime\n\nimport six\nfrom oslo_config import cfg\n\n\nfrom oslo_log import log\n\nfrom delfin import db\nfrom delfin import exception\nfrom delfin.common.constants import TelemetryCollection\nfrom delfin.db.sqlalchemy.models import FailedTask\nfrom delfin.drivers import api as driverapi\nfrom delfin.task_manager import metrics_rpcapi as metrics_task_rpcapi\nfrom delfin.task_manager.scheduler import schedule_manager\nfrom delfin.task_manager.scheduler.schedulers.telemetry. \\\n    failed_performance_collection_handler import \\\n    FailedPerformanceCollectionHandler\nfrom delfin.task_manager.tasks.telemetry import PerformanceCollectionTask\n\nCONF = cfg.CONF\nLOG = log.getLogger(__name__)\nCONF = cfg.CONF\n\n\nclass PerformanceCollectionHandler(object):\n    def __init__(self, ctx, task_id, storage_id, args, interval, executor):\n        self.ctx = ctx\n        self.task_id = task_id\n        self.storage_id = storage_id\n        self.args = args\n        self.interval = interval\n        self.metric_task_rpcapi = metrics_task_rpcapi.TaskAPI()\n        self.driver_api = driverapi.API()\n        self.executor = executor\n        self.scheduler = schedule_manager.SchedulerManager().get_scheduler()\n\n    @staticmethod\n    def get_instance(ctx, task_id):\n        task = db.task_get(ctx, task_id)\n        return PerformanceCollectionHandler(ctx, task_id, task['storage_id'],\n                                            task['args'], task['interval'],\n                                            task['executor'])\n\n    def __call__(self):\n        # Upon periodic job callback, if storage is already deleted or soft\n        # deleted,do not proceed with performance collection flow\n        try:\n            task = db.task_get(self.ctx, self.task_id)\n            if task[\"deleted\"]:\n                LOG.debug('Storage %s getting deleted, ignoring performance '\n                          'collection cycle for task id %s.'\n                          % (self.storage_id, self.task_id))\n                return\n        except exception.TaskNotFound:\n            LOG.debug('Storage %s already deleted, ignoring performance '\n                      'collection cycle for task id %s.'\n                      % (self.storage_id, self.task_id))\n            return\n\n        # Handles performance collection from driver and dispatch\n        start_time = None\n        end_time = None\n        try:\n            LOG.debug('Collecting performance metrics for task id: %s'\n                      % self.task_id)\n            current_time = int(datetime.now().timestamp())\n\n            # Times are epoch time in milliseconds\n            overlap = CONF.telemetry. 
\\\n                performance_timestamp_overlap\n            end_time = current_time * 1000\n            start_time = end_time - (self.interval * 1000) - (overlap * 1000)\n            telemetry = PerformanceCollectionTask()\n            status = telemetry.collect(self.ctx, self.storage_id, self.args,\n                                       start_time, end_time)\n\n            db.task_update(self.ctx, self.task_id,\n                           {'last_run_time': current_time})\n\n            if not status:\n                raise exception.TelemetryTaskExecError()\n        except Exception as e:\n            LOG.error(\"Failed to collect performance metrics for \"\n                      \"task id :{0}, reason:{1}\".format(self.task_id,\n                                                        six.text_type(e)))\n            self._handle_task_failure(start_time, end_time)\n        else:\n            LOG.debug(\"Performance collection done for storage id :{0}\"\n                      \",task id :{1} and interval(in sec):{2}\"\n                      .format(self.storage_id, self.task_id, self.interval))\n\n    def _handle_task_failure(self, start_time, end_time):\n        failed_task_interval = TelemetryCollection.FAILED_JOB_SCHEDULE_INTERVAL\n\n        try:\n            # Fetch driver's capability for performance metric retention window\n            # If driver supports it and if it is within collection  range,\n            # consider it for failed task scheduling\n            capabilities = self.driver_api.get_capabilities(self.ctx,\n                                                            self.storage_id)\n            performance_metric_retention_window \\\n                = capabilities.get('performance_metric_retention_window')\n\n            if capabilities.get('failed_job_collect_interval'):\n                failed_task_interval = \\\n                    capabilities.get('failed_job_collect_interval')\n\n            if performance_metric_retention_window:\n                collection_window = performance_metric_retention_window \\\n                    if performance_metric_retention_window <= CONF.telemetry \\\n                    .max_failed_task_retry_window \\\n                    else CONF.telemetry.max_failed_task_retry_window\n                failed_task_interval = collection_window / TelemetryCollection\\\n                    .MAX_FAILED_JOB_RETRY_COUNT\n        except Exception as e:\n            LOG.error(\"Failed to get driver capabilities during failed task \"\n                      \"scheduling for storage id :{0}, reason:{1}\"\n                      .format(self.storage_id, six.text_type(e)))\n\n        failed_task = {FailedTask.storage_id.name: self.storage_id,\n                       FailedTask.task_id.name: self.task_id,\n                       FailedTask.interval.name: failed_task_interval,\n                       FailedTask.end_time.name: end_time,\n                       FailedTask.start_time.name: start_time,\n                       FailedTask.method.name:\n                           FailedPerformanceCollectionHandler.__module__ +\n                           '.' 
+ FailedPerformanceCollectionHandler.__name__,\n                       FailedTask.retry_count.name: 0,\n                       FailedTask.executor.name: self.executor}\n        failed_task = db.failed_task_create(self.ctx, failed_task)\n        self.metric_task_rpcapi.assign_failed_job(self.ctx,\n                                                  failed_task['id'],\n                                                  failed_task['executor'])\n"
  },
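  {
    "path": "delfin/task_manager/scheduler/schedulers/telemetry/performance_collection_window_sketch.py",
    "content": "# Hypothetical sketch, illustration only -- this module is not part of the\n# delfin source tree. It isolates the collection window arithmetic used by\n# PerformanceCollectionHandler.__call__: timestamps are epoch milliseconds,\n# and each cycle re-reads an extra 'overlap' of history so that points near\n# the previous window's edge are not missed.\n\nfrom datetime import datetime\n\n\ndef collection_window(interval, overlap):\n    \"\"\"Return (start_time, end_time) in epoch milliseconds.\"\"\"\n    current_time = int(datetime.now().timestamp())\n    end_time = current_time * 1000\n    start_time = end_time - (interval * 1000) - (overlap * 1000)\n    return start_time, end_time\n\n\n# Example: a 900 s interval with a 60 s overlap yields a 960 s window.\nif __name__ == '__main__':\n    print(collection_window(900, 60))\n"
  },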
  {
    "path": "delfin/task_manager/subprocess_manager.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nSubprocess metrics manager for metric collection tasks**\n\"\"\"\n\nfrom oslo_log import log\nfrom oslo_config import cfg\n\nfrom delfin.coordination import GroupMembership\nfrom delfin import manager\nfrom delfin.task_manager.scheduler import schedule_manager\nfrom delfin.task_manager.scheduler.schedulers.telemetry.job_handler \\\n    import FailedJobHandler\nfrom delfin.task_manager.scheduler.schedulers.telemetry.job_handler \\\n    import JobHandler\n\n\nCONF = cfg.CONF\nLOG = log.getLogger(__name__)\n\n\nclass SubprocessManager(manager.Manager):\n    \"\"\"manage periodical collection tasks in subprocesses\"\"\"\n\n    RPC_API_VERSION = '1.0'\n\n    def __init__(self, service_name=None, *args, **kwargs):\n        super(SubprocessManager, self).__init__(*args, **kwargs)\n\n    def init_scheduler(self, topic, host):\n        scheduler = schedule_manager.SchedulerManager()\n        scheduler.start()\n        watcher = GroupMembership(topic)\n        watcher.start()\n        watcher.join_group(host)\n\n    def assign_job_local(self, context, task_id):\n        instance = JobHandler.get_instance(context, task_id)\n        instance.schedule_job(task_id)\n\n    def remove_job_local(self, context, task_id):\n        instance = JobHandler.get_instance(context, task_id)\n        instance.remove_job(task_id)\n\n    def assign_failed_job_local(self, context, failed_task_id):\n        instance = FailedJobHandler.get_instance(context, failed_task_id)\n        instance.schedule_failed_job(failed_task_id)\n\n    def remove_failed_job_local(self, context, failed_task_id):\n        instance = FailedJobHandler.get_instance(context, failed_task_id)\n        instance.remove_failed_job(failed_task_id)\n"
  },
  {
    "path": "delfin/task_manager/subprocess_rpcapi.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nClient side of the subprocess metrics collection manager RPC API.\n\"\"\"\n\nimport oslo_messaging as messaging\nfrom oslo_config import cfg\n\nfrom delfin import rpc\n\nCONF = cfg.CONF\n\n\nclass SubprocessAPI(object):\n    \"\"\"Client side of the subprocess metrics manager collection rpc API.\n\n    API version history:\n\n        1.0 - Initial version.\n    \"\"\"\n\n    RPC_API_VERSION = '1.0'\n\n    def __init__(self):\n        super(SubprocessAPI, self).__init__()\n        self.target = messaging.Target(topic=CONF.host,\n                                       version=self.RPC_API_VERSION)\n        self.client = rpc.get_client(self.target,\n                                     version_cap=self.RPC_API_VERSION)\n\n    def get_client(self, topic):\n        target = messaging.Target(topic=topic,\n                                  version=self.RPC_API_VERSION)\n        return rpc.get_client(target, version_cap=self.RPC_API_VERSION)\n\n    def assign_job_local(self, context, task_id, executor):\n        rpc_client = self.get_client(str(executor))\n        call_context = rpc_client.prepare(topic=str(executor), version='1.0',\n                                          fanout=False)\n        return call_context.cast(context, 'assign_job_local',\n                                 task_id=task_id)\n\n    def remove_job_local(self, context, task_id, executor):\n        rpc_client = self.get_client(str(executor))\n        call_context = rpc_client.prepare(topic=str(executor), version='1.0',\n                                          fanout=False)\n        return call_context.cast(context, 'remove_job_local',\n                                 task_id=task_id)\n\n    def assign_failed_job_local(self, context, failed_task_id, executor):\n        rpc_client = self.get_client(str(executor))\n        call_context = rpc_client.prepare(topic=str(executor), version='1.0',\n                                          fanout=False)\n        return call_context.cast(context, 'assign_failed_job_local',\n                                 failed_task_id=failed_task_id)\n\n    def remove_failed_job_local(self, context, failed_task_id, executor):\n        rpc_client = self.get_client(str(executor))\n        call_context = rpc_client.prepare(topic=str(executor), version='1.0',\n                                          fanout=False)\n        return call_context.cast(context, 'remove_failed_job_local',\n                                 failed_task_id=failed_task_id)\n"
  },
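  {
    "path": "delfin/task_manager/subprocess_rpcapi_usage_sketch.py",
    "content": "# Hypothetical usage sketch, illustration only -- this module is not part\n# of the delfin source tree. SubprocessAPI mirrors the metrics TaskAPI but\n# targets the per-child executor topics ('<host>:executor_N') with fanout\n# disabled, so exactly one subprocess picks the job up.\n\nfrom delfin import context as ctxt\nfrom delfin.task_manager import subprocess_rpcapi\n\n\ndef assign_to_child(task_id, executor_topic):\n    # executor_topic is e.g. 'host1:executor_1', as built by the parent\n    # process when it selects or spawns a local executor.\n    admin_context = ctxt.get_admin_context()\n    subprocess_rpcapi.SubprocessAPI().assign_job_local(\n        admin_context, task_id, executor_topic)\n"
  },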
  {
    "path": "delfin/task_manager/tasks/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/task_manager/tasks/alerts.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport six\nfrom oslo_log import log\n\nfrom delfin import db\nfrom delfin import exception\nfrom delfin.common import alert_util\nfrom delfin.drivers import api as driver_manager\nfrom delfin.exporter import base_exporter\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\nclass AlertSyncTask(object):\n\n    def __init__(self):\n        self.driver_manager = driver_manager.API()\n        self.alert_export_manager = base_exporter.AlertExporterManager()\n\n    def sync_alerts(self, ctx, storage_id, query_para):\n        \"\"\" Syncs all alerts from storage side to exporter \"\"\"\n        LOG.info('Syncing alerts for storage id:{0}, query_para: {1}'.format(\n            storage_id, query_para))\n        try:\n            storage = db.storage_get(ctx, storage_id)\n\n            current_alert_list = self.driver_manager.list_alerts(ctx,\n                                                                 storage_id,\n                                                                 query_para)\n            if not current_alert_list:\n                # No alerts to sync\n                LOG.info('No alerts to sync from storage device for '\n                         'storage id:{0}'.format(storage_id))\n                return\n\n            for alert in current_alert_list:\n                alert_util.fill_storage_attributes(alert, storage)\n            self.alert_export_manager.dispatch(ctx, current_alert_list)\n            LOG.info('Syncing storage alerts successful for storage id:{0}'\n                     .format(storage_id))\n        except Exception as e:\n            msg = _('Failed to sync alerts from storage device: {0}'\n                    .format(six.text_type(e)))\n            LOG.error(msg)\n\n    def clear_alerts(self, ctx, storage_id, sequence_number_list):\n        \"\"\" Clear alert from storage \"\"\"\n\n        LOG.info('Clear alert for storage id:{0}'.format(storage_id))\n        sequence_number_list = sequence_number_list or []\n        failure_list = []\n        for sequence_number in sequence_number_list:\n            try:\n                self.driver_manager.clear_alert(ctx, storage_id,\n                                                sequence_number)\n            except (exception.AccessInfoNotFound,\n                    exception.StorageNotFound) as e:\n                LOG.warning(\"Ignore the situation: %s\", e.msg)\n            except Exception as e:\n                LOG.error(\"Failed to clear alert with sequence number: %s \"\n                          \"for storage: %s, reason: %s.\",\n                          sequence_number, storage_id, six.text_type(e))\n                failure_list.append(sequence_number)\n        return failure_list\n"
  },
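  {
    "path": "delfin/task_manager/tasks/alerts_usage_sketch.py",
    "content": "# Hypothetical usage sketch, illustration only -- this module is not part\n# of the delfin source tree. AlertSyncTask pulls current alerts from the\n# driver and dispatches them to the configured alert exporters;\n# clear_alerts returns the sequence numbers it could not clear.\n\nfrom delfin import context as ctxt\nfrom delfin.task_manager.tasks import alerts\n\n\ndef sync_and_clear(storage_id, sequence_numbers):\n    admin_context = ctxt.get_admin_context()\n    task = alerts.AlertSyncTask()\n    # query_para is an optional filter; None syncs all current alerts.\n    task.sync_alerts(admin_context, storage_id, None)\n    failed = task.clear_alerts(admin_context, storage_id, sequence_numbers)\n    return failed\n"
  },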
  {
    "path": "delfin/task_manager/tasks/resources.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\n\nimport decorator\nfrom oslo_log import log\n\nfrom delfin import coordination\nfrom delfin import db\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.drivers import api as driverapi\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\ndef set_synced_after():\n    @decorator.decorator\n    def _set_synced_after(func, *args, **kwargs):\n        call_args = inspect.getcallargs(func, *args, **kwargs)\n        self = call_args['self']\n        sync_result = constants.ResourceSync.SUCCEED\n        ret = None\n        try:\n            ret = func(*args, **kwargs)\n        except Exception:\n            sync_result = constants.ResourceSync.FAILED\n        lock = coordination.Lock(self.storage_id)\n        with lock:\n            try:\n                storage = db.storage_get(self.context, self.storage_id)\n            except exception.StorageNotFound:\n                LOG.warning('Storage %s not found when set synced'\n                            % self.storage_id)\n            else:\n                # One sync task done, sync status minus 1\n                # When sync status get to 0\n                # means all the sync tasks are completed\n                if storage['sync_status'] != constants.SyncStatus.SYNCED:\n                    storage['sync_status'] -= sync_result\n                    db.storage_update(self.context, self.storage_id,\n                                      {'sync_status': storage['sync_status']})\n\n        return ret\n\n    return _set_synced_after\n\n\ndef check_deleted():\n    @decorator.decorator\n    def _check_deleted(func, *args, **kwargs):\n        call_args = inspect.getcallargs(func, *args, **kwargs)\n        self = call_args['self']\n        ret = func(*args, **kwargs)\n        # When context.read_deleted is 'yes', db.storage_get would\n        # only get the storage whose 'deleted' tag is not default value\n        self.context.read_deleted = 'yes'\n        try:\n            db.storage_get(self.context, self.storage_id)\n        except exception.StorageNotFound:\n            LOG.debug('Storage %s not found when checking deleted'\n                      % self.storage_id)\n        else:\n            self.remove()\n        self.context.read_deleted = 'no'\n        return ret\n\n    return _check_deleted\n\n\nclass StorageResourceTask(object):\n    NATIVE_RESOURCE_ID = None\n\n    def __init__(self, context, storage_id):\n        self.storage_id = storage_id\n        self.context = context\n        self.driver_api = driverapi.API()\n\n    def _classify_resources(self, storage_resources, db_resources, key):\n        \"\"\"\n        :param storage_resources:\n        :param db_resources:\n        :return: it will return three list add_list: the items present in\n        storage but not in current_db. update_list:the items present in\n        storage and in current_db. 
delete_id_list:the items present not in\n        storage but present in current_db.\n        \"\"\"\n        original_ids_in_db = [resource[key]\n                              for resource in db_resources]\n        delete_id_list = [resource['id'] for resource in db_resources]\n        add_list = []\n        update_list = []\n\n        for resource in storage_resources:\n            if resource[key] in original_ids_in_db:\n                resource['id'] = db_resources[original_ids_in_db.index(\n                    resource[key])]['id']\n                delete_id_list.remove(resource['id'])\n                update_list.append(resource)\n            else:\n                add_list.append(resource)\n\n        return add_list, update_list, delete_id_list\n\n    @check_deleted()\n    @set_synced_after()\n    def sync(self):\n        \"\"\"\n        Synchronizing device resources data to database.\n        \"\"\"\n        LOG.info('{} sync for storage(id={}) start'.format(\n            self.__class__.__name__, self.storage_id))\n        try:\n            # list the storage resources from driver and database\n            storage_resources = self.driver_list_resources()\n            db_resources = self.db_resource_get_all(\n                {'storage_id': self.storage_id})\n\n            add_list, update_list, delete_id_list = self._classify_resources(\n                storage_resources, db_resources, self.NATIVE_RESOURCE_ID)\n\n            if delete_id_list:\n                self.db_resources_delete(delete_id_list)\n\n            if update_list:\n                self.db_resources_update(update_list)\n\n            if add_list:\n                self.db_resources_create(add_list)\n        except NotImplementedError:\n            # Ignore this exception because driver may not support it.\n            pass\n        except Exception as e:\n            msg = _('{} sync for storage(id={}) failed: {}'.format(\n                self.__class__.__name__, self.storage_id, e))\n            LOG.error(msg)\n            raise\n        else:\n            LOG.info('{} sync for storage(id={}) successful'.format(\n                self.__class__.__name__, self.storage_id))\n\n    def remove(self):\n        LOG.info('{} remove for storage(id={})'.format(\n            self.__class__.__name__, self.storage_id))\n        self.db_resource_delete_by_storage()\n\n    def driver_list_resources(self):\n        raise NotImplementedError(\n            'Resource task API driver_list_resources() is not implemented')\n\n    def db_resource_get_all(self, filters):\n        raise NotImplementedError(\n            'Resource task API db_resource_get_all() is not implemented')\n\n    def db_resources_delete(self, delete_id_list):\n        raise NotImplementedError(\n            'Resource task API db_resources_delete() is not implemented')\n\n    def db_resources_update(self, update_list):\n        raise NotImplementedError(\n            'Resource task API db_resources_update() is not implemented')\n\n    def db_resources_create(self, add_list):\n        raise NotImplementedError(\n            'Resource task API db_resources_create() is not implemented')\n\n    def db_resource_delete_by_storage(self):\n        raise NotImplementedError(\n            'Resource task API db_resource_delete_by_storage() '\n            'is not implemented')\n\n\nclass StorageDeviceTask(StorageResourceTask):\n    def __init__(self, context, storage_id):\n        super(StorageDeviceTask, self).__init__(context, storage_id)\n\n    @check_deleted()\n    
\n    @check_deleted()\n    @set_synced_after()\n    def sync(self):\n        \"\"\"Synchronize storage device details to the database.\"\"\"\n        LOG.info('Syncing storage device for storage id:{0}'.format(\n            self.storage_id))\n        try:\n            storage = self.driver_api.get_storage(self.context,\n                                                  self.storage_id)\n\n            db.storage_update(self.context, self.storage_id, storage)\n        except Exception as e:\n            msg = _('Failed to update storage entry in DB: {0}'\n                    .format(e))\n            LOG.error(msg)\n            raise\n        else:\n            LOG.info(\"Syncing storage device successful\")\n\n    def remove(self):\n        LOG.info('Remove storage device for storage id:{0}'\n                 .format(self.storage_id))\n        try:\n            db.storage_delete(self.context, self.storage_id)\n            db.access_info_delete(self.context, self.storage_id)\n            db.alert_source_delete(self.context, self.storage_id)\n        except Exception as e:\n            LOG.error('Failed to delete storage entry from DB: {0}'.format(e))\n\n\nclass StoragePoolTask(StorageResourceTask):\n    NATIVE_RESOURCE_ID = 'native_storage_pool_id'\n\n    def driver_list_resources(self):\n        return self.driver_api.list_storage_pools(\n            self.context, self.storage_id)\n\n    def db_resource_get_all(self, filters):\n        return db.storage_pool_get_all(self.context, filters=filters)\n\n    def db_resources_delete(self, delete_id_list):\n        return db.storage_pools_delete(self.context, delete_id_list)\n\n    def db_resources_update(self, update_list):\n        return db.storage_pools_update(self.context, update_list)\n\n    def db_resources_create(self, add_list):\n        return db.storage_pools_create(self.context, add_list)\n\n    def db_resource_delete_by_storage(self):\n        return db.storage_pool_delete_by_storage(self.context, self.storage_id)\n\n\nclass StorageVolumeTask(StorageResourceTask):\n    NATIVE_RESOURCE_ID = 'native_volume_id'\n\n    def driver_list_resources(self):\n        return self.driver_api.list_volumes(self.context, self.storage_id)\n\n    def db_resource_get_all(self, filters):\n        return db.volume_get_all(self.context, filters=filters)\n\n    def db_resources_delete(self, delete_id_list):\n        return db.volumes_delete(self.context, delete_id_list)\n\n    def db_resources_update(self, update_list):\n        return db.volumes_update(self.context, update_list)\n\n    def db_resources_create(self, add_list):\n        return db.volumes_create(self.context, add_list)\n\n    def db_resource_delete_by_storage(self):\n        return db.volume_delete_by_storage(self.context, self.storage_id)\n\n\nclass StorageControllerTask(StorageResourceTask):\n    NATIVE_RESOURCE_ID = 'native_controller_id'\n\n    def driver_list_resources(self):\n        return self.driver_api.list_controllers(self.context, self.storage_id)\n\n    def db_resource_get_all(self, filters):\n        return db.controller_get_all(self.context, filters=filters)\n\n    def db_resources_delete(self, delete_id_list):\n        return db.controllers_delete(self.context, delete_id_list)\n\n    def db_resources_update(self, update_list):\n        return db.controllers_update(self.context, update_list)\n\n    def db_resources_create(self, add_list):\n        return db.controllers_create(self.context, add_list)\n\n    def db_resource_delete_by_storage(self):\n        return db.controller_delete_by_storage(self.context, 
self.storage_id)\n\n\nclass StoragePortTask(StorageResourceTask):\n    NATIVE_RESOURCE_ID = 'native_port_id'\n\n    def driver_list_resources(self):\n        return self.driver_api.list_ports(self.context, self.storage_id)\n\n    def db_resource_get_all(self, filters):\n        return db.port_get_all(self.context, filters=filters)\n\n    def db_resources_delete(self, delete_id_list):\n        return db.ports_delete(self.context, delete_id_list)\n\n    def db_resources_update(self, update_list):\n        return db.ports_update(self.context, update_list)\n\n    def db_resources_create(self, add_list):\n        return db.ports_create(self.context, add_list)\n\n    def db_resource_delete_by_storage(self):\n        return db.port_delete_by_storage(self.context, self.storage_id)\n\n\nclass StorageDiskTask(StorageResourceTask):\n    NATIVE_RESOURCE_ID = 'native_disk_id'\n\n    def driver_list_resources(self):\n        return self.driver_api.list_disks(self.context, self.storage_id)\n\n    def db_resource_get_all(self, filters):\n        return db.disk_get_all(self.context, filters=filters)\n\n    def db_resources_delete(self, delete_id_list):\n        return db.disks_delete(self.context, delete_id_list)\n\n    def db_resources_update(self, update_list):\n        return db.disks_update(self.context, update_list)\n\n    def db_resources_create(self, add_list):\n        return db.disks_create(self.context, add_list)\n\n    def db_resource_delete_by_storage(self):\n        return db.disk_delete_by_storage(self.context, self.storage_id)\n\n\nclass StorageQuotaTask(StorageResourceTask):\n    NATIVE_RESOURCE_ID = 'native_quota_id'\n\n    def driver_list_resources(self):\n        return self.driver_api.list_quotas(self.context, self.storage_id)\n\n    def db_resource_get_all(self, filters):\n        return db.quota_get_all(self.context, filters=filters)\n\n    def db_resources_delete(self, delete_id_list):\n        return db.quotas_delete(self.context, delete_id_list)\n\n    def db_resources_update(self, update_list):\n        return db.quotas_update(self.context, update_list)\n\n    def db_resources_create(self, add_list):\n        return db.quotas_create(self.context, add_list)\n\n    def db_resource_delete_by_storage(self):\n        return db.quota_delete_by_storage(self.context, self.storage_id)\n\n\nclass StorageFilesystemTask(StorageResourceTask):\n    NATIVE_RESOURCE_ID = 'native_filesystem_id'\n\n    def driver_list_resources(self):\n        return self.driver_api.list_filesystems(self.context, self.storage_id)\n\n    def db_resource_get_all(self, filters):\n        return db.filesystem_get_all(self.context, filters=filters)\n\n    def db_resources_delete(self, delete_id_list):\n        return db.filesystems_delete(self.context, delete_id_list)\n\n    def db_resources_update(self, update_list):\n        return db.filesystems_update(self.context, update_list)\n\n    def db_resources_create(self, add_list):\n        return db.filesystems_create(self.context, add_list)\n\n    def db_resource_delete_by_storage(self):\n        return db.filesystem_delete_by_storage(self.context, self.storage_id)\n\n\nclass StorageQtreeTask(StorageResourceTask):\n    NATIVE_RESOURCE_ID = 'native_qtree_id'\n\n    def driver_list_resources(self):\n        return self.driver_api.list_qtrees(self.context, self.storage_id)\n\n    def db_resource_get_all(self, filters):\n        return db.qtree_get_all(self.context, filters=filters)\n\n    def db_resources_delete(self, delete_id_list):\n        return 
db.qtrees_delete(self.context, delete_id_list)\n\n    def db_resources_update(self, update_list):\n        return db.qtrees_update(self.context, update_list)\n\n    def db_resources_create(self, add_list):\n        return db.qtrees_create(self.context, add_list)\n\n    def db_resource_delete_by_storage(self):\n        return db.qtree_delete_by_storage(self.context, self.storage_id)\n\n\nclass StorageShareTask(StorageResourceTask):\n    NATIVE_RESOURCE_ID = 'native_share_id'\n\n    def driver_list_resources(self):\n        return self.driver_api.list_shares(self.context, self.storage_id)\n\n    def db_resource_get_all(self, filters):\n        return db.share_get_all(self.context, filters=filters)\n\n    def db_resources_delete(self, delete_id_list):\n        return db.shares_delete(self.context, delete_id_list)\n\n    def db_resources_update(self, update_list):\n        return db.shares_update(self.context, update_list)\n\n    def db_resources_create(self, add_list):\n        return db.shares_create(self.context, add_list)\n\n    def db_resource_delete_by_storage(self):\n        return db.share_delete_by_storage(self.context, self.storage_id)\n\n\nclass StorageHostInitiatorTask(StorageResourceTask):\n    def __init__(self, context, storage_id):\n        super(StorageHostInitiatorTask, self).__init__(context, storage_id)\n\n    @check_deleted()\n    @set_synced_after()\n    def sync(self):\n        \"\"\"Synchronize storage host initiators to the database.\"\"\"\n        LOG.info('Syncing storage host initiator for storage id:{0}'\n                 .format(self.storage_id))\n        try:\n            # Collect the storage host initiator list from the driver.\n            # Host initiators are fully refreshed rather than diffed:\n            # existing entries for this storage are dropped and the\n            # driver-reported list is re-created.\n            storage_host_initiators = self.driver_api \\\n                .list_storage_host_initiators(self.context, self.storage_id)\n            if storage_host_initiators:\n                db.storage_host_initiators_delete_by_storage(\n                    self.context, self.storage_id)\n                db.storage_host_initiators_create(\n                    self.context, storage_host_initiators)\n                LOG.info('Building storage host initiator successful for '\n                         'storage id:{0}'.format(self.storage_id))\n        except AttributeError as e:\n            LOG.error(e)\n        except NotImplementedError:\n            # Ignore this exception because the driver may not support it.\n            pass\n        except Exception as e:\n            msg = _('Failed to sync storage host initiators entry '\n                    'in DB: {0}'.format(e))\n            LOG.error(msg)\n        else:\n            LOG.info(\"Syncing storage host initiators successful\")\n\n    def remove(self):\n        LOG.info('Remove storage host initiators for storage id:{0}'\n                 .format(self.storage_id))\n        db.storage_host_initiators_delete_by_storage(self.context,\n                                                     self.storage_id)\n\n\nclass StorageHostTask(StorageResourceTask):\n    NATIVE_RESOURCE_ID = 'native_storage_host_id'\n\n    def driver_list_resources(self):\n        return self.driver_api.list_storage_hosts(self.context,\n                                                  self.storage_id)\n\n    def db_resource_get_all(self, filters):\n        return db.storage_hosts_get_all(self.context,\n                                        filters=filters)\n\n    def db_resources_delete(self, delete_id_list):\n        return db.storage_hosts_delete(self.context, delete_id_list)\n\n    def db_resources_update(self, 
update_list):\n        return db.storage_hosts_update(self.context, update_list)\n\n    def db_resources_create(self, add_list):\n        return db.storage_hosts_create(self.context, add_list)\n\n    def db_resource_delete_by_storage(self):\n        return db.storage_hosts_delete_by_storage(self.context,\n                                                  self.storage_id)\n\n\nclass StorageHostGroupTask(StorageResourceTask):\n    def __init__(self, context, storage_id):\n        super(StorageHostGroupTask, self).__init__(context, storage_id)\n\n    @check_deleted()\n    @set_synced_after()\n    def sync(self):\n        \"\"\"Synchronize storage host groups to the database.\"\"\"\n        LOG.info('Syncing storage host group for storage id:{0}'\n                 .format(self.storage_id))\n        try:\n            # Collect the storage host group list from the driver;\n            # relations between host groups and hosts are rebuilt here\n            # as well.\n            storage_hg_obj = self.driver_api \\\n                .list_storage_host_groups(self.context, self.storage_id)\n            storage_host_groups = storage_hg_obj['storage_host_groups']\n            storage_host_rels = storage_hg_obj['storage_host_grp_host_rels']\n            if storage_host_groups:\n                db.storage_host_grp_host_rels_delete_by_storage(\n                    self.context, self.storage_id)\n                db.storage_host_grp_host_rels_create(\n                    self.context, storage_host_rels)\n                LOG.info('Building host group relations successful for '\n                         'storage id:{0}'.format(self.storage_id))\n\n            db_storage_host_groups = db.storage_host_groups_get_all(\n                self.context, filters={\"storage_id\": self.storage_id})\n\n            add_list, update_list, delete_id_list = self._classify_resources(\n                storage_host_groups, db_storage_host_groups,\n                'native_storage_host_group_id')\n\n            LOG.debug('###StorageHostGroupTask for {0}:add={1},delete={2},'\n                      'update={3}'.format(self.storage_id,\n                                          len(add_list),\n                                          len(delete_id_list),\n                                          len(update_list)))\n            if delete_id_list:\n                db.storage_host_groups_delete(self.context, delete_id_list)\n\n            if update_list:\n                db.storage_host_groups_update(self.context, update_list)\n\n            if add_list:\n                db.storage_host_groups_create(self.context, add_list)\n\n        except AttributeError as e:\n            LOG.error(e)\n        except NotImplementedError:\n            # Ignore this exception because the driver may not support it.\n            pass\n        except Exception as e:\n            msg = _('Failed to sync storage host groups entry in DB: {0}'\n                    .format(e))\n            LOG.error(msg)\n        else:\n            LOG.info(\"Syncing storage host groups successful\")\n\n    def remove(self):\n        LOG.info('Remove storage host groups for storage id:{0}'\n                 .format(self.storage_id))\n        db.storage_host_grp_host_rels_delete_by_storage(self.context,\n                                                        self.storage_id)\n        db.storage_host_groups_delete_by_storage(self.context, self.storage_id)
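\n\n\n# NOTE: PortGroupTask and VolumeGroupTask below follow the same pattern as\n# StorageHostGroupTask: the group-to-member relations are rebuilt wholesale,\n# while the groups themselves are diffed via _classify_resources().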
\nclass PortGroupTask(StorageResourceTask):\n    def __init__(self, context, storage_id):\n        super(PortGroupTask, self).__init__(context, storage_id)\n\n    @check_deleted()\n    @set_synced_after()\n    def sync(self):\n        \"\"\"Synchronize port groups to the database.\"\"\"\n        LOG.info('Syncing port group for storage id:{0}'\n                 .format(self.storage_id))\n        try:\n            # Collect the port groups from the driver; relations between\n            # port groups and ports are rebuilt here as well.\n            port_groups_obj = self.driver_api \\\n                .list_port_groups(self.context, self.storage_id)\n            port_groups = port_groups_obj['port_groups']\n            port_group_relation_list = port_groups_obj['port_grp_port_rels']\n            if port_groups:\n                db.port_grp_port_rels_delete_by_storage(\n                    self.context, self.storage_id)\n                db.port_grp_port_rels_create(\n                    self.context, port_group_relation_list)\n                LOG.info('Building port group relations successful for '\n                         'storage id:{0}'.format(self.storage_id))\n\n            db_port_groups = db.port_groups_get_all(\n                self.context, filters={\"storage_id\": self.storage_id})\n\n            add_list, update_list, delete_id_list = self._classify_resources(\n                port_groups, db_port_groups, 'native_port_group_id')\n\n            LOG.debug('###PortGroupTask for {0}:add={1},delete={2},'\n                      'update={3}'.format(self.storage_id,\n                                          len(add_list),\n                                          len(delete_id_list),\n                                          len(update_list)))\n            if delete_id_list:\n                db.port_groups_delete(self.context, delete_id_list)\n\n            if update_list:\n                db.port_groups_update(self.context, update_list)\n\n            if add_list:\n                db.port_groups_create(self.context, add_list)\n\n        except AttributeError as e:\n            LOG.error(e)\n        except NotImplementedError:\n            # Ignore this exception because the driver may not support it.\n            pass\n        except Exception as e:\n            msg = _('Failed to sync port groups entry in DB: {0}'.format(e))\n            LOG.error(msg)\n        else:\n            LOG.info(\"Syncing port groups successful\")\n\n    def remove(self):\n        LOG.info('Remove port groups for storage id:{0}'\n                 .format(self.storage_id))\n        db.port_grp_port_rels_delete_by_storage(self.context,\n                                                self.storage_id)\n        db.port_groups_delete_by_storage(self.context, self.storage_id)\n\n\nclass VolumeGroupTask(StorageResourceTask):\n    def __init__(self, context, storage_id):\n        super(VolumeGroupTask, self).__init__(context, storage_id)\n\n    @check_deleted()\n    @set_synced_after()\n    def sync(self):\n        \"\"\"Synchronize volume groups to the database.\"\"\"\n        LOG.info('Syncing volume group for storage id:{0}'\n                 .format(self.storage_id))\n        try:\n            # Collect the volume groups from the driver; relations between\n            # volume groups and volumes are rebuilt here as well.\n            volume_groups_obj = self.driver_api \\\n                .list_volume_groups(self.context, self.storage_id)\n            volume_groups = volume_groups_obj['volume_groups']\n            volume_groups_rels = volume_groups_obj['vol_grp_vol_rels']\n            if volume_groups:\n                db.vol_grp_vol_rels_delete_by_storage(\n                    self.context, self.storage_id)
\n                db.vol_grp_vol_rels_create(self.context, volume_groups_rels)\n                LOG.info('Building volume group relations successful for '\n                         'storage id:{0}'.format(self.storage_id))\n\n            db_volume_groups = db.volume_groups_get_all(\n                self.context, filters={\"storage_id\": self.storage_id})\n\n            add_list, update_list, delete_id_list = self._classify_resources(\n                volume_groups, db_volume_groups, 'native_volume_group_id')\n\n            LOG.debug('###VolumeGroupTask for {0}:add={1},delete={2},'\n                      'update={3}'.format(self.storage_id,\n                                          len(add_list),\n                                          len(delete_id_list),\n                                          len(update_list)))\n            if delete_id_list:\n                db.volume_groups_delete(self.context, delete_id_list)\n\n            if update_list:\n                db.volume_groups_update(self.context, update_list)\n\n            if add_list:\n                db.volume_groups_create(self.context, add_list)\n\n        except AttributeError as e:\n            LOG.error(e)\n        except NotImplementedError:\n            # Ignore this exception because the driver may not support it.\n            pass\n        except Exception as e:\n            msg = _('Failed to sync volume groups entry in DB: {0}'.format(e))\n            LOG.error(msg)\n        else:\n            LOG.info(\"Syncing volume groups successful\")\n\n    def remove(self):\n        LOG.info('Remove volume groups for storage id:{0}'\n                 .format(self.storage_id))\n        db.vol_grp_vol_rels_delete_by_storage(self.context, self.storage_id)\n        db.volume_groups_delete_by_storage(self.context, self.storage_id)\n\n\nclass MaskingViewTask(StorageResourceTask):\n    NATIVE_RESOURCE_ID = 'native_masking_view_id'\n\n    def driver_list_resources(self):\n        return self.driver_api.list_masking_views(self.context,\n                                                  self.storage_id)\n\n    def db_resource_get_all(self, filters):\n        return db.masking_views_get_all(self.context, filters=filters)\n\n    def db_resources_delete(self, delete_id_list):\n        return db.masking_views_delete(self.context, delete_id_list)\n\n    def db_resources_update(self, update_list):\n        return db.masking_views_update(self.context, update_list)\n\n    def db_resources_create(self, add_list):\n        return db.masking_views_create(self.context, add_list)\n\n    def db_resource_delete_by_storage(self):\n        return db.masking_views_delete_by_storage(self.context,\n                                                  self.storage_id)\n"
  },
  {
    "path": "delfin/task_manager/tasks/telemetry.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\n\nimport six\nfrom oslo_log import log\n\nfrom delfin import context, db\nfrom delfin import exception\nfrom delfin.common.constants import TelemetryTaskStatus\nfrom delfin.drivers import api as driver_api\nfrom delfin.exporter import base_exporter\nfrom delfin.i18n import _\n\nLOG = log.getLogger(__name__)\n\n\nclass TelemetryTask(object):\n    @abc.abstractmethod\n    def collect(self, ctx, storage_id, args, start_time, end_time):\n        pass\n\n    @abc.abstractmethod\n    def remove_telemetry(self, ctx, storage_id):\n        pass\n\n\nclass PerformanceCollectionTask(TelemetryTask):\n    def __init__(self):\n        self.driver_api = driver_api.API()\n        self.perf_exporter = base_exporter.PerformanceExporterManager()\n\n    def collect(self, ctx, storage_id, args, start_time, end_time):\n        try:\n            LOG.debug(\"Performance collection for storage [%s] with start time\"\n                      \" [%s] and end time [%s]\"\n                      % (storage_id, start_time, end_time))\n            perf_metrics = self.driver_api \\\n                .collect_perf_metrics(ctx, storage_id,\n                                      args,\n                                      start_time, end_time)\n\n            # Fill extra labels to metric by fetching metadata from resource DB\n            try:\n                storage_details = db.storage_get(ctx, storage_id)\n                for m in perf_metrics:\n                    m.labels[\"name\"] = storage_details.name\n                    m.labels[\"serial_number\"] = storage_details.serial_number\n            except exception.StorageNotFound:\n                LOG.warning(f'Storage(id={storage_id}) has been removed.')\n                return TelemetryTaskStatus.TASK_EXEC_STATUS_SUCCESS\n            except Exception as e:\n                msg = _('Failed to add extra labels to performance '\n                        'metrics: {0}'.format(e))\n                LOG.error(msg)\n                return TelemetryTaskStatus.TASK_EXEC_STATUS_FAILURE\n\n            self.perf_exporter.dispatch(context, perf_metrics)\n            return TelemetryTaskStatus.TASK_EXEC_STATUS_SUCCESS\n        except Exception as e:\n            LOG.error(\"Failed to collect performance metrics for \"\n                      \"storage id :{0}, reason:{1}\".format(storage_id,\n                                                           six.text_type(e)))\n            return TelemetryTaskStatus.TASK_EXEC_STATUS_FAILURE\n"
  },
  {
    "path": "delfin/test.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Base classes for our unit tests.\n\nAllows overriding of flags for use of fakes, and some black magic for\ninline callbacks.\n\n\"\"\"\n\nimport fixtures\nfrom unittest import mock\nfrom oslo_concurrency import lockutils\nfrom oslo_config import cfg\nfrom oslo_config import fixture as config_fixture\nimport oslo_messaging\nfrom oslo_messaging import conffixture as messaging_conffixture\nfrom oslo_utils import uuidutils\nimport oslotest.base as base_test\n\nfrom delfin.common import config  # noqa\nfrom delfin import coordination\nfrom delfin.db.sqlalchemy import api as db_api\nfrom delfin.db.sqlalchemy import models as db_models\nfrom delfin import rpc\nfrom delfin import service\nfrom delfin.tests.unit import conf_fixture, fake_notifier\n\ntest_opts = [\n    cfg.StrOpt('sqlite_db',\n               default='delfin.sqlite',\n               help='The filename to use with sqlite.'),\n]\n\nCONF = cfg.CONF\nCONF.register_opts(test_opts)\n\n_DB_CACHE = None\n\n\nclass Database(fixtures.Fixture):\n\n    def __init__(self, db_session, sql_connection):\n        self.sql_connection = sql_connection\n        self.engine = db_session.get_engine()\n        self.engine.dispose()\n        conn = self.engine.connect()\n        db_models.BASE.metadata.create_all(self.engine)\n        self._DB = \"\".join(line for line in conn.connection.iterdump())\n        self.engine.dispose()\n\n    def setUp(self):\n        super(Database, self).setUp()\n        conn = self.engine.connect()\n        conn.connection.executescript(self._DB)\n        self.addCleanup(self.engine.dispose)\n\n\nclass TestCase(base_test.BaseTestCase):\n    \"\"\"Test case base class for all unit tests.\"\"\"\n\n    def setUp(self):\n        \"\"\"Run before each test method to initialize test environment.\"\"\"\n        super(TestCase, self).setUp()\n\n        conf_fixture.set_defaults(CONF)\n        CONF([], default_config_files=[])\n\n        global _DB_CACHE\n        if not _DB_CACHE:\n            _DB_CACHE = Database(\n                db_api,\n                sql_connection=CONF.database.connection)\n        self.useFixture(_DB_CACHE)\n\n        self.injected = []\n        self._services = []\n        # This will be cleaned up by the NestedTempfile fixture\n        lock_path = '/' + self.useFixture(fixtures.TempDir()).path\n        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))\n        self.fixture.config(lock_path=lock_path, group='oslo_concurrency')\n        self.fixture.config(\n            disable_process_locking=True, group='oslo_concurrency')\n\n        rpc.add_extra_exmods('delfin.tests')\n        self.addCleanup(rpc.clear_extra_exmods)\n        self.addCleanup(rpc.cleanup)\n\n        self.messaging_conf = 
messaging_conffixture.ConfFixture(CONF)\n        self.messaging_conf.transport_url = 'fake:/'\n        self.messaging_conf.response_timeout = 15\n        self.useFixture(self.messaging_conf)\n\n        oslo_messaging.get_notification_transport(CONF)\n        self.override_config('driver', ['test'],\n                             group='oslo_messaging_notifications')\n\n        rpc.init(CONF)\n\n        fake_notifier.stub_notifier(self)\n\n        # Locks must be cleaned up after tests\n        CONF.set_override('backend_type', 'file',\n                          group='coordination')\n        CONF.set_override('backend_server', lock_path,\n                          group='coordination')\n        coordination.LOCK_COORDINATOR.start()\n        self.addCleanup(coordination.LOCK_COORDINATOR.stop)\n\n    def tearDown(self):\n        \"\"\"Runs after each test method to tear down test environment.\"\"\"\n        super(TestCase, self).tearDown()\n        # Reset any overridden flags\n        CONF.reset()\n\n        # Stop any timers\n        for x in self.injected:\n            try:\n                x.stop()\n            except AssertionError:\n                pass\n\n        # Kill any services\n        for x in self._services:\n            try:\n                x.kill()\n            except Exception:\n                pass\n\n        # Delete attributes that don't start with _ so they don't pin\n        # memory around unnecessarily for the duration of the test\n        # suite\n        for key in [k for k in self.__dict__.keys() if k[0] != '_']:\n            del self.__dict__[key]\n\n    def flags(self, **kw):\n        \"\"\"Override flag variables for a test.\"\"\"\n        for k, v in kw.items():\n            CONF.set_override(k, v)\n\n    def start_service(self, name, host=None, **kwargs):\n        host = host or uuidutils.generate_uuid()\n        kwargs.setdefault('host', host)\n        kwargs.setdefault('binary', 'delfin-%s' % name)\n        svc = service.Service.create(**kwargs)\n        svc.start()\n        self._services.append(svc)\n        return svc\n\n    def mock_object(self, obj, attr_name, new_attr=None, **kwargs):\n        \"\"\"Use python mock to mock an object attribute\n\n        Mocks the specified objects attribute with the given value.\n        Automatically performs 'addCleanup' for the mock.\n\n        \"\"\"\n        if not new_attr:\n            new_attr = mock.Mock()\n        patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs)\n        patcher.start()\n        self.addCleanup(patcher.stop)\n        return new_attr\n\n    def mock_class(self, class_name, new_val=None, **kwargs):\n        \"\"\"Use python mock to mock a class\n\n        Mocks the specified objects attribute with the given value.\n        Automatically performs 'addCleanup' for the mock.\n\n        \"\"\"\n        if not new_val:\n            new_val = mock.Mock()\n        patcher = mock.patch(class_name, new_val, **kwargs)\n        patcher.start()\n        self.addCleanup(patcher.stop)\n        return new_val\n\n    # Useful assertions\n    def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):\n        \"\"\"Assert two dicts are equivalent.\n\n        This is a 'deep' match in the sense that it handles nested\n        dictionaries appropriately.\n\n        NOTE:\n\n            If you don't care (or don't know) a given value, you can specify\n            the string DONTCARE as the value. 
This will cause that dict-item\n            to be skipped.\n\n        \"\"\"\n        def raise_assertion(msg):\n            d1str = str(d1)\n            d2str = str(d2)\n            base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '\n                        'd2: %(d2str)s' %\n                        {\"msg\": msg, \"d1str\": d1str, \"d2str\": d2str})\n            raise AssertionError(base_msg)\n\n        d1keys = set(d1.keys())\n        d2keys = set(d2.keys())\n        if d1keys != d2keys:\n            d1only = d1keys - d2keys\n            d2only = d2keys - d1keys\n            raise_assertion('Keys in d1 and not d2: %(d1only)s. '\n                            'Keys in d2 and not d1: %(d2only)s' %\n                            {\"d1only\": d1only, \"d2only\": d2only})\n\n        for key in d1keys:\n            d1value = d1[key]\n            d2value = d2[key]\n            try:\n                error = abs(float(d1value) - float(d2value))\n                within_tolerance = error <= tolerance\n            except (ValueError, TypeError):\n                # If both values aren't convertible to float, just ignore\n                # ValueError if arg is a str, TypeError if it's something else\n                # (like None)\n                within_tolerance = False\n\n            if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):\n                self.assertDictMatch(d1value, d2value)\n            elif 'DONTCARE' in (d1value, d2value):\n                continue\n            elif approx_equal and within_tolerance:\n                continue\n            elif d1value != d2value:\n                raise_assertion(\"d1['%(key)s']=%(d1value)s != \"\n                                \"d2['%(key)s']=%(d2value)s\" %\n                                {\n                                    \"key\": key,\n                                    \"d1value\": d1value,\n                                    \"d2value\": d2value\n                                })\n\n    def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):\n        \"\"\"Assert a list of dicts are equivalent.\"\"\"\n        def raise_assertion(msg):\n            L1str = str(L1)\n            L2str = str(L2)\n            base_msg = ('List of dictionaries do not match: %(msg)s '\n                        'L1: %(L1str)s L2: %(L2str)s' %\n                        {\"msg\": msg, \"L1str\": L1str, \"L2str\": L2str})\n            raise AssertionError(base_msg)\n\n        L1count = len(L1)\n        L2count = len(L2)\n        if L1count != L2count:\n            raise_assertion('Length mismatch: len(L1)=%(L1count)d != '\n                            'len(L2)=%(L2count)d' %\n                            {\"L1count\": L1count, \"L2count\": L2count})\n\n        for d1, d2 in zip(L1, L2):\n            self.assertDictMatch(d1, d2, approx_equal=approx_equal,\n                                 tolerance=tolerance)\n\n    def assertSubDictMatch(self, sub_dict, super_dict):\n        \"\"\"Assert a sub_dict is subset of super_dict.\"\"\"\n        self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys())))\n        for k, sub_value in sub_dict.items():\n            super_value = super_dict[k]\n            if isinstance(sub_value, dict):\n                self.assertSubDictMatch(sub_value, super_value)\n            elif 'DONTCARE' in (sub_value, super_value):\n                continue\n            else:\n                self.assertEqual(sub_value, super_value)\n\n    def assertIn(self, a, b, *args, **kwargs):\n        \"\"\"Python 
< v2.7 compatibility.  Assert 'a' in 'b'.\"\"\"\n        try:\n            f = super(TestCase, self).assertIn\n        except AttributeError:\n            self.assertTrue(a in b, *args, **kwargs)\n        else:\n            f(a, b, *args, **kwargs)\n\n    def assertNotIn(self, a, b, *args, **kwargs):\n        \"\"\"Python < v2.7 compatibility.  Assert 'a' NOT in 'b'.\"\"\"\n        try:\n            f = super(TestCase, self).assertNotIn\n        except AttributeError:\n            self.assertFalse(a in b, *args, **kwargs)\n        else:\n            f(a, b, *args, **kwargs)\n\n    def assertIsInstance(self, a, b, *args, **kwargs):\n        \"\"\"Python < v2.7 compatibility.\"\"\"\n        try:\n            f = super(TestCase, self).assertIsInstance\n        except AttributeError:\n            self.assertIsInstance(a, b)\n        else:\n            f(a, b, *args, **kwargs)\n\n    def assertIsNone(self, a, *args, **kwargs):\n        \"\"\"Python < v2.7 compatibility.\"\"\"\n        try:\n            f = super(TestCase, self).assertIsNone\n        except AttributeError:\n            self.assertTrue(a is None)\n        else:\n            f(a, *args, **kwargs)\n\n    def _dict_from_object(self, obj, ignored_keys):\n        if ignored_keys is None:\n            ignored_keys = []\n        return {k: v for k, v in obj.items()\n                if k not in ignored_keys}\n\n    def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):\n        obj_to_dict = lambda o: (  # noqa: E731\n            self._dict_from_object(o, ignored_keys))\n        sort_key = lambda d: [d[k] for k in sorted(d)]  # noqa: E731\n        conv_and_sort = lambda obj: (  # noqa: E731\n            sorted(map(obj_to_dict, obj), key=sort_key))\n\n        self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))\n\n    def assert_notify_called(self, mock_notify, calls):\n        for i in range(0, len(calls)):\n            mock_call = mock_notify.call_args_list[i]\n            call = calls[i]\n\n            posargs = mock_call[0]\n\n            self.assertEqual(call[0], posargs[0])\n            self.assertEqual(call[1], posargs[2])\n\n    def override_config(self, name, override, group=None):\n        \"\"\"Cleanly override CONF variables.\"\"\"\n        CONF.set_override(name, override, group)\n        self.addCleanup(CONF.clear_override, name, group)\n"
  },
  {
    "path": "delfin/tests/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/e2e/GetResources.robot",
    "content": "*** Settings ***\nDocumentation    Tests to verify that GET of resources\n\nLibrary                 RequestsLibrary\nLibrary                 Collections\nLibrary                 JSONLibrary\nLibrary                 OperatingSystem\n\nSuite Setup             Open Application\nSuite Teardown          Close Application\n\n*** Variables ***\n${delfin_url}           http://localhost:8190/v1\n${storage_pools}        storage-pools\n@{res_urls}             storage-pools  volumes  controllers  disks  ports  quotas  qtrees  filesystems  shares\n@{res_indx}             storage_pools  volumes  controllers  disks  ports  quotas  qtrees  filesystems  shares\n\n*** Test Cases ***\nGET Resources when test storage is registered\n    [Tags]    DELFIN\n\n    FOR     ${res_url}   ${res_ind}   IN ZIP    ${res_urls}   ${res_indx}\n            ${ret_json}=            Get All Resource Of     ${res_url}\n            ${res_s}=               Get Value From Json\t    ${ret_json}         $..${res_ind}\n            Should Not Be Empty     ${res_s[0]}\n    END\n\nGET Resources with ID\n    [Tags]    DELFIN\n\n    FOR     ${res_url}   ${res_ind}   IN ZIP    ${res_urls}   ${res_indx}\n            ${ret_json}=            Get All Resource Of     ${res_url}\n            ${res_s}=               Get Value From Json\t    ${ret_json}         $..${res_ind}\n            Should Not Be Empty     ${res_s[0]}\n            ${resource_ids}         Get Value From Json\t    ${ret_json}         $..id\n            ${ret_json}=            Get All Resource with ID    ${res_url}     ${resource_ids[0]}\n            Should Not Be Empty     ${res_s[0]}\n    END\n\nGET Resources with Filter\n    [Tags]    DELFIN\n\n    log to console          \\n\n    ${storages}=            Get All Storages\n    ${storages_id}=         Get Value From Json\t    ${storages[0]}         $..id\n\n    FOR     ${res_url}   ${res_ind}   IN ZIP    ${res_urls}   ${res_indx}\n            ${ret_json}=            Get All Resource with Filter     ${res_url}   storage_id=${storages_id[0]}\n            ${res_s}=               Get Value From Json\t    ${ret_json}         $..${res_ind}\n            Should Not Be Empty     ${res_s[0]}\n            ${ret_json}=            Get All Resource with Filter     ${res_url}   storage_id=123\n            ${res_s}=               Get Value From Json\t    ${ret_json}         $..${res_ind}\n            Should Be Empty     ${res_s[0]}\n\n    END\n\nGET Resources when no storages are registered\n    [Tags]    DELFIN\n    Close Application\n    FOR     ${res_url}   ${res_ind}   IN ZIP    ${res_urls}   ${res_indx}\n            ${ret_json}=            Get All Resource Of     ${res_url}\n            ${res_s}=               Get Value From Json\t    ${ret_json}         $..${res_ind}\n            Should Be Empty         ${res_s[0]}\n    END\n    Open Application\n\n*** Keywords ***\nGet All Resource Of\n    [Arguments]             ${resource}\n    Create Session          delfin          ${delfin_url}\n    ${resp_get}=            GET On Session  delfin    ${resource}\n    Status Should Be        200    ${resp_get}\n    [Return]                ${resp_get.json()}\n\nGet All Resource with ID\n    [Arguments]             ${resource}     ${resource_id}\n    Create Session          delfin          ${delfin_url}\n    ${resp_get}=            GET On Session  delfin    ${resource}/${resource_id}\n    Status Should Be        200    ${resp_get}\n    [Return]                ${resp_get.json()}\n\nGet All Resource with Filter\n    [Arguments]             
${resource}     ${filter}\n    Create Session          delfin          ${delfin_url}\n    ${resp_get}=            GET On Session  delfin    ${resource}?${filter}\n    Status Should Be        200    ${resp_get}\n    [Return]                ${resp_get.json()}\n\nDelete Storage With ID\n    [Arguments]             ${storage_id}\n    Create Session          delfin      ${delfin_url}\n    ${resp_del}=            DELETE On Session    delfin     storages/${storage_id}\n    Status Should Be        202    ${resp_del}\n\nRegister Test Storage\n    ${test}=                 Load Json From File   ${CURDIR}/test.json\n    ${access_info}=          Get Value From Json   ${test}   $.test_register_access_info\n\n    Create Session          delfin      ${delfin_url}\n    ${resp_register}=       POST On Session     delfin     storages    json=${access_info[0]}\n    Status Should Be                            201    ${resp_register}\n    Dictionary Should Contain Key               ${resp_register.json()}     id\n    ${storage_id}=          Get Value From Json\t     ${resp_register.json()} \t $..id\n    [Return]                ${storage_id[0]}\n\nGet All Storages\n    Create Session          delfin      ${delfin_url}\n    ${resp_get}=            GET On Session    delfin    storages\n    Status Should Be        200    ${resp_get}\n    ${resp_get_storage}=    Get Value From Json\t        ${resp_get.json()}      $..storages\n    [Return]                ${resp_get_storage[0]}\n\nOpen Application\n    ${array_id}=            Register Test Storage\n    Sleep       10s\n\nClose Application\n    @{storages}=            Get All Storages\n    FOR     ${storage}      IN                      @{storages}\n            ${storage_id}=  Get Value From Json\t    ${storage} \t        $..id\n            Delete Storage With ID                  ${storage_id[0]}\n    END\n    Sleep       10s\n"
  },
  {
    "path": "delfin/tests/e2e/GetStorage.robot",
    "content": "*** Settings ***\nDocumentation    Tests to verify that GET of storages\n\nLibrary                 RequestsLibrary\nLibrary                 Collections\nLibrary                 JSONLibrary\nLibrary                 OperatingSystem\n\n*** Variables ***\n${delfin_url}           http://localhost:8190/v1\n\n*** Test Cases ***\nGET all Storages when no storages are registered\n    [Tags]    DELFIN\n\n    ${storages}=            Get All Storages\n    Should Be Empty         ${storages}\n\nGET all Storages when two storages are registered\n    [Tags]    DELFIN\n\n    ${storage_id_test}=          Register Test Storage\n    ${storage_id_fake}=          Register Fake Storage\n\n    # GET all storages\n    ${storages}=            Get All Storages\n    ${id_list}=    create list       ${storages[0]['id']}      ${storages[1]['id']}\n    List should contain value  ${id_list}   ${storage_id_test}\n    List should contain value  ${id_list}   ${storage_id_fake}\n\n    Delete Storage With ID      ${storage_id_test}\n    Delete Storage With ID      ${storage_id_fake}\n\nGET Storage with a valid Storage ID\n    [Tags]    DELFIN\n    ${storage_id_test}=          Register Test Storage\n\n    # GET all storages\n    ${storage}=            Get Storage With ID     ${storage_id_test}\n    ${id_list}=    create list       ${storage['id']}\n    List should contain value  ${id_list}   ${storage_id_test}\n\n    Delete Storage With ID      ${storage_id_test}\n\n*** Keywords ***\nGet All Storages\n    Create Session          delfin      ${delfin_url}\n    ${resp_get}=            GET On Session    delfin    storages\n    Status Should Be        200         ${resp_get}\n    ${resp_get_storage}=    Get Value From Json\t        ${resp_get.json()}      $..storages\n    [Return]                ${resp_get_storage[0]}\n\nGet Storage With ID\n    [Arguments]             ${storage_id}\n    Create Session          delfin      ${delfin_url}\n    ${resp_get}=            GET On Session    delfin    storages/${storage_id}\n    Status Should Be        200    ${resp_get}\n    [Return]                ${resp_get.json()}\n\nDelete Storage With ID\n    [Arguments]             ${storage_id}\n    Create Session          delfin      ${delfin_url}\n    ${resp_del}=            DELETE On Session    delfin     storages/${storage_id}\n    Status Should Be        202    ${resp_del}\n    Sleep                   10s\n\nRegister Test Storage\n    ${test}=                 Load Json From File   ${CURDIR}/test.json\n    ${access_info}=          Get Value From Json   ${test}   $.test_register_access_info\n\n    Create Session          delfin      ${delfin_url}\n    ${resp_register}=       POST On Session     delfin     storages    json=${access_info[0]}\n    Status Should Be                            201    ${resp_register}\n    Dictionary Should Contain Key               ${resp_register.json()}     id\n    ${storage_id}=          Get Value From Json\t     ${resp_register.json()} \t $..id\n    [Return]                ${storage_id[0]}\n\nRegister Fake Storage\n    ${fake_rest}=            Create dictionary  host=10.10.10.100  port=${8080}   username=admin  password=password\n    ${access_info}=          Create dictionary  vendor=fake_storage  model=fake_driver  rest=${fake_rest}\n    ${fake_device}=          Create dictionary  vendor=fake_vendor  model=fake_model\n\n    Create Session           delfin      ${delfin_url}\n    ${resp_register}=        POST On Session     delfin     storages    json=${access_info}\n    ${storage_id}=           
Get Value From Json\t     ${resp_register.json()} \t $..id\n    Dictionary Should Contain Sub Dictionary             ${resp_register.json()}     ${fake_device}\n    [Return]                ${storage_id[0]}\n"
  },
  {
    "path": "delfin/tests/e2e/README.md",
    "content": "# Introduction\nThis folder contains end to end, automated, testing scripts for Delfin.\n\nThese tests are using [Robot Framework](https://robotframework.org/) for automation and report generation.\n\nThe end-to-end tests are run against a test driver provided in the path `delfin/tests/e2e/testdriver`.\nThis test driver uses, included storage details in file `delfin/tests/e2e/testdriver/storage.json` for storage simulation when testing.\n\n# Supported OS\nUbuntu 18.04\n\n# Prerequisite\nPrerequisite for [standalone installer](https://github.com/sodafoundation/delfin/blob/master/installer/README.md) is applicable here too.\n\nInstall python 3.6+ and pip.\n\nExport PYTHONPATH as below\n\n```bash\nexport PYTHONPATH=$(pwd)\n```\n# Run tests\nThe end-to-end tests can be run from command prompt as below\n\n```bash\ngit clone https://github.com/sodafoundation/delfin.git && cd delfin\n./delfin/tests/e2e/test_e2e.sh\n```\nThe above script injects test driver into delfin, builds and installs delfin using delfin standalone installer.\nIt runs robot framework scripts against the running delfin application for verifying delfin APIs.\n\nWhen the script finish execution, robot framework generates the test execution summary and log.\nThese are available in the delfin root directory, with names `report.html` and `log.html` respectively."
  },
  {
    "path": "delfin/tests/e2e/RegisterStorage.robot",
    "content": "*** Settings ***\nDocumentation    Tests to verify that registration of storage succeed\n...              and fail correctly depending on the access_info\n...              input provided.\n...              Delfin needs to be installed and APIs are accessble.\n\nLibrary                 RequestsLibrary\nLibrary                 Collections\nLibrary                 JSONLibrary\nLibrary                 OperatingSystem\n\n*** Variables ***\n${delfin_url}           http://localhost:8190/v1\n\n*** Test Cases ***\nRegister Storage with in-valid access_info Test\n    [Tags]    DELFIN\n\n    Create Session          delfin      ${delfin_url}\n\n    ${ref_input}=           Load Json From File   ${CURDIR}/test.json\n    ${ref_access_info}=     Get Value From Json   ${ref_input}   $.test_register_access_info\n\n    # Invalid ip\n    ${access_info}=         Copy Dictionary       ${ref_access_info[0]}   Deepcopy=True\n    Set To Dictionary       ${access_info['rest']}       host=10.10.10.123\n\n    ${resp_register}=       POST On Session     delfin     storages    json=${access_info}   expected_status=any\n    Status Should Be        400    ${resp_register}\n    dictionary should contain value   ${resp_register.json()}   InvalidIpOrPort\n\n    # Invalid port\n    ${access_info}=         Copy Dictionary       ${ref_access_info[0]}   Deepcopy=True\n    Set To Dictionary       ${access_info['rest']}       port=${80}\n\n    ${resp_register}=       POST On Session     delfin     storages    json=${access_info}   expected_status=any\n    Status Should Be        400    ${resp_register}\n    dictionary should contain value   ${resp_register.json()}   InvalidIpOrPort\n\n    # Invalid username\n    ${access_info}=         Copy Dictionary       ${ref_access_info[0]}   Deepcopy=True\n    Set To Dictionary       ${access_info['rest']}       username=user\n\n    ${resp_register}=       POST On Session     delfin     storages    json=${access_info}   expected_status=any\n    Status Should Be        400    ${resp_register}\n    dictionary should contain value   ${resp_register.json()}   InvalidUsernameOrPassword\n\n    # Invalid Password\n    ${access_info}=         Copy Dictionary       ${ref_access_info[0]}   Deepcopy=True\n    Set To Dictionary       ${access_info['rest']}       password=pass\n\n    ${resp_register}=       POST On Session     delfin     storages    json=${access_info}   expected_status=any\n    Status Should Be        400    ${resp_register}\n    dictionary should contain value   ${resp_register.json()}   InvalidUsernameOrPassword\n\n\nRegister Storage with valid access_info Test\n    [Tags]    DELFIN\n    # Read storage backend details from JSON file\n    ${ref_storage}=         Load Json From File   ${CURDIR}/testdriver/storage.json\n    ${ref_device}=          Get Value From Json   ${ref_storage}   $..storage\n\n    ${storage_test}=        Register Test Storage\n    Dictionary Should Contain Sub Dictionary        ${storage_test}     ${ref_device[0]}\n    Delete Storage With ID  ${storage_test[\"id\"]}\n\nRegister Storage with same access_info Test\n    [Tags]    DELFIN\n    Sleep                   10s\n    ${storage_test}=        Register Test Storage\n\n    ${test}=                Load Json From File   ${CURDIR}/test.json\n    ${access_info}=         Get Value From Json   ${test}   $.test_register_access_info\n    Create Session          delfin      ${delfin_url}\n    ${resp_register}=       POST On Session     delfin     storages    json=${access_info[0]}   expected_status=any\n    
Status Should Be        400    ${resp_register}\n    dictionary should contain value   ${resp_register.json()}   StorageAlreadyExists\n\n    Delete Storage With ID      ${storage_test[\"id\"]}\n\n*** Keywords ***\nRegister Test Storage\n    ${test}=                Load Json From File   ${CURDIR}/test.json\n    ${access_info}=         Get Value From Json   ${test}   $.test_register_access_info\n\n    Create Session          delfin      ${delfin_url}\n    ${resp_register}=       POST On Session     delfin     storages    json=${access_info[0]}\n    Status Should Be        201    ${resp_register}\n    [Return]                ${resp_register.json()}\n\nDelete Storage With ID\n    [Arguments]             ${storage_id}\n    Create Session          delfin      ${delfin_url}\n    ${resp_del}=            DELETE On Session    delfin     storages/${storage_id}\n    Status Should Be        202    ${resp_del}\n    Sleep                   10s\n"
  },
  {
    "path": "delfin/tests/e2e/RemoveStorage.robot",
    "content": "*** Settings ***\nDocumentation    Tests to verify that Delete of storage\n\nLibrary                 RequestsLibrary\nLibrary                 Collections\nLibrary                 JSONLibrary\n\n*** Variables ***\n${delfin_url}           http://localhost:8190/v1\n\n*** Test Cases ***\n\nDelete Storage with valid storage_id\n    [Tags]    DELFIN\n    Sleep                   10s\n    ${storage_id_test}=     Register Test Storage\n    Create Session          delfin      ${delfin_url}\n    ${resp_del}=            DELETE On Session    delfin     storages/${storage_id_test}\n    Status Should Be        202         ${resp_del}\n\nDelete Storage with in-valid storage_id\n    [Tags]    DELFIN\n    Create Session          delfin      ${delfin_url}\n    ${resp_del2}=           DELETE On Session    delfin     storages/111  404\n    ${error_code}=         Get Value From Json   ${resp_del2.json()}  $..error_code\n    dictionary should contain value   ${resp_del2.json()}   StorageNotFound\n\n*** Keywords ***\nRegister Test Storage\n    ${test}=                 Load Json From File   ${CURDIR}/test.json\n    ${access_info}=          Get Value From Json   ${test}   $.test_register_access_info\n\n    Create Session          delfin      ${delfin_url}\n    ${resp_register}=       POST On Session     delfin     storages    json=${access_info[0]}\n    Status Should Be                            201    ${resp_register}\n    Dictionary Should Contain Key               ${resp_register.json()}     id\n    ${storage_id}=          Get Value From Json\t     ${resp_register.json()} \t $..id\n    [Return]                ${storage_id[0]}\n"
  },
  {
    "path": "delfin/tests/e2e/UpdateAccessInfo.robot",
    "content": "*** Settings ***\nDocumentation    Tests to verify that GET of resources\n\nLibrary                 RequestsLibrary\nLibrary                 Collections\nLibrary                 JSONLibrary\nLibrary                 OperatingSystem\n\nSuite Setup             Open Application\nSuite Teardown          Close Application\n\n*** Variables ***\n${delfin_url}           http://localhost:8190/v1\n\n*** Test Cases ***\nUpdate with invalid access_info Test\n    [Tags]    DELFIN\n    @{storages}=            Get All Storages\n    ${storage_id}=          Get Value From Json\t    ${storages[0]} \t        $..id\n\n    # Invalid access_info vendor and model\n    ${access_info_rest}=    Create dictionary  host=10.10.10.10  port=${8080}   username=user_1  password=pass_1\n    ${access_info}=         Create dictionary  vendor=test_vendor  model=test_model  rest=${access_info_rest}\n    ${resp}=                Update Access Info      ${storage_id[0]}   ${access_info}\n    Status Should Be        400         ${resp}\n    dictionary should contain value     ${resp.json()}   InvalidInput\n\n    # Invalid access_info, ip address\n    ${access_info_rest}=    Create dictionary  host=100.10.10.10  port=${8080}   username=user_1  password=pass_1\n    ${access_info}=         Create dictionary  rest=${access_info_rest}\n    ${resp}=                Update Access Info      ${storage_id[0]}   ${access_info}\n    Status Should Be        400         ${resp}\n    dictionary should contain value     ${resp.json()}   InvalidIpOrPort\n\n    # Invalid access_info, port\n    ${access_info_rest}=    Create dictionary  host=10.10.10.10  port=${80}   username=user_1  password=pass_1\n    ${access_info}=         Create dictionary  rest=${access_info_rest}\n    ${resp}=                Update Access Info      ${storage_id[0]}   ${access_info}\n    Status Should Be        400         ${resp}\n    dictionary should contain value     ${resp.json()}   InvalidIpOrPort\n\n    # Invalid access_info, username\n    ${access_info_rest}=    Create dictionary  host=10.10.10.10  port=${8080}   username=user  password=pass_1\n    ${access_info}=         Create dictionary  rest=${access_info_rest}\n    ${resp}=                Update Access Info      ${storage_id[0]}   ${access_info}\n    Status Should Be        400         ${resp}\n    dictionary should contain value     ${resp.json()}   InvalidUsernameOrPassword\n\n    # Invalid access_info, password\n    ${access_info_rest}=    Create dictionary  host=10.10.10.10  port=${8080}   username=user_1  password=pass\n    ${access_info}=         Create dictionary  rest=${access_info_rest}\n    ${resp}=                Update Access Info      ${storage_id[0]}   ${access_info}\n    Status Should Be        400         ${resp}\n    dictionary should contain value     ${resp.json()}   InvalidUsernameOrPassword\n\n    # Invalid storage_id\n    ${access_info_rest}=    Create dictionary  host=10.10.10.10  port=${8080}   username=user_1  password=pass_1\n    ${access_info}=         Create dictionary  rest=${access_info_rest}\n    ${resp}=                Update Access Info      123   ${access_info}\n    Status Should Be        404         ${resp}\n    dictionary should contain value     ${resp.json()}   AccessInfoNotFound\n\nUpdate with valid access_info Test\n    [Tags]    DELFIN\n    @{storages}=            Get All Storages\n    ${storage_id}=          Get Value From Json\t    ${storages[0]} \t        $..id\n\n    # Valid access info and  storage_id\n    ${access_info_rest}=    Create dictionary  
host=10.10.10.10  port=${8080}   username=user_1  password=pass_1\n    ${access_info}=         Create dictionary  rest=${access_info_rest}\n    ${resp}=                Update Access Info      ${storage_id[0]}   ${access_info}\n    Status Should Be        200         ${resp}\n    dictionary should contain value     ${resp.json()}   test_vendor\n    dictionary should contain value     ${resp.json()}   test_model\n    dictionary should contain value     ${resp.json()}   ${storage_id[0]}\n\n*** Keywords ***\nUpdate Access Info\n    [Arguments]             ${storage_id}   ${access_info}\n    Create Session          delfin      ${delfin_url}\n    ${resp_update}=         PUT On Session     delfin     storages/${storage_id}/access-info    json=${access_info}   expected_status=any\n    [Return]                ${resp_update}\n\nRegister Test Storage\n    ${test}=                 Load Json From File   ${CURDIR}/test.json\n    ${access_info}=          Get Value From Json   ${test}   $.test_register_access_info\n\n    Create Session          delfin      ${delfin_url}\n    ${resp_register}=       POST On Session     delfin     storages    json=${access_info[0]}\n    Status Should Be                            201    ${resp_register}\n    Dictionary Should Contain Key               ${resp_register.json()}     id\n    ${storage_id}=          Get Value From Json\t     ${resp_register.json()} \t $..id\n    [Return]                ${storage_id[0]}\n\nDelete Storage With ID\n    [Arguments]             ${storage_id}\n    Create Session          delfin      ${delfin_url}\n    ${resp_del}=            DELETE On Session    delfin     storages/${storage_id}\n    Status Should Be        202    ${resp_del}\n    Sleep                   10s\n\n\nGet All Storages\n    Create Session          delfin      ${delfin_url}\n    ${resp_get}=            GET On Session    delfin    storages\n    Status Should Be        200    ${resp_get}\n    ${resp_get_storage}=    Get Value From Json\t        ${resp_get.json()}      $..storages\n    [Return]                ${resp_get_storage[0]}\n\nClose Application\n    @{storages}=            Get All Storages\n    FOR     ${storage}      IN                      @{storages}\n            ${storage_id}=  Get Value From Json\t    ${storage} \t        $..id\n            Delete Storage With ID                  ${storage_id[0]}\n    END\n    Sleep                   10s\n\nOpen Application\n    ${array_id}=            Register Test Storage\n    Sleep                   10s\n\n"
  },
  {
    "path": "delfin/tests/e2e/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/e2e/test.json",
    "content": "{\n  \"test_register_access_info\": {\n    \"vendor\": \"test_vendor\",\n    \"model\": \"test_model\",\n    \"rest\": {\n      \"host\": \"10.10.10.10\",\n      \"port\": 8080,\n      \"username\": \"user_1\",\n      \"password\": \"pass_1\"\n    },\n    \"extra_attributes\": {\n      \"path\": \"storage.json\"\n    }\n  }\n}"
  },
  {
    "path": "delfin/tests/e2e/test_e2e.sh",
    "content": "#!/bin/bash\n# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nTOP_DIR=$(cd $(dirname \"$0\") && pwd)\nDELFIN_DIR=$(cd $TOP_DIR/../../.. && pwd)\n\ncd $DELFIN_DIR\n\nps -ef | grep 'cmd/api.py' | grep -v grep | awk '{print $2}' | xargs kill -9\nps -ef | grep 'cmd/task.py' | grep -v grep | awk '{print $2}' | xargs kill -9\nps -ef | grep 'cmd/alert.py' | grep -v grep | awk '{print $2}' | xargs kill -9\nps -ef | grep 'exporter_server.py' | grep -v grep | awk '{print $2}' | xargs kill -9\n\n# Update setup.py to inject test driver\ncp setup.py setup.py.orig\n\nstr=\"\\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ 'test_vendor test_model = delfin.tests.e2e.testdriver:TestDriver',\"\nsed -i \"/FakeStorageDriver',/ a $str\" $DELFIN_DIR/setup.py\n\ninstaller/install\n\nsource installer/delfin/bin/activate\npip install robotframework\npip install robotframework-requests\npip install robotframework-jsonlibrary\n\nORIG_PATH='\"storage.json\"'\nFILE_PATH=\"${TOP_DIR}/testdriver/storage.json\"\nsed -i \"s|${ORIG_PATH}|\\\"${FILE_PATH}\\\"|g\" $TOP_DIR/test.json\n\nsleep 10\n\nrobot delfin/tests/e2e\n\ndeactivate\n\nmv setup.py.orig  setup.py\necho \"Test completed successfully ...\"\n"
  },
  {
    "path": "delfin/tests/e2e/testdriver/__init__.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nimport datetime\nimport json\nimport time\nfrom oslo_log import log\n\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.common.constants import ResourceType, StorageMetric\nfrom delfin.drivers import driver\nfrom delfin import cryptor\n\n\nLOG = log.getLogger(__name__)\n\nMIN_STORAGE, MAX_STORAGE = 1, 10\nMIN_PERF_VALUES, MAX_PERF_VALUES = 1, 4\n\n\nclass TestDriver(driver.StorageDriver):\n    \"\"\"FakeStorageDriver shows how to implement the StorageDriver,\n    it also plays a role as faker to fake data for being tested by clients.\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        access_info = kwargs\n        if access_info is None:\n            raise exception.InvalidInput('Input access_info is missing')\n\n        self.array_json = access_info.get(\"extra_attributes\").get(\"path\")\n\n        with open(self.array_json) as f:\n            data = json.load(f)\n\n        # Verify Host & Port\n        f_host = data.get(\"access_info\").get(\"rest\").get(\"host\")\n        f_port = data.get(\"access_info\").get(\"rest\").get(\"port\")\n        f_user = data.get(\"access_info\").get(\"rest\").get(\"username\")\n        f_pass = data.get(\"access_info\").get(\"rest\").get(\"password\")\n        a_host = access_info.get(\"rest\").get(\"host\")\n        a_port = access_info.get(\"rest\").get(\"port\")\n        a_user = access_info.get(\"rest\").get(\"username\")\n        a_pass = access_info.get(\"rest\").get(\"password\")\n        a_pass = cryptor.decode(a_pass)\n        if f_host != a_host:\n            raise exception.InvalidIpOrPort\n        if f_port != a_port:\n            raise exception.InvalidIpOrPort\n        if f_user != a_user:\n            raise exception.InvalidUsernameOrPassword\n        if f_pass != a_pass:\n            raise exception.InvalidUsernameOrPassword\n\n    def reset_connection(self, context, **kwargs):\n        pass\n\n    def get_storage(self, context):\n        with open(self.array_json) as f:\n            data = json.load(f)\n            return data.get('storage')\n\n    def _return_json(self, key):\n        with open(self.array_json) as f:\n            data = json.load(f)\n            values = data.get(key)\n            for value in values:\n                value['storage_id'] = self.storage_id\n            return values\n\n    def list_storage_pools(self, ctx):\n        return self._return_json('storage_pools')\n\n    def list_volumes(self, ctx):\n        return self._return_json('volumes')\n\n    def list_controllers(self, ctx):\n        return self._return_json('controllers')\n\n    def list_ports(self, ctx):\n        return self._return_json('ports')\n\n    def list_disks(self, ctx):\n        return self._return_json('disks')\n\n    def list_quotas(self, ctx):\n        return self._return_json('quotas')\n\n    def list_filesystems(self, ctx):\n        return 
self._return_json('filesystems')\n\n    def list_qtrees(self, ctx):\n        return self._return_json('qtrees')\n\n    def list_shares(self, ctx):\n        return self._return_json('shares')\n\n    def add_trap_config(self, context, trap_config):\n        pass\n\n    def remove_trap_config(self, context, trap_config):\n        pass\n\n    @staticmethod\n    def parse_alert(context, alert):\n        pass\n\n    def clear_alert(self, context, alert):\n        pass\n\n    def list_alerts(self, context, query_para=None):\n        alert_list = [{\n            \"storage_id\": self.storage_id,\n            'alert_id': str(random.randint(1111111, 9999999)),\n            'sequence_number': 100,\n            'alert_name': 'SNMP connect failed',\n            'category': 'Fault',\n            'severity': 'Major',\n            'type': 'OperationalViolation',\n            'location': 'NetworkEntity=entity1',\n            'description': \"SNMP connection to the storage failed.\",\n            'recovery_advice': \"Check snmp configurations.\",\n            'occur_time': int(time.time())\n        }, {\n            \"storage_id\": self.storage_id,\n            'alert_id': str(random.randint(1111111, 9999999)),\n            'sequence_number': 101,\n            'alert_name': 'Link state down',\n            'category': 'Fault',\n            'severity': 'Critical',\n            'type': 'CommunicationsAlarm',\n            'location': 'NetworkEntity=entity2',\n            'description': \"Backend link has gone down\",\n            'recovery_advice': \"Recheck the network configuration setting.\",\n            'occur_time': int(time.time())\n        }, {\n            \"storage_id\": self.storage_id,\n            'alert_id': str(random.randint(1111111, 9999999)),\n            'sequence_number': 102,\n            'alert_name': 'Power failure',\n            'category': 'Fault',\n            'severity': 'Fatal',\n            'type': 'OperationalViolation',\n            'location': 'NetworkEntity=entity3',\n            'description': \"Power failure occurred. 
\",\n            'recovery_advice': \"Investigate power connection.\",\n            'occur_time': int(time.time())\n        }, {\n            \"storage_id\": self.storage_id,\n            'alert_id': str(random.randint(1111111, 9999999)),\n            'sequence_number': 103,\n            'alert_name': 'Communication failure',\n            'category': 'Fault',\n            'severity': 'Critical',\n            'type': 'CommunicationsAlarm',\n            'location': 'NetworkEntity=network1',\n            'description': \"Communication link gone down\",\n            'recovery_advice': \"Consult network administrator\",\n            'occur_time': int(time.time())\n        }]\n        return alert_list\n\n    def _get_volume_range(self, start, end):\n        volume_list = []\n\n        for i in range(start, end):\n            total, used, free = self._get_random_capacity()\n            v = {\n                \"name\": \"fake_vol_\" + str(i),\n                \"storage_id\": self.storage_id,\n                \"description\": \"Fake Volume\",\n                \"status\": \"normal\",\n                \"native_volume_id\": \"fake_original_id_\" + str(i),\n                \"wwn\": \"fake_wwn_\" + str(i),\n                \"total_capacity\": total,\n                \"used_capacity\": used,\n                \"free_capacity\": free,\n            }\n            volume_list.append(v)\n        return volume_list\n\n    def _get_random_performance(self):\n        def get_random_timestamp_value():\n            rtv = {}\n            for i in range(MIN_PERF_VALUES, MAX_PERF_VALUES):\n                timestamp = int(float(datetime.datetime.now().timestamp()\n                                      ) * 1000)\n                rtv[timestamp] = random.uniform(1, 100)\n            return rtv\n\n        # The sample performance_params after filling looks like,\n        # performance_params = {timestamp1: value1, timestamp2: value2}\n        performance_params = {}\n        for key in constants.DELFIN_ARRAY_METRICS:\n            performance_params[key] = get_random_timestamp_value()\n        return performance_params\n\n    def collect_array_metrics(self, ctx, storage_id, interval, is_history):\n        rd_array_count = random.randint(MIN_STORAGE, MAX_STORAGE)\n        LOG.info(\"Fake_array_metrics number for %s: %d\" % (\n            storage_id, rd_array_count))\n        array_metrics = []\n        labels = {'storage_id': storage_id, 'resource_type': 'array'}\n        fake_metrics = self._get_random_performance()\n\n        for _ in range(rd_array_count):\n            for key in constants.DELFIN_ARRAY_METRICS:\n                m = constants.metric_struct(name=key, labels=labels,\n                                            values=fake_metrics[key])\n                array_metrics.append(m)\n\n        return array_metrics\n\n    @staticmethod\n    def get_capabilities(context, filters=None):\n        \"\"\"Get capability of supported driver.\"\"\"\n        return {\n            'is_historic': False,\n            'resource_metrics': {\n                ResourceType.STORAGE: {\n                    StorageMetric.THROUGHPUT.name: {\n                        \"unit\": StorageMetric.THROUGHPUT.unit,\n                        \"description\": StorageMetric.THROUGHPUT.description\n                    },\n                    StorageMetric.RESPONSE_TIME.name: {\n                        \"unit\": StorageMetric.RESPONSE_TIME.unit,\n                        \"description\": StorageMetric.RESPONSE_TIME.description\n                    
},\n                    StorageMetric.READ_RESPONSE_TIME.name: {\n                        \"unit\": StorageMetric.READ_RESPONSE_TIME.unit,\n                        \"description\":\n                            StorageMetric.READ_RESPONSE_TIME.description\n                    },\n                    StorageMetric.WRITE_RESPONSE_TIME.name: {\n                        \"unit\": StorageMetric.WRITE_RESPONSE_TIME.unit,\n                        \"description\":\n                            StorageMetric.WRITE_RESPONSE_TIME.description\n                    },\n                    StorageMetric.IOPS.name: {\n                        \"unit\": StorageMetric.IOPS.unit,\n                        \"description\": StorageMetric.IOPS.description\n                    },\n                    StorageMetric.READ_THROUGHPUT.name: {\n                        \"unit\": StorageMetric.READ_THROUGHPUT.unit,\n                        \"description\":\n                            StorageMetric.READ_THROUGHPUT.description\n                    },\n                    StorageMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": StorageMetric.WRITE_THROUGHPUT.unit,\n                        \"description\":\n                            StorageMetric.WRITE_THROUGHPUT.description\n                    },\n                    StorageMetric.READ_IOPS.name: {\n                        \"unit\": StorageMetric.READ_IOPS.unit,\n                        \"description\": StorageMetric.READ_IOPS.description\n                    },\n                    StorageMetric.WRITE_IOPS.name: {\n                        \"unit\": StorageMetric.WRITE_IOPS.unit,\n                        \"description\": StorageMetric.WRITE_IOPS.description\n                    },\n                }\n            }\n        }\n"
  },
  {
    "path": "delfin/tests/e2e/testdriver/storage.json",
    "content": "{\n  \"access_info\": {\n    \"vendor\":\"test_vendor\",\n    \"model\":\"test_model\",\n    \"rest\": {\n      \"host\": \"10.10.10.10\",\n      \"port\": 8080,\n      \"username\": \"user_1\",\n      \"password\": \"pass_1\"\n    },\n    \"extra_attributes\": {\n      \"array_json\": null,\n      \"return_exception\": null\n    }\n  },\n  \"storage\": {\n    \"name\":\"test_storage\",\n    \"description\": \"Test storage array\",\n    \"location\": \"Test location\",\n    \"status\": \"normal\",\n    \"vendor\": \"test_vendor\",\n    \"model\": \"test_model\",\n    \"serial_number\": \"Serial_123_ABC\",\n    \"firmware_version\": \"1.10.100\",\n    \"total_capacity\": 1000000,\n    \"used_capacity\": 750000,\n    \"free_capacity\": 250000,\n    \"raw_capacity\": 2000000,\n    \"subscribed_capacity\": 1500000\n  },\n  \"storage_pools\": [\n    {\n      \"name\": \"test_pool_1\",\n      \"storage_id\": null,\n      \"native_storage_pool_id\": \"test_native_pool_id_1\",\n      \"description\": \"Test Storage Pool\",\n      \"status\": \"normal\",\n      \"storage_type\": \"block\",\n      \"total_capacity\": 10000,\n      \"used_capacity\": 7500,\n      \"free_capacity\": 2500\n    },\n    {\n      \"name\": \"test_pool_2\",\n      \"storage_id\": null,\n      \"native_storage_pool_id\": \"test_native_pool_id_2\",\n      \"description\": \"Test Storage Pool\",\n      \"status\": \"normal\",\n      \"total_capacity\": 50000,\n      \"used_capacity\": 57500,\n      \"free_capacity\": 2500\n    }\n  ],\n  \"volumes\": [\n    {\n      \"name\": \"test_volume_1\",\n      \"storage_id\": null,\n      \"native_volume_id\": \"test_native_volume_id_1\",\n      \"native_storage_pool_id\": \"test_native_pool_id_1\",\n      \"description\": \"Test Storage Pool\",\n      \"status\": \"normal\",\n      \"total_capacity\": 10000,\n      \"used_capacity\": 7500,\n      \"free_capacity\": 2500\n    },\n    {\n      \"name\": \"test_volume_2\",\n      \"storage_id\": null,\n      \"native_volume_id\": \"test_native_volume_id_2\",\n      \"native_storage_pool_id\": \"test_native_pool_id_2\",\n      \"description\": \"Test Storage Pool\",\n      \"status\": \"normal\",\n      \"total_capacity\": 50000,\n      \"used_capacity\": 57500,\n      \"free_capacity\": 2500\n    }\n  ],\n  \"controllers\": [\n    {\n      \"name\": \"test_controller_1\",\n      \"storage_id\": null,\n      \"native_controller_id\": \"test_native_ctrl_id_1\",\n      \"location\": \"location_controller_1\",\n      \"status\": \"normal\",\n      \"memory_size\": 10000,\n      \"cpu_info\": \"Intel Xenon\",\n      \"soft_version\": \"ver_100\"\n    },\n    {\n      \"name\": \"test_controller_2\",\n      \"storage_id\": null,\n      \"native_controller_id\": \"test_native_ctrl_id_2\",\n      \"location\": \"location_controller_2\",\n      \"status\": \"normal\",\n      \"memory_size\": 10000,\n      \"cpu_info\": \"ARM x64\",\n      \"soft_version\": \"ver_200\"\n    }\n  ],\n  \"ports\": [\n    {\n      \"name\": \"test_port_1\",\n      \"storage_id\": null,\n      \"native_port_id\": \"test_native_port_id_1\",\n      \"location\": \"location_port_1\",\n      \"connection_status\": \"connected\",\n      \"health_status\": \"normal\",\n      \"type\": \"fc\",\n      \"logical_type\": \"location_port_1\",\n      \"speed\": 100,\n      \"max_speed\": 1000,\n      \"native_parent_id\": \"test_ports_parent_id_1\",\n      \"wwn\": \"wwn_10000\",\n      \"mac_address\": \"mac_abcd\",\n      \"ipv4\": \"0.0.0.0\",\n      
\"ipv4_mask\": \"255.255.255.0\",\n      \"ipv6\": \"0\",\n      \"ipv6_mask\": \"::\"\n    },\n    {\n      \"name\": \"test_port_2\",\n      \"storage_id\": null,\n      \"native_port_id\": \"test_native_port_id_2\",\n      \"location\": \"location_port_2\",\n      \"connection_status\": \"connected\",\n      \"health_status\": \"normal\",\n      \"type\": \"fc\",\n      \"logical_type\": \"location_port_2\",\n      \"speed\": 100,\n      \"max_speed\": 1000,\n      \"native_parent_id\": \"test_ports_parent_id_2\",\n      \"wwn\": \"wwn_20000\",\n      \"mac_address\": \"mac_abcd\",\n      \"ipv4\": \"100.0.0.0\",\n      \"ipv4_mask\": \"255.255.255.0\",\n      \"ipv6\": \"0\",\n      \"ipv6_mask\": \"::\"\n    }\n  ],\n  \"disks\": [\n    {\n      \"name\": \"test_disk_id_1\",\n      \"storage_id\": null,\n      \"native_disk_id\": \"test_native_disk_id_2\",\n      \"serial_number\": \"serial_1000\",\n      \"manufacturer\": \"Crucial\",\n      \"model\": \"model_SSD3D\",\n      \"firmware\": \"firmware_123\",\n      \"speed\": 1000,\n      \"capacity\": 10000,\n      \"status\": \"normal\",\n      \"physical_type\": \"ssd\",\n      \"logical_type\": \"free\",\n      \"health_score\": 75,\n      \"native_diskgroup_id\": \"test_dg_id_100\",\n      \"location\": \"location_disk_1\"\n    },\n    {\n      \"name\": \"test_disk_id_1\",\n      \"storage_id\": null,\n      \"native_disk_id\": \"test_native_disk_id_2\",\n      \"serial_number\": \"serial_1000\",\n      \"manufacturer\": \"Crucial\",\n      \"model\": \"model_SSD3D\",\n      \"firmware\": \"firmware_123\",\n      \"speed\": 1000,\n      \"capacity\": 10000,\n      \"status\": \"normal\",\n      \"physical_type\": \"ssd\",\n      \"logical_type\": \"free\",\n      \"health_score\": 75,\n      \"native_diskgroup_id\": \"test_dg_id_100\",\n      \"location\": \"location_disk_1\"\n    }\n  ],\n  \"quotas\": [\n    {\n      \"native_quota_id\": \"test_quota_id_1\",\n      \"type\": \"tree\",\n      \"storage_id\": null,\n      \"native_filesystem_id\": \"test_native_filesystem_id_1\",\n      \"native_qtree_id\": \"test_native_qtree_id_1\",\n      \"capacity_hard_limit\": 10000,\n      \"capacity_soft_limit\": 9000,\n      \"file_hard_limit\": 1000,\n      \"file_soft_limit\": 900,\n      \"file_count\": 500,\n      \"used_capacity\": 5000,\n      \"user_group_name\": \"usr0\"\n    },\n    {\n      \"native_quota_id\": \"test_quota_id_2\",\n      \"type\": \"group\",\n      \"storage_id\": null,\n      \"native_filesystem_id\": \"test_native_filesystem_id_2\",\n      \"native_qtree_id\": \"test_native_qtree_id_2\",\n      \"capacity_hard_limit\": 20000,\n      \"capacity_soft_limit\": 9000,\n      \"file_hard_limit\": 2000,\n      \"file_soft_limit\": 900,\n      \"file_count\": 700,\n      \"used_capacity\": 7000,\n      \"user_group_name\": \"grp0\"\n    }\n  ],\n  \"filesystems\": [\n    {\n      \"name\": \"test_filesystem_1\",\n      \"storage_id\": null,\n      \"native_filesystem_id\": \"test_native_fs_id_1\",\n      \"native_pool_id\": \"test_native_pool_id_1\",\n      \"status\": \"normal\",\n      \"type\": \"thin\",\n      \"security_mode\": \"ntfs\",\n      \"total_capacity\": 10000,\n      \"used_capacity\": 9000,\n      \"free_capacity\": 1000,\n      \"worm\": \"non_worm\",\n      \"deduplicated\": true,\n      \"compressed\": true\n    },\n    {\n      \"name\": \"test_filesystem_2\",\n      \"storage_id\": null,\n      \"native_filesystem_id\": \"test_native_fs_id_2\",\n      \"native_pool_id\": 
\"test_native_pool_id_2\",\n      \"status\": \"faulty\",\n      \"type\": \"thick\",\n      \"security_mode\": \"unix\",\n      \"total_capacity\": 10000,\n      \"used_capacity\": 9000,\n      \"free_capacity\": 1000,\n      \"worm\": \"non_worm\",\n      \"deduplicated\": true,\n      \"compressed\": true\n    }\n  ],\n  \"qtrees\": [\n    {\n      \"name\": \"test_qtree_1\",\n      \"storage_id\": null,\n      \"native_qtree_id\": \"test_native_qtree_id_1\",\n      \"native_filesystem_id\": \"test_native_filesystem_id_1\",\n      \"security_mode\": \"ntfs\",\n      \"path\": \"/root/qtree_1\"\n    },\n    {\n      \"name\": \"test_qtree_2\",\n      \"storage_id\": null,\n      \"native_qtree_id\": \"test_native_qtree_id_2\",\n      \"native_filesystem_id\": \"test_native_filesystem_id_2\",\n      \"security_mode\": \"unix\",\n      \"path\": \"/root/qtree_2\"\n    }\n  ],\n  \"shares\": [\n    {\n      \"name\": \"test_share_1\",\n      \"storage_id\": null,\n      \"native_share_id\": \"test_native_share_id_1\",\n      \"native_filesystem_id\": \"test_native_fs_id_1\",\n      \"native_qtree_id\": \"test_native_qtree_id_1\",\n      \"protocol\": \"cifs\",\n      \"path\": \"/root/share_1\"\n    },\n    {\n      \"name\": \"test_share_2\",\n      \"storage_id\": null,\n      \"native_share_id\": \"test_native_share_id_2\",\n      \"native_filesystem_id\": \"test_native_fs_id_2\",\n      \"native_qtree_id\": \"test_native_qtree_id_2\",\n      \"protocol\": \"cifs\",\n      \"path\": \"/root/share_2\"\n    }\n  ]\n}"
  },
  {
    "path": "delfin/tests/unit/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/alert_manager/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/alert_manager/fakes.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport six\n\nfrom retrying import Retrying\nfrom pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher\n\nfrom delfin import exception\nfrom delfin.common import constants\n\n\ndef fake_storage_info():\n    return {\n        'id': 'abcd-1234-56789',\n        'name': 'storage1',\n        'vendor': 'fake vendor',\n        'model': 'fake model',\n        'serial_number': 'serial-1234',\n    }\n\n\ndef fake_alert_model():\n    return {'alert_id': '1050',\n            'alert_name': 'SAMPLE_ALERT_NAME',\n            'severity': constants.Severity.WARNING,\n            'category': constants.Category.NOT_SPECIFIED,\n            'type': constants.EventType.EQUIPMENT_ALARM,\n            'sequence_number': 79,\n            'description': 'Diagnostic event trace triggered.',\n            'recovery_advice': 'NA',\n            'resource_type': constants.DEFAULT_RESOURCE_TYPE,\n            'location': 'Array id=000192601409,Component type=location1 '\n                        'Group,Component name=comp1,Event source=symmetrix',\n            }\n\n\ndef fake_v3_alert_source():\n    return {'storage_id': 'abcd-1234-5678',\n            'version': 'snmpv3',\n            'engine_id': '800000d30300000e112245',\n            'username': 'test1',\n            'auth_key': 'YWJjZDEyMzQ1Njc=',\n            'auth_protocol': 'HMACMD5',\n            'privacy_key': 'YWJjZDEyMzQ1Njc=',\n            'privacy_protocol': 'DES',\n            'host': '127.0.0.1'\n            }\n\n\ndef fake_v3_alert_source_list_with_one():\n    return [\n        {'storage_id': 'abcd-1234-5678',\n         'version': 'snmpv3',\n         'engine_id': '800000d30300000e112245',\n         'username': 'test1',\n         'auth_key': 'YWJjZDEyMzQ1Njc=',\n         'auth_protocol': 'HMACMD5',\n         'privacy_key': 'YWJjZDEyMzQ1Njc=',\n         'privacy_protocol': 'DES'\n         }\n    ]\n\n\ndef null_alert_source_list():\n    return []\n\n\ndef fake_v3_alert_source_list():\n    return [\n        {'storage_id': 'abcd-1234-5678',\n         'version': 'snmpv3',\n         'engine_id': '800000d30300000e112245',\n         'username': 'test1',\n         'auth_key': 'YWJjZDEyMzQ1Njc=',\n         'auth_protocol': 'HMACMD5',\n         'privacy_key': 'YWJjZDEyMzQ1Njc=',\n         'privacy_protocol': 'DES'\n         },\n        {'storage_id': 'abcd-1234-5677',\n         'version': 'snmpv3',\n         'engine_id': '800000d30300000e112246',\n         'username': 'test2',\n         'auth_key': 'YWJjZDEyMzQ1Njc=',\n         'auth_protocol': 'HMACMD5',\n         'privacy_key': 'YWJjZDEyMzQ1Njc=',\n         'privacy_protocol': 'DES'\n         }\n    ]\n\n\ndef parse_alert_exception():\n    raise exception.InvalidResults(\"parse alert failed.\")\n\n\ndef load_config_exception(para):\n    raise exception.InvalidResults(\"load config failed.\")\n\n\ndef mock_add_transport(snmpEngine, transportDomain, transport):\n    snmpEngine.transportDispatcher = 
AsyncoreDispatcher()\n\n\ndef config_delv3_exception(snmp_engine, username, securityEngineId):\n    raise exception.InvalidResults(\"Config delete failed.\")\n\n\ndef mock_cmdgen_get_cmd(self, authData, transportTarget, *varNames, **kwargs):\n    self.snmpEngine.transportDispatcher = AsyncoreDispatcher()\n    return None, None, None, None\n\n\ndef fake_v2_alert_source():\n    return {'storage_id': 'abcd-1234-5678',\n            'version': 'snmpv2c',\n            'community_string': 'YWJjZDEyMzQ1Njc=',\n            }\n\n\ndef fake_retry(*dargs, **dkw):\n    \"\"\"\n    Decorator function that instantiates the Retrying object\n    @param *dargs: positional arguments passed to Retrying object\n    @param **dkw: keyword arguments passed to the Retrying object\n    \"\"\"\n    if dkw.get('stop_max_attempt_number'):\n        dkw['stop_max_attempt_number'] = 1\n\n    # support both @retry and @retry() as valid syntax\n    if len(dargs) == 1 and callable(dargs[0]):\n        def wrap_simple(f):\n\n            @six.wraps(f)\n            def wrapped_f(*args, **kw):\n                return Retrying().call(f, *args, **kw)\n\n            return wrapped_f\n\n        return wrap_simple(dargs[0])\n\n    else:\n        def wrap(f):\n\n            @six.wraps(f)\n            def wrapped_f(*args, **kw):\n                return Retrying(*dargs, **dkw).call(f, *args, **kw)\n\n            return wrapped_f\n\n        return wrap\n\n\nFAKE_STOTRAGE = {\n    'id': 1,\n    'name': 'fake_storage',\n    'vendor': 'fake_vendor',\n    'model': 'fake_model',\n    'serial_number': '12345678',\n}\n"
  },
  {
    "path": "delfin/tests/unit/alert_manager/test_alert_processor.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom unittest import mock\n\nfrom oslo_utils import importutils\n\nfrom delfin import context\nfrom delfin import exception\nfrom delfin.common import constants\nfrom delfin.tests.unit.alert_manager import fakes\n\n\nclass AlertProcessorTestCase(unittest.TestCase):\n    ALERT_PROCESSOR_CLASS = 'delfin.alert_manager.alert_processor' \\\n                            '.AlertProcessor'\n\n    @mock.patch('delfin.task_manager.rpcapi.TaskAPI', mock.Mock())\n    def _get_alert_processor(self):\n        alert_processor_class = importutils.import_class(\n            self.ALERT_PROCESSOR_CLASS)\n        alert_processor = alert_processor_class()\n        return alert_processor\n\n    @mock.patch('delfin.db.storage_get')\n    @mock.patch('delfin.drivers.api.API.parse_alert')\n    @mock.patch('delfin.exporter.base_exporter'\n                '.AlertExporterManager.dispatch')\n    @mock.patch('delfin.context.get_admin_context')\n    def test_process_alert_info_success(self, mock_ctxt, mock_export_model,\n                                        mock_parse_alert, mock_storage):\n        fake_storage_info = fakes.fake_storage_info()\n        input_alert = {'storage_id': 'abcd-1234-56789',\n                       'connUnitEventId': 79,\n                       'connUnitName': '000192601409',\n                       'connUnitEventType':\n                           constants.EventType.EQUIPMENT_ALARM,\n                       'connUnitEventDescr': 'Diagnostic '\n                                             'event trace triggered.',\n                       'connUnitEventSeverity': 'warning',\n                       'connUnitType': 'storage-subsystem',\n                       'asyncEventSource': 'eventsource1',\n                       'asyncEventCode': '1050',\n                       'asyncEventComponentType': '1051',\n                       'asyncEventComponentName': 'comp1'}\n\n        expected_alert_model = {'storage_id': fake_storage_info['id'],\n                                'storage_name': fake_storage_info['name'],\n                                'vendor':\n                                    fake_storage_info['vendor'],\n                                'model': fake_storage_info['model'],\n                                'serial_number':\n                                    fake_storage_info['serial_number'],\n                                'location': 'Array id=000192601409,Component '\n                                            'type=location1 '\n                                            'Group,Component name=comp1,Event '\n                                            'source=symmetrix',\n                                'type': input_alert['connUnitEventType'],\n                                'severity': constants.Severity.WARNING,\n                                'category': constants.Category.NOT_SPECIFIED,\n                                'description':\n                        
            input_alert['connUnitEventDescr'],\n                                'resource_type':\n                                    constants.DEFAULT_RESOURCE_TYPE,\n                                'alert_id': input_alert['asyncEventCode'],\n                                'alert_name': 'SAMPLE_ALERT_NAME',\n                                'sequence_number': 79,\n                                'recovery_advice': 'NA'\n                                }\n        mock_storage.return_value = fake_storage_info\n        expected_ctxt = context.get_admin_context()\n        mock_ctxt.return_value = expected_ctxt\n        mock_parse_alert.return_value = fakes.fake_alert_model()\n        alert_processor_inst = self._get_alert_processor()\n        alert_processor_inst.process_alert_info(input_alert)\n\n        # Verify that model returned by driver is exported\n        mock_export_model.assert_called_once_with(expected_ctxt,\n                                                  [expected_alert_model])\n\n    @mock.patch('delfin.db.storage_get')\n    @mock.patch('delfin.drivers.api.API.parse_alert',\n                fakes.parse_alert_exception)\n    def test_process_alert_info_exception(self, mock_storage):\n        \"\"\" Mock parse alert for raising exception\"\"\"\n        alert = {'storage_id': 'abcd-1234-56789',\n                 'storage_name': 'storage1',\n                 'vendor': 'fake vendor',\n                 'model': 'fake mode',\n                 'serial_number': 'serial-1234'}\n\n        mock_storage.return_value = fakes.fake_storage_info()\n        alert_processor_inst = self._get_alert_processor()\n        self.assertRaisesRegex(exception.InvalidResults,\n                               \"Failed to fill the alert model from driver.\",\n                               alert_processor_inst.process_alert_info, alert)\n\n    @mock.patch('delfin.context.get_admin_context')\n    @mock.patch('delfin.db.storage_get')\n    @mock.patch('delfin.drivers.api.API.parse_alert')\n    @mock.patch('delfin.alert_manager.alert_processor.'\n                'AlertProcessor.sync_storage_alert')\n    def test_process_alert_info_incompletetrap_exception(self, mock_sync_alert,\n                                                         mock_parse_alert,\n                                                         mock_storage,\n                                                         mock_ctxt):\n        \"\"\" Mock parse alert for raising exception\"\"\"\n        alert = {'storage_id': 'abcd-1234-56789',\n                 'storage_name': 'storage1',\n                 'vendor': 'fake vendor',\n                 'model': 'fake mode',\n                 'serial_number': 'serial-1234'}\n\n        mock_ctxt.return_value = context.get_admin_context()\n        mock_storage.return_value = fakes.fake_storage_info()\n        mock_parse_alert.side_effect = exception.IncompleteTrapInformation(\n            'abcd-1234-56789')\n        alert_processor_inst = self._get_alert_processor()\n        alert_processor_inst.process_alert_info(alert)\n\n        self.assertTrue(mock_sync_alert.called)\n"
  },
  {
    "path": "delfin/tests/unit/alert_manager/test_snmp_validator.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport random\nfrom unittest import mock\n\nfrom pysnmp.entity.rfc3413.oneliner import cmdgen\n\nfrom delfin import context\nfrom delfin import db\nfrom delfin import test\nfrom delfin.alert_manager import snmp_validator\nfrom delfin.common import constants\nfrom delfin.exporter import base_exporter\nfrom delfin.tests.unit.alert_manager import fakes\n\n\nclass TestSNMPValidator(test.TestCase):\n    @mock.patch.object(db, 'alert_source_update',\n                       mock.Mock())\n    @mock.patch('delfin.alert_manager.snmp_validator.'\n                'SNMPValidator.validate_connectivity')\n    def test_validate(self, mock_validate_connectivity):\n        validator = snmp_validator.SNMPValidator()\n\n        mock_validate_connectivity.return_value = fakes.fake_v3_alert_source()\n        v3_alert_source_without_engine_id = fakes.fake_v3_alert_source()\n        v3_alert_source_without_engine_id.pop('engine_id')\n        validator.validate(context, v3_alert_source_without_engine_id)\n        self.assertEqual(db.alert_source_update.call_count, 1)\n\n        mock_validate_connectivity.return_value = fakes.fake_v3_alert_source()\n        validator.validate(context,\n                           fakes.fake_v3_alert_source())\n        self.assertEqual(db.alert_source_update.call_count, 1)\n\n    @mock.patch.object(cmdgen.UdpTransportTarget, '_resolveAddr',\n                       mock.Mock())\n    @mock.patch.object(cmdgen.UdpTransportTarget, 'setLocalAddress',\n                       mock.Mock())\n    @mock.patch.object(cmdgen.CommandGenerator, 'getCmd',\n                       fakes.mock_cmdgen_get_cmd)\n    @mock.patch('delfin.db.access_info_get')\n    @mock.patch('pysnmp.entity.observer.MetaObserver.registerObserver')\n    @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher'\n                '.closeDispatcher')\n    def test_validate_connectivity(self, mock_close_dispatcher,\n                                   mock_register_observer,\n                                   mock_access_info_get):\n        # Get a random host\n        a = random.randint(0, 255)\n        b = random.randint(0, 255)\n        c = random.randint(0, 255)\n        d = random.randint(0, 255)\n        host = str(a) + '.' + str(b) + '.' + str(c) + '.' 
+ str(d)\n        # Get a random port\n        port = random.randint(1024, 65535)\n        # snmpv3\n        v3_alert_source = fakes.fake_v3_alert_source()\n        v3_alert_source['host'] = host\n        v3_alert_source['port'] = port\n        mock_access_info_get.return_value = {'model': 'vsp'}\n        snmp_validator.SNMPValidator.validate_connectivity(\n            context.RequestContext(), v3_alert_source)\n        self.assertEqual(mock_close_dispatcher.call_count, 1)\n        self.assertEqual(mock_register_observer.call_count, 1)\n        # snmpv2c\n        v2_alert_source = fakes.fake_v2_alert_source()\n        v2_alert_source['host'] = host\n        v2_alert_source['port'] = port\n        snmp_validator.SNMPValidator.validate_connectivity(\n            context.RequestContext(), v2_alert_source)\n        self.assertEqual(mock_close_dispatcher.call_count, 2)\n        self.assertEqual(mock_register_observer.call_count, 1)\n\n    @mock.patch.object(db, 'storage_get',\n                       mock.Mock(return_value=fakes.FAKE_STOTRAGE))\n    @mock.patch.object(snmp_validator.SNMPValidator,\n                       '_dispatch_snmp_validation_alert', mock.Mock())\n    def test_handle_validation_result(self):\n        validator = snmp_validator.SNMPValidator()\n\n        validator._handle_validation_result(\n            context, fakes.FAKE_STOTRAGE['id'],\n            constants.Category.FAULT)\n        snmp_validator.SNMPValidator._dispatch_snmp_validation_alert \\\n            .assert_called_with(context,\n                                fakes.FAKE_STOTRAGE,\n                                constants.Category.FAULT)\n\n        validator._handle_validation_result(\n            context, fakes.FAKE_STOTRAGE['id'],\n            constants.Category.RECOVERY)\n        snmp_validator.SNMPValidator._dispatch_snmp_validation_alert \\\n            .assert_called_with(context,\n                                fakes.FAKE_STOTRAGE,\n                                constants.Category.RECOVERY)\n\n    @mock.patch.object(base_exporter.AlertExporterManager, 'dispatch',\n                       mock.Mock())\n    def test_dispatch_snmp_validation_alert(self):\n        validator = snmp_validator.SNMPValidator()\n        storage = fakes.FAKE_STOTRAGE\n        alert = {\n            'storage_id': storage['id'],\n            'storage_name': storage['name'],\n            'vendor': storage['vendor'],\n            'model': storage['model'],\n            'serial_number': storage['serial_number'],\n            'alert_id': constants.SNMP_CONNECTION_FAILED_ALERT_ID,\n            'sequence_number': 0,\n            'alert_name': 'SNMP connect failed',\n            'category': constants.Category.FAULT,\n            'severity': constants.Severity.MAJOR,\n            'type': constants.EventType.COMMUNICATIONS_ALARM,\n            'location': 'NetworkEntity=%s' % storage['name'],\n            'description': \"SNMP connection to the storage failed. \"\n                           \"SNMP traps from storage will not be received.\",\n            'recovery_advice': \"1. The network connection is abnormal. \"\n                               \"2. SNMP authentication parameters \"\n                               \"are invalid.\",\n            'occur_time': mock.ANY,\n        }\n\n        validator._dispatch_snmp_validation_alert(\n            context, storage, constants.Category.FAULT)\n        base_exporter.AlertExporterManager(). \\\n            dispatch.assert_called_once_with(context, alert)\n"
  },
  {
    "path": "delfin/tests/unit/alert_manager/test_trap_receiver.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom unittest import mock\n\nimport retrying\nfrom oslo_utils import importutils\nfrom pysnmp.carrier.asyncore.dgram import udp\nfrom pysnmp.entity import engine, config\n\nfrom delfin import exception\nfrom delfin import test\nfrom delfin.tests.unit.alert_manager import fakes\n\nretrying.retry = fakes.fake_retry\n\n\nclass TrapReceiverTestCase(test.TestCase):\n    TRAP_RECEIVER_CLASS = 'delfin.alert_manager.trap_receiver' \\\n                          '.TrapReceiver'\n    DEF_TRAP_RECV_ADDR = '127.0.0.1'\n    DEF_TRAP_RECV_PORT = '162'\n\n    def setUp(self):\n        super(TrapReceiverTestCase, self).setUp()\n        self.alert_rpc_api = mock.Mock()\n        trap_receiver_class = importutils.import_class(\n            self.TRAP_RECEIVER_CLASS)\n        self.trap_receiver = trap_receiver_class(self.DEF_TRAP_RECV_ADDR,\n                                                 self.DEF_TRAP_RECV_PORT)\n        self.mock_object(self.trap_receiver,\n                         'alert_rpc_api', self.alert_rpc_api)\n\n    def _get_trap_receiver(self):\n        return self.trap_receiver\n\n    @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher'\n                '.jobStarted')\n    @mock.patch('delfin.db.api.alert_source_get_all')\n    @mock.patch('pysnmp.carrier.asyncore.dgram.udp.UdpTransport'\n                '.openServerMode', mock.Mock())\n    @mock.patch('delfin.alert_manager.trap_receiver.TrapReceiver'\n                '._mib_builder', mock.Mock())\n    def test_start_success(self, mock_alert_source, mock_dispatcher):\n        mock_alert_source.return_value = {}\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.trap_receiver_address = self.DEF_TRAP_RECV_ADDR\n        trap_receiver_inst.trap_receiver_port = self.DEF_TRAP_RECV_PORT\n        trap_receiver_inst.start()\n\n        # Verify that snmp engine is initialised and transport config is set\n        self.assertTrue(trap_receiver_inst.snmp_engine is not None)\n\n    @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher'\n                '.jobStarted')\n    @mock.patch('delfin.db.api.alert_source_get_all')\n    @mock.patch('delfin.alert_manager.trap_receiver.TrapReceiver'\n                '._load_snmp_config', fakes.load_config_exception)\n    def test_start_with_exception(self, mock_alert_source, mock_dispatcher):\n        mock_alert_source.return_value = {}\n        trap_receiver_inst = self._get_trap_receiver()\n\n        # Mock load config to raise exception\n        self.assertRaisesRegex(ValueError, \"Failed to setup for trap listener\",\n                               trap_receiver_inst.start)\n\n    @mock.patch('pysnmp.carrier.asyncore.dgram.udp.UdpTransport'\n                '.openServerMode', mock.Mock())\n    def test_add_transport_successful(self):\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = 
engine.SnmpEngine()\n        trap_receiver_inst.trap_receiver_address = self.DEF_TRAP_RECV_ADDR\n        trap_receiver_inst.trap_receiver_port = self.DEF_TRAP_RECV_PORT\n        trap_receiver_inst._add_transport()\n        get_transport = config.getTransport(trap_receiver_inst.snmp_engine,\n                                            udp.domainName)\n        # Verify that snmp engine transport config is set after _add_transport\n        self.assertTrue(get_transport is not None)\n\n    def test_add_transport_exception(self):\n        trap_receiver_inst = self._get_trap_receiver()\n        exception_msg = r\"int\\(\\) argument must be a string, \" \\\n                        \"a bytes-like object or a number, not 'NoneType'\"\n        # Mock exception by not initialising snmp engine\n        self.assertRaisesRegex(exception.DelfinException,\n                               exception_msg,\n                               trap_receiver_inst._add_transport)\n\n    @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher'\n                '.jobStarted')\n    @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher'\n                '.closeDispatcher')\n    @mock.patch('delfin.db.api.alert_source_get_all')\n    @mock.patch('pysnmp.carrier.asyncore.dgram.udp.UdpTransport'\n                '.openServerMode', mock.Mock())\n    @mock.patch('pysnmp.entity.config.addTransport', fakes.mock_add_transport)\n    @mock.patch('delfin.alert_manager.trap_receiver.TrapReceiver'\n                '._mib_builder', mock.Mock())\n    def test_stop_with_snmp_engine(self, mock_alert_source,\n                                   mock_close_dispatcher, mock_dispatcher):\n        mock_alert_source.return_value = {}\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.trap_receiver_address = self.DEF_TRAP_RECV_ADDR\n        trap_receiver_inst.trap_receiver_port = self.DEF_TRAP_RECV_PORT\n        trap_receiver_inst.start()\n        trap_receiver_inst.stop()\n\n        # Verify that close dispatcher is called during alert manager stop\n        self.assertTrue(mock_close_dispatcher.called)\n\n    @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher'\n                '.closeDispatcher')\n    def test_stop_without_snmp_engine(self, mock_close_dispatcher):\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.stop()\n\n        # Verify that close dispatcher is not called when engine is not\n        # initialised\n        self.assertFalse(mock_close_dispatcher.called)\n\n    @mock.patch('delfin.cryptor.decode', mock.Mock(return_value='public'))\n    @mock.patch('delfin.alert_manager.snmp_validator.SNMPValidator.validate')\n    @mock.patch('pysnmp.entity.config.addV1System')\n    def test_sync_snmp_config_add_v2_version(self, mock_add_config,\n                                             mock_validator):\n        ctxt = {}\n        alert_config = {'storage_id': 'abcd-1234-5678',\n                        'version': 'snmpv2c',\n                        'community_string': b'public'}\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = engine.SnmpEngine()\n        trap_receiver_inst.sync_snmp_config(ctxt,\n                                            snmp_config_to_add=alert_config)\n\n        # Verify that config is added to snmp engine\n        # Storage_id is internally modified to remove '-' while adding\n        
mock_add_config.assert_called_once_with(trap_receiver_inst.snmp_engine,\n                                                'abcd12345678',\n                                                alert_config[\n                                                    'community_string'],\n                                                contextName=alert_config[\n                                                    'community_string'])\n        mock_validator.assert_called_once_with(ctxt, alert_config)\n\n    @mock.patch('pysnmp.entity.config.delV1System')\n    def test_sync_snmp_config_del_v2_version(self, mock_del_config):\n        ctxt = {}\n        alert_config = {'storage_id': 'abcd-1234-5678',\n                        'version': 'snmpv2c',\n                        'community_string': 'public'}\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = engine.SnmpEngine()\n        trap_receiver_inst.sync_snmp_config(ctxt,\n                                            snmp_config_to_del=alert_config)\n\n        # Verify that config is deleted from snmp engine\n        # Storage_id is internally modified to remove '-' while deleting\n        mock_del_config.assert_called_once_with(trap_receiver_inst.snmp_engine,\n                                                'abcd12345678')\n\n    def test_sync_snmp_config_add_invalid_version(self):\n        ctxt = {}\n        alert_source_config = {'storage_id': 'abcd-1234-5678',\n                               'version': 'snmpv4',\n                               'community_string': b'public'}\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = engine.SnmpEngine()\n        self.assertRaisesRegex(exception.InvalidSNMPConfig, \"Invalid snmp \"\n                                                            \"version\",\n                               trap_receiver_inst.sync_snmp_config, ctxt,\n                               snmp_config_to_add=alert_source_config)\n\n    @mock.patch('pysnmp.entity.config.addV3User')\n    def test_sync_snmp_config_add_v3_version(self, mock_add_config):\n        ctxt = {}\n        alert_config = fakes.fake_v3_alert_source()\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = engine.SnmpEngine()\n        trap_receiver_inst.sync_snmp_config(ctxt,\n                                            snmp_config_to_add=alert_config)\n\n        # Verify that addV3User to add config to engine\n        self.assertTrue(mock_add_config.called)\n\n    @mock.patch('pysnmp.entity.config.delV3User')\n    def test_sync_snmp_config_del_v3_version(self, mock_del_config):\n        ctxt = {}\n        alert_config = fakes.fake_v3_alert_source()\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = engine.SnmpEngine()\n        trap_receiver_inst.sync_snmp_config(ctxt,\n                                            snmp_config_to_add=alert_config)\n        trap_receiver_inst.sync_snmp_config(ctxt,\n                                            snmp_config_to_del=alert_config)\n\n        # Verify that delV3User to del config from engine\n        self.assertTrue(mock_del_config.called)\n\n    @mock.patch('pysnmp.entity.config.delV3User', fakes.config_delv3_exception)\n    @mock.patch('logging.LoggerAdapter.warning')\n    def test_sync_snmp_config_del_exception(self, mock_log_warning):\n        ctxt = {}\n        alert_config = fakes.fake_v3_alert_source()\n        trap_receiver_inst = 
self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = engine.SnmpEngine()\n        trap_receiver_inst.sync_snmp_config(ctxt,\n                                            snmp_config_to_del=alert_config)\n        self.assertTrue(mock_log_warning.called)\n\n    def test_sync_snmp_config_invalid_auth_protocol(self):\n        ctxt = {}\n        alert_source_config = fakes.fake_v3_alert_source()\n        alert_source_config['auth_protocol'] = 'invalid_auth'\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = engine.SnmpEngine()\n        self.assertRaisesRegex(exception.InvalidSNMPConfig, \"Invalid \"\n                                                            \"auth_protocol\",\n                               trap_receiver_inst.sync_snmp_config, ctxt,\n                               snmp_config_to_add=alert_source_config)\n\n    def test_sync_snmp_config_invalid_priv_protocol(self):\n        ctxt = {}\n        alert_source_config = fakes.fake_v3_alert_source()\n        alert_source_config['privacy_protocol'] = 'invalid_priv'\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = engine.SnmpEngine()\n        self.assertRaisesRegex(exception.InvalidSNMPConfig, \"Invalid \"\n                                                            \"privacy_protocol\",\n                               trap_receiver_inst.sync_snmp_config, ctxt,\n                               snmp_config_to_add=alert_source_config)\n\n    @mock.patch('pysnmp.entity.config.addV3User')\n    @mock.patch('delfin.db.api.alert_source_get_all')\n    def test_load_snmp_config(self, mock_alert_source_list, mock_add_config):\n        mock_alert_source_list.return_value = fakes.fake_v3_alert_source_list()\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = engine.SnmpEngine()\n        trap_receiver_inst._load_snmp_config()\n\n        # Verify that config is added to engine\n        self.assertTrue(mock_add_config.called)\n\n    @mock.patch('delfin.db.alert_source_get_all')\n    def test_get_alert_source_by_host_success(self, mock_alert_source_list):\n        # alert_source_config = fakes.fake_v3_alert_source()\n        expected_alert_source = {'storage_id': 'abcd-1234-5678',\n                                 'version': 'snmpv3',\n                                 'engine_id': '800000d30300000e112245',\n                                 'username': 'test1',\n                                 'auth_key': 'YWJjZDEyMzQ1Njc=',\n                                 'auth_protocol': 'HMACMD5',\n                                 'privacy_key': 'YWJjZDEyMzQ1Njc=',\n                                 'privacy_protocol': 'DES'\n                                 }\n        mock_alert_source_list.return_value = fakes. \\\n            fake_v3_alert_source_list_with_one()\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = engine.SnmpEngine()\n        alert_source = trap_receiver_inst. 
\\\n            _get_alert_source_by_host('127.0.0.1')\n        self.assertDictEqual(expected_alert_source, alert_source)\n\n    @mock.patch('delfin.db.alert_source_get_all')\n    def test_get_alert_source_by_host_without_storage(self,\n                                                      mock_alert_source_list):\n        # alert_source_config = fakes.fake_v3_alert_source()\n        mock_alert_source_list.return_value = fakes.null_alert_source_list()\n        trap_receiver_inst = self._get_trap_receiver()\n        trap_receiver_inst.snmp_engine = engine.SnmpEngine()\n        self.assertRaisesRegex(exception.AlertSourceNotFoundWithHost, \"\",\n                               trap_receiver_inst._get_alert_source_by_host,\n                               '127.0.0.1')\n"
  },
  {
    "path": "delfin/tests/unit/api/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/api/extensions/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/api/extensions/foxinsocks.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2011 OpenStack LLC.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nfrom delfin.api import extensions\n\n\nclass FoxInSocksController(object):\n\n    def index(self, req):\n        return \"Try to say this Mr. Knox, sir...\"\n\n\nclass Foxinsocks(extensions.ExtensionDescriptor):\n    \"\"\"The Fox In Socks Extension.\"\"\"\n\n    name = \"Fox In Socks\"\n    alias = \"FOXNSOX\"\n    namespace = \"http://www.fox.in.socks/api/ext/pie/v1.0\"\n    updated = \"2011-01-22T13:25:27-06:00\"\n\n    def __init__(self, ext_mgr):\n        ext_mgr.register(self)\n\n    def get_resources(self):\n        resources = []\n        resource = extensions.ResourceExtension('foxnsocks',\n                                                FoxInSocksController())\n        resources.append(resource)\n        return resources\n\n    def get_controller_extensions(self):\n        return []\n"
  },
  {
    "path": "delfin/tests/unit/api/fakes.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 OpenStack LLC.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport routes\nimport webob.dec\nimport webob.request\nfrom oslo_service import wsgi\n\nfrom delfin import context\nfrom delfin import exception\nfrom delfin.api.common import wsgi as os_wsgi\nfrom delfin.common import config, constants  # noqa\nfrom delfin.common.constants import ResourceType, StorageMetric, \\\n    StoragePoolMetric, VolumeMetric, ControllerMetric, PortMetric, \\\n    DiskMetric, FileSystemMetric\nfrom delfin.db.sqlalchemy import models\n\n\n@webob.dec.wsgify\ndef fake_wsgi(self, req):\n    return self.application\n\n\nclass TestRouter(wsgi.Router):\n    def __init__(self, controller):\n        mapper = routes.Mapper()\n        mapper.resource(\"test\", \"tests\",\n                        controller=os_wsgi.Resource(controller))\n        super(TestRouter, self).__init__(mapper)\n\n\nclass HTTPRequest(os_wsgi.Request):\n\n    @classmethod\n    def blank(cls, *args, **kwargs):\n        if not kwargs.get('base_url'):\n            kwargs['base_url'] = 'http://localhost/v1'\n        use_admin_context = kwargs.pop('use_admin_context', False)\n        out = os_wsgi.Request.blank(*args, **kwargs)\n        out.environ['delfin.context'] = context.RequestContext(\n            is_admin=use_admin_context)\n        return out\n\n\ndef fake_storages_get_all(context, marker=None, limit=None, sort_keys=None,\n                          sort_dirs=None, filters=None, offset=None):\n    return [\n        {\n            \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n            \"created_at\": \"2020-06-09T08:59:48.710890\",\n            \"free_capacity\": 1045449,\n            \"updated_at\": \"2020-06-09T08:59:48.769470\",\n            \"name\": \"fake_driver\",\n            \"location\": \"HK\",\n            \"firmware_version\": \"1.0.0\",\n            \"vendor\": \"fake_vendor\",\n            \"status\": \"normal\",\n            \"sync_status\": constants.SyncStatus.SYNCED,\n            \"model\": \"fake_model\",\n            \"description\": \"it is a fake driver.\",\n            \"serial_number\": \"2102453JPN12KA0000113\",\n            \"used_capacity\": 3126,\n            \"total_capacity\": 1048576,\n            'raw_capacity': 1610612736000,\n            'subscribed_capacity': 219902325555200\n        },\n        {\n            \"id\": \"277a1d8f-a36e-423e-bdd9-db154f32c289\",\n            \"created_at\": \"2020-06-09T08:58:23.008821\",\n            \"free_capacity\": 1045449,\n            \"updated_at\": \"2020-06-09T08:58:23.033601\",\n            \"name\": \"fake_driver\",\n            \"location\": \"HK\",\n            \"firmware_version\": \"1.0.0\",\n            \"vendor\": \"fake_vendor\",\n            \"status\": \"normal\",\n            \"sync_status\": constants.SyncStatus.SYNCED,\n            \"model\": \"fake_model\",\n            \"description\": \"it is a fake driver.\",\n     
       \"serial_number\": \"2102453JPN12KA0000112\",\n            \"used_capacity\": 3126,\n            \"total_capacity\": 1048576,\n            'raw_capacity': 1610612736000,\n            'subscribed_capacity': 219902325555200\n\n        }\n    ]\n\n\ndef fake_storages_get_all_with_filter(\n        context, marker=None, limit=None,\n        sort_keys=None, sort_dirs=None, filters=None, offset=None):\n    return [\n        {\n            \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n            \"created_at\": \"2020-06-09T08:59:48.710890\",\n            \"free_capacity\": 1045449,\n            \"updated_at\": \"2020-06-09T08:59:48.769470\",\n            \"name\": \"fake_driver\",\n            \"location\": \"HK\",\n            \"firmware_version\": \"1.0.0\",\n            \"vendor\": \"fake_vendor\",\n            \"status\": \"normal\",\n            \"sync_status\": constants.SyncStatus.SYNCED,\n            \"model\": \"fake_model\",\n            \"description\": \"it is a fake driver.\",\n            \"serial_number\": \"2102453JPN12KA0000113\",\n            \"used_capacity\": 3126,\n            \"total_capacity\": 1048576,\n            'raw_capacity': 1610612736000,\n            'subscribed_capacity': 219902325555200\n        }\n    ]\n\n\ndef fake_storages_show(context, storage_id):\n    return {\n        \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n        \"created_at\": \"2020-06-09T08:59:48.710890\",\n        \"free_capacity\": 1045449,\n        \"updated_at\": \"2020-06-09T08:59:48.769470\",\n        \"name\": \"fake_driver\",\n        \"location\": \"HK\",\n        \"firmware_version\": \"1.0.0\",\n        \"vendor\": \"fake_vendor\",\n        \"status\": \"normal\",\n        \"sync_status\": constants.SyncStatus.SYNCED,\n        \"model\": \"fake_model\",\n        \"description\": \"it is a fake driver.\",\n        \"serial_number\": \"2102453JPN12KA0000113\",\n        \"used_capacity\": 3126,\n        \"total_capacity\": 1048576,\n        'raw_capacity': 1610612736000,\n        'subscribed_capacity': 219902325555200\n    }\n\n\ndef fake_access_info_get_all(context, marker=None, limit=None, sort_keys=None,\n                             sort_dirs=None, filters=None, offset=None):\n    return [\n        {\n            'created_at': \"2020-06-09T08:59:48.710890\",\n            'storage_id': '5f5c806d-2e65-473c-b612-345ef43f0642',\n            'model': 'fake_driver',\n            'vendor': 'fake_storage',\n            'rest': {\n                'host': '10.0.0.76',\n                'port': 1234,\n                'username': 'admin',\n                'password': b'YWJjZA=='\n            },\n            'extra_attributes': {'array_id': '0001234567891'},\n            'updated_at': None\n        }\n    ]\n\n\ndef fake_sync(self, req, id):\n    pass\n\n\ndef fake_v3_alert_source_config():\n    return {'host': '127.0.0.1',\n            'version': 'snmpv3',\n            'security_level': 'authPriv',\n            'engine_id': '800000d30300000e112245',\n            'username': 'test1',\n            'auth_key': 'abcd123456',\n            'auth_protocol': 'HMACMD5',\n            'privacy_key': 'abcd123456',\n            'privacy_protocol': 'DES',\n            'context_name': 'NA',\n            'retry_num': 2,\n            'expiration': 2,\n            'port': 161\n            }\n\n\ndef fake_v2_alert_source_config():\n    return {'host': '127.0.0.1',\n            'version': 'snmpv2c',\n            'community_string': 'public',\n            'context_name': 'NA',\n            
'retry_num': 2,\n            'expiration': 2,\n            'port': 161\n            }\n\n\ndef fake_v3_alert_source():\n    alert_source = models.AlertSource()\n    alert_source.host = '127.0.0.1'\n    alert_source.storage_id = 'abcd-1234-5678'\n    alert_source.version = 'snmpv3'\n    alert_source.engine_id = '800000d30300000e112245'\n    alert_source.username = 'test1'\n    alert_source.auth_key = 'YWJjZDEyMzQ1Njc='\n    alert_source.auth_protocol = 'HMACMD5'\n    alert_source.privacy_key = 'YWJjZDEyMzQ1Njc='\n    alert_source.privacy_protocol = 'DES'\n    alert_source.port = 161\n    alert_source.context_name = \"\"\n    alert_source.retry_num = 1\n    alert_source.expiration = 1\n    alert_source.created_at = '2020-06-15T09:50:31.698956'\n    alert_source.updated_at = '2020-06-15T09:50:31.698956'\n    return alert_source\n\n\ndef fake_all_snmp_configs():\n    alert_source = models.AlertSource()\n    alert_source.host = '127.0.0.1'\n    alert_source.storage_id = 'abcd-1234-5678'\n    alert_source.version = 'snmpv3'\n    alert_source.engine_id = '800000d30300000e112245'\n    alert_source.username = 'test1'\n    alert_source.auth_key = 'YWJjZDEyMzQ1Njc='\n    alert_source.auth_protocol = 'HMACMD5'\n    alert_source.privacy_key = 'YWJjZDEyMzQ1Njc='\n    alert_source.privacy_protocol = 'DES'\n    alert_source.port = 161\n    alert_source.context_name = \"\"\n    alert_source.retry_num = 1\n    alert_source.expiration = 1\n    alert_source.created_at = '2020-06-15T09:50:31.698956'\n    alert_source.updated_at = '2020-06-15T09:50:31.698956'\n    return [alert_source]\n\n\ndef fake_v3_alert_source_noauth_nopriv():\n    alert_source = models.AlertSource()\n    alert_source.host = '127.0.0.1'\n    alert_source.storage_id = 'abcd-1234-5678'\n    alert_source.version = 'snmpv3'\n    alert_source.security_level = 'noAuthnoPriv'\n    alert_source.engine_id = '800000d30300000e112245'\n    alert_source.username = 'test1'\n    alert_source.port = 161\n    alert_source.context_name = \"\"\n    alert_source.retry_num = 1\n    alert_source.expiration = 1\n    alert_source.created_at = '2020-06-15T09:50:31.698956'\n    alert_source.updated_at = '2020-06-15T09:50:31.698956'\n    return alert_source\n\n\ndef fake_v3_alert_source_auth_nopriv():\n    alert_source = models.AlertSource()\n    alert_source.host = '127.0.0.1'\n    alert_source.storage_id = 'abcd-1234-5678'\n    alert_source.version = 'snmpv3'\n    alert_source.security_level = 'authNoPriv'\n    alert_source.auth_protocol = 'HMACMD5'\n    alert_source.engine_id = '800000d30300000e112245'\n    alert_source.username = 'test1'\n    alert_source.port = 161\n    alert_source.context_name = \"\"\n    alert_source.retry_num = 1\n    alert_source.expiration = 1\n    alert_source.created_at = '2020-06-15T09:50:31.698956'\n    alert_source.updated_at = '2020-06-15T09:50:31.698956'\n    return alert_source\n\n\ndef fake_v2_alert_source():\n    alert_source = models.AlertSource()\n    alert_source.host = '127.0.0.1'\n    alert_source.storage_id = 'abcd-1234-5678'\n    alert_source.version = 'snmpv2c'\n    alert_source.community_string = 'public'\n    alert_source.port = 161\n    alert_source.context_name = \"\"\n    alert_source.retry_num = 1\n    alert_source.expiration = 1\n    alert_source.created_at = '2020-06-15T09:50:31.698956'\n    alert_source.updated_at = '2020-06-15T09:50:31.698956'\n    return alert_source\n\n\ndef alert_source_get_exception(ctx, storage_id):\n    raise exception.AlertSourceNotFound('abcd-1234-5678')\n\n\ndef 
fake_access_info_show(context, storage_id):\n    access_info = models.AccessInfo()\n\n    access_info.updated_at = '2020-06-15T09:50:31.698956'\n    access_info.storage_id = '865ffd4d-f1f7-47de-abc3-5541ef44d0c1'\n    access_info.created_at = '2020-06-15T09:50:31.698956'\n    access_info.vendor = 'fake_storage'\n    access_info.model = 'fake_driver'\n    access_info.rest = {\n        'host': '10.0.0.0',\n        'username': 'admin',\n        'password': 'YWJjZA==',\n        'port': 1234\n    }\n    access_info.extra_attributes = {'array_id': '0001234567897'}\n\n    return access_info\n\n\ndef fake_access_infos_show_all(context):\n    access_info = models.AccessInfo()\n\n    access_info.updated_at = '2020-06-15T09:50:31.698956'\n    access_info.storage_id = '865ffd4d-f1f7-47de-abc3-5541ef44d0c1'\n    access_info.created_at = '2020-06-15T09:50:31.698956'\n    access_info.vendor = 'fake_storage'\n    access_info.model = 'fake_driver'\n    access_info.rest = {\n        'host': '10.0.0.0',\n        'username': 'admin',\n        'password': 'YWJjZA==',\n        'port': 1234\n    }\n    access_info.extra_attributes = {'array_id': '0001234567897'}\n\n    return [access_info]\n\n\ndef fake_update_access_info(self, context):\n    access_info = models.AccessInfo()\n\n    access_info.updated_at = '2020-06-15T09:50:31.698956'\n    access_info.storage_id = '865ffd4d-f1f7-47de-abc3-5541ef44d0c1'\n    access_info.created_at = '2020-06-15T09:50:31.698956'\n    access_info.vendor = 'fake_storage'\n    access_info.model = 'fake_driver'\n    access_info.rest = {\n        'host': '10.0.0.0',\n        'username': 'admin_modified',\n        'password': 'YWJjZA==',\n        'port': 1234\n    }\n    access_info.extra_attributes = {'array_id': '0001234567897'}\n\n    return access_info\n\n\ndef fake_volume_get_all(context, marker=None,\n                        limit=None, sort_keys=None,\n                        sort_dirs=None, filters=None, offset=None):\n    return [\n        {\n            \"created_at\": \"2020-06-10T07:17:31.157079\",\n            \"updated_at\": \"2020-06-10T07:17:31.157079\",\n            \"id\": \"d7fe425b-fddc-4ba4-accb-4343c142dc47\",\n            \"name\": \"004DF\",\n            \"storage_id\": \"5f5c806d-2e65-473c-b612-345ef43f0642\",\n            \"native_storage_pool_id\": \"SRP_1\",\n            \"description\": \"fake_storage 'thin device' volume\",\n            \"status\": \"available\",\n            \"native_volume_id\": \"004DF\",\n            \"wwn\": \"60000970000297801855533030344446\",\n            \"type\": 'thin',\n            \"total_capacity\": 1075838976,\n            \"used_capacity\": 0,\n            \"free_capacity\": 1075838976,\n            \"compressed\": True,\n            \"deduplicated\": False\n        },\n        {\n            \"created_at\": \"2020-06-10T07:17:31.157079\",\n            \"updated_at\": \"2020-06-10T07:17:31.157079\",\n            \"id\": \"dad84a1f-db8d-49ab-af40-048fc3544c12\",\n            \"name\": \"004E0\",\n            \"storage_id\": \"5f5c806d-2e65-473c-b612-345ef43f0642\",\n            \"native_storage_pool_id\": \"SRP_1\",\n            \"description\": \"fake_storage 'thin device' volume\",\n            \"status\": \"available\",\n            \"native_volume_id\": \"004E0\",\n            \"wwn\": \"60000970000297801855533030344530\",\n            \"type\": 'thin',\n            \"total_capacity\": 1075838976,\n            \"used_capacity\": 0,\n            \"free_capacity\": 1075838976,\n            \"compressed\": True,\n           
 \"deduplicated\": False\n        }\n    ]\n\n\ndef fake_volume_show(context, volume_id):\n    return {\n        \"created_at\": \"2020-06-10T07:17:31.157079\",\n        \"updated_at\": \"2020-06-10T07:17:31.157079\",\n        \"id\": \"d7fe425b-fddc-4ba4-accb-4343c142dc47\",\n        \"name\": \"004DF\",\n        \"storage_id\": \"5f5c806d-2e65-473c-b612-345ef43f0642\",\n        \"native_storage_pool_id\": \"SRP_1\",\n        \"description\": \"fake_storage 'thin device' volume\",\n        \"status\": \"available\",\n        \"native_volume_id\": \"004DF\",\n        \"wwn\": \"60000970000297801855533030344446\",\n        \"type\": 'thin',\n        \"total_capacity\": 1075838976,\n        \"used_capacity\": 0,\n        \"free_capacity\": 1075838976,\n        \"compressed\": True,\n        \"deduplicated\": False\n    }\n\n\ndef fake_storage_pool_get_all(context, marker=None,\n                              limit=None, sort_keys=None,\n                              sort_dirs=None, filters=None, offset=None):\n    return [\n        {\n            \"created_at\": \"2020-06-10T07:17:08.707356\",\n            \"updated_at\": \"2020-06-10T07:17:08.707356\",\n            \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n            \"name\": \"SRP_1\",\n            \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n            \"native_storage_pool_id\": \"SRP_1\",\n            \"description\": \"fake storage Pool\",\n            \"status\": \"normal\",\n            \"storage_type\": \"block\",\n            \"total_capacity\": 26300318136401,\n            \"used_capacity\": 19054536509358,\n            \"free_capacity\": 7245781627043,\n            'subscribed_capacity': 219902325555200\n        }\n    ]\n\n\ndef fake_storage_pool_show(context, storage_pool_id):\n    return {\n        \"created_at\": \"2020-06-10T07:17:08.707356\",\n        \"updated_at\": \"2020-06-10T07:17:08.707356\",\n        \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n        \"name\": \"SRP_1\",\n        \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n        \"native_storage_pool_id\": \"SRP_1\",\n        \"description\": \"fake storage Pool\",\n        \"status\": \"normal\",\n        \"storage_type\": \"block\",\n        \"total_capacity\": 26300318136401,\n        \"used_capacity\": 19054536509358,\n        \"free_capacity\": 7245781627043,\n        'subscribed_capacity': 219902325555200\n    }\n\n\ndef fake_storage_get_exception(ctx, storage_id):\n    raise exception.StorageNotFound(storage_id)\n\n\ndef fake_getcmd_exception(auth_data, transport_target, *var_names, **kwargs):\n    return \"Connection failed\", None, None, None\n\n\ndef fake_getcmd_success(auth_data, transport_target, *var_names, **kwargs):\n    return None, None, None, None\n\n\ndef fake_get_capabilities(context, storage_id):\n    return {'is_historic': False,\n            'resource_metrics': {\n                ResourceType.STORAGE: {\n                    StorageMetric.THROUGHPUT.name: {\n                        \"unit\": StorageMetric.THROUGHPUT.unit,\n                        \"description\": StorageMetric.THROUGHPUT.description\n                    },\n                    StorageMetric.RESPONSE_TIME.name: {\n                        \"unit\": StorageMetric.RESPONSE_TIME.unit,\n                        \"description\": StorageMetric.RESPONSE_TIME.description\n                    },\n                    StorageMetric.READ_RESPONSE_TIME.name: {\n                        \"unit\": StorageMetric.READ_RESPONSE_TIME.unit,\n                
        \"description\":\n                            StorageMetric.READ_RESPONSE_TIME.description\n                    },\n                    StorageMetric.WRITE_RESPONSE_TIME.name: {\n                        \"unit\": StorageMetric.WRITE_RESPONSE_TIME.unit,\n                        \"description\":\n                            StorageMetric.WRITE_RESPONSE_TIME.description\n                    },\n                    StorageMetric.IOPS.name: {\n                        \"unit\": StorageMetric.IOPS.unit,\n                        \"description\": StorageMetric.IOPS.description\n                    },\n                    StorageMetric.READ_THROUGHPUT.name: {\n                        \"unit\": StorageMetric.READ_THROUGHPUT.unit,\n                        \"description\":\n                            StorageMetric.READ_THROUGHPUT.description\n                    },\n                    StorageMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": StorageMetric.WRITE_THROUGHPUT.unit,\n                        \"description\":\n                            StorageMetric.WRITE_THROUGHPUT.description\n                    },\n                    StorageMetric.READ_IOPS.name: {\n                        \"unit\": StorageMetric.READ_IOPS.unit,\n                        \"description\": StorageMetric.READ_IOPS.description\n                    },\n                    StorageMetric.WRITE_IOPS.name: {\n                        \"unit\": StorageMetric.WRITE_IOPS.unit,\n                        \"description\": StorageMetric.WRITE_IOPS.description\n                    },\n                },\n                ResourceType.STORAGE_POOL: {\n                    StoragePoolMetric.THROUGHPUT.name: {\n                        \"unit\": StoragePoolMetric.THROUGHPUT.unit,\n                        \"description\": StoragePoolMetric.THROUGHPUT.description\n                    },\n                    StoragePoolMetric.RESPONSE_TIME.name: {\n                        \"unit\": StoragePoolMetric.RESPONSE_TIME.unit,\n                        \"description\":\n                            StoragePoolMetric.RESPONSE_TIME.description\n                    },\n                    StoragePoolMetric.IOPS.name: {\n                        \"unit\": StoragePoolMetric.IOPS.unit,\n                        \"description\": StoragePoolMetric.IOPS.description\n                    },\n                    StoragePoolMetric.READ_THROUGHPUT.name: {\n                        \"unit\": StoragePoolMetric.READ_THROUGHPUT.unit,\n                        \"description\":\n                            StoragePoolMetric.READ_THROUGHPUT.description\n                    },\n                    StoragePoolMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": StoragePoolMetric.WRITE_THROUGHPUT.unit,\n                        \"description\":\n                            StoragePoolMetric.WRITE_THROUGHPUT.description\n                    },\n                    StoragePoolMetric.READ_IOPS.name: {\n                        \"unit\": StoragePoolMetric.READ_IOPS.unit,\n                        \"description\": StoragePoolMetric.READ_IOPS.description\n                    },\n                    StoragePoolMetric.WRITE_IOPS.name: {\n                        \"unit\": StoragePoolMetric.WRITE_IOPS.unit,\n                        \"description\": StoragePoolMetric.WRITE_IOPS.description\n                    },\n\n                },\n                ResourceType.VOLUME: {\n                    VolumeMetric.THROUGHPUT.name: {\n                        
\"unit\": VolumeMetric.THROUGHPUT.unit,\n                        \"description\": VolumeMetric.THROUGHPUT.description\n                    },\n                    VolumeMetric.RESPONSE_TIME.name: {\n                        \"unit\": VolumeMetric.RESPONSE_TIME.unit,\n                        \"description\": VolumeMetric.RESPONSE_TIME.description\n                    },\n                    VolumeMetric.READ_RESPONSE_TIME.name: {\n                        \"unit\": VolumeMetric.READ_RESPONSE_TIME.unit,\n                        \"description\":\n                            VolumeMetric.READ_RESPONSE_TIME.description\n                    },\n                    VolumeMetric.WRITE_RESPONSE_TIME.name: {\n                        \"unit\": VolumeMetric.WRITE_RESPONSE_TIME.unit,\n                        \"description\":\n                            VolumeMetric.WRITE_RESPONSE_TIME.description\n                    },\n                    VolumeMetric.IOPS.name: {\n                        \"unit\": VolumeMetric.IOPS.unit,\n                        \"description\": VolumeMetric.IOPS.description\n                    },\n                    VolumeMetric.READ_THROUGHPUT.name: {\n                        \"unit\": VolumeMetric.READ_THROUGHPUT.unit,\n                        \"description\": VolumeMetric.READ_THROUGHPUT.description\n                    },\n                    VolumeMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": VolumeMetric.WRITE_THROUGHPUT.unit,\n                        \"description\":\n                            VolumeMetric.WRITE_THROUGHPUT.description\n                    },\n                    VolumeMetric.READ_IOPS.name: {\n                        \"unit\": VolumeMetric.READ_IOPS.unit,\n                        \"description\": VolumeMetric.READ_IOPS.description\n                    },\n                    VolumeMetric.WRITE_IOPS.name: {\n                        \"unit\": VolumeMetric.WRITE_IOPS.unit,\n                        \"description\": VolumeMetric.WRITE_IOPS.description\n                    },\n                    VolumeMetric.CACHE_HIT_RATIO.name: {\n                        \"unit\": VolumeMetric.CACHE_HIT_RATIO.unit,\n                        \"description\": VolumeMetric.CACHE_HIT_RATIO.description\n                    },\n                    VolumeMetric.READ_CACHE_HIT_RATIO.name: {\n                        \"unit\": VolumeMetric.READ_CACHE_HIT_RATIO.unit,\n                        \"description\":\n                            VolumeMetric.READ_CACHE_HIT_RATIO.description\n                    },\n                    VolumeMetric.WRITE_CACHE_HIT_RATIO.name: {\n                        \"unit\": VolumeMetric.WRITE_CACHE_HIT_RATIO.unit,\n                        \"description\":\n                            VolumeMetric.WRITE_CACHE_HIT_RATIO.description\n                    },\n                    VolumeMetric.IO_SIZE.name: {\n                        \"unit\": VolumeMetric.IO_SIZE.unit,\n                        \"description\": VolumeMetric.IO_SIZE.description\n                    },\n                    VolumeMetric.READ_IO_SIZE.name: {\n                        \"unit\": VolumeMetric.READ_IO_SIZE.unit,\n                        \"description\": VolumeMetric.READ_IO_SIZE.description\n                    },\n                    VolumeMetric.WRITE_IO_SIZE.name: {\n                        \"unit\": VolumeMetric.WRITE_IO_SIZE.unit,\n                        \"description\": VolumeMetric.WRITE_IO_SIZE.description\n                    },\n                },\n           
     ResourceType.CONTROLLER: {\n                    ControllerMetric.THROUGHPUT.name: {\n                        \"unit\": ControllerMetric.THROUGHPUT.unit,\n                        \"description\": ControllerMetric.THROUGHPUT.description\n                    },\n                    ControllerMetric.RESPONSE_TIME.name: {\n                        \"unit\": ControllerMetric.RESPONSE_TIME.unit,\n                        \"description\":\n                            ControllerMetric.RESPONSE_TIME.description\n                    },\n                    ControllerMetric.IOPS.name: {\n                        \"unit\": ControllerMetric.IOPS.unit,\n                        \"description\": ControllerMetric.IOPS.description\n                    },\n                    ControllerMetric.READ_THROUGHPUT.name: {\n                        \"unit\": ControllerMetric.READ_THROUGHPUT.unit,\n                        \"description\":\n                            ControllerMetric.READ_THROUGHPUT.description\n                    },\n                    ControllerMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": ControllerMetric.WRITE_THROUGHPUT.unit,\n                        \"description\":\n                            ControllerMetric.WRITE_THROUGHPUT.description\n                    },\n                    ControllerMetric.READ_IOPS.name: {\n                        \"unit\": ControllerMetric.READ_IOPS.unit,\n                        \"description\": ControllerMetric.READ_IOPS.description\n                    },\n                    ControllerMetric.WRITE_IOPS.name: {\n                        \"unit\": ControllerMetric.WRITE_IOPS.unit,\n                        \"description\": ControllerMetric.WRITE_IOPS.description\n                    },\n                    ControllerMetric.CPU_USAGE.name: {\n                        \"unit\": ControllerMetric.CPU_USAGE.unit,\n                        \"description\": ControllerMetric.CPU_USAGE.description\n                    }\n                },\n                ResourceType.PORT: {\n                    PortMetric.THROUGHPUT.name: {\n                        \"unit\": PortMetric.THROUGHPUT.unit,\n                        \"description\": PortMetric.THROUGHPUT.description\n                    },\n                    PortMetric.RESPONSE_TIME.name: {\n                        \"unit\": PortMetric.RESPONSE_TIME.unit,\n                        \"description\": PortMetric.RESPONSE_TIME.description\n                    },\n                    PortMetric.IOPS.name: {\n                        \"unit\": PortMetric.IOPS.unit,\n                        \"description\": PortMetric.IOPS.description\n                    },\n                    PortMetric.READ_THROUGHPUT.name: {\n                        \"unit\": PortMetric.READ_THROUGHPUT.unit,\n                        \"description\": PortMetric.READ_THROUGHPUT.description\n                    },\n                    PortMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": PortMetric.WRITE_THROUGHPUT.unit,\n                        \"description\": PortMetric.WRITE_THROUGHPUT.description\n                    },\n                    PortMetric.READ_IOPS.name: {\n                        \"unit\": PortMetric.READ_IOPS.unit,\n                        \"description\": PortMetric.READ_IOPS.description\n                    },\n                    PortMetric.WRITE_IOPS.name: {\n                        \"unit\": PortMetric.WRITE_IOPS.unit,\n                        \"description\": PortMetric.WRITE_IOPS.description\n         
           },\n\n                },\n                ResourceType.DISK: {\n                    DiskMetric.THROUGHPUT.name: {\n                        \"unit\": DiskMetric.THROUGHPUT.unit,\n                        \"description\": DiskMetric.THROUGHPUT.description\n                    },\n                    DiskMetric.RESPONSE_TIME.name: {\n                        \"unit\": DiskMetric.RESPONSE_TIME.unit,\n                        \"description\": DiskMetric.RESPONSE_TIME.description\n                    },\n                    DiskMetric.IOPS.name: {\n                        \"unit\": DiskMetric.IOPS.unit,\n                        \"description\": DiskMetric.IOPS.description\n                    },\n                    DiskMetric.READ_THROUGHPUT.name: {\n                        \"unit\": DiskMetric.READ_THROUGHPUT.unit,\n                        \"description\": DiskMetric.READ_THROUGHPUT.description\n                    },\n                    DiskMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": DiskMetric.WRITE_THROUGHPUT.unit,\n                        \"description\": DiskMetric.WRITE_THROUGHPUT.description\n                    },\n                    DiskMetric.READ_IOPS.name: {\n                        \"unit\": DiskMetric.READ_IOPS.unit,\n                        \"description\": DiskMetric.READ_IOPS.description\n                    },\n                    DiskMetric.WRITE_IOPS.name: {\n                        \"unit\": DiskMetric.WRITE_IOPS.unit,\n                        \"description\": DiskMetric.WRITE_IOPS.description\n                    },\n\n                },\n                ResourceType.FILESYSTEM: {\n                    FileSystemMetric.THROUGHPUT.name: {\n                        \"unit\": FileSystemMetric.THROUGHPUT.unit,\n                        \"description\": FileSystemMetric.THROUGHPUT.description\n                    },\n                    FileSystemMetric.READ_RESPONSE_TIME.name: {\n                        \"unit\": FileSystemMetric.READ_RESPONSE_TIME.unit,\n                        \"description\":\n                            FileSystemMetric.READ_RESPONSE_TIME.description\n                    },\n                    FileSystemMetric.WRITE_RESPONSE_TIME.name: {\n                        \"unit\": FileSystemMetric.WRITE_RESPONSE_TIME.unit,\n                        \"description\":\n                            FileSystemMetric.WRITE_RESPONSE_TIME.description\n                    },\n                    FileSystemMetric.IOPS.name: {\n                        \"unit\": FileSystemMetric.IOPS.unit,\n                        \"description\": FileSystemMetric.IOPS.description\n                    },\n                    FileSystemMetric.READ_THROUGHPUT.name: {\n                        \"unit\": FileSystemMetric.READ_THROUGHPUT.unit,\n                        \"description\":\n                            FileSystemMetric.READ_THROUGHPUT.description\n                    },\n                    FileSystemMetric.WRITE_THROUGHPUT.name: {\n                        \"unit\": FileSystemMetric.WRITE_THROUGHPUT.unit,\n                        \"description\":\n                            FileSystemMetric.WRITE_THROUGHPUT.description\n                    },\n                    FileSystemMetric.READ_IOPS.name: {\n                        \"unit\": FileSystemMetric.READ_IOPS.unit,\n                        \"description\": FileSystemMetric.READ_IOPS.description\n                    },\n                    FileSystemMetric.WRITE_IOPS.name: {\n                        \"unit\": 
FileSystemMetric.WRITE_IOPS.unit,\n                        \"description\": FileSystemMetric.WRITE_IOPS.description\n                    },\n                    FileSystemMetric.IO_SIZE.name: {\n                        \"unit\": FileSystemMetric.IO_SIZE.unit,\n                        \"description\": FileSystemMetric.IO_SIZE.description\n                    },\n                    FileSystemMetric.READ_IO_SIZE.name: {\n                        \"unit\": FileSystemMetric.READ_IO_SIZE.unit,\n                        \"description\":\n                            FileSystemMetric.READ_IO_SIZE.description\n                    },\n                    FileSystemMetric.WRITE_IO_SIZE.name: {\n                        \"unit\": FileSystemMetric.WRITE_IO_SIZE.unit,\n                        \"description\":\n                            FileSystemMetric.WRITE_IO_SIZE.description\n                    },\n                },\n\n            }\n            }\n\n\ndef custom_fake_get_capabilities(capabilities):\n    def get_capability(context, storage_id):\n        return capabilities\n\n    return get_capability\n"
  },
  {
    "path": "delfin/tests/unit/api/test_api_validation.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (C) 2017 NTT DATA\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport re\nimport sys\n\nimport fixtures\nimport six\nfrom six.moves import http_client as http\n\nfrom delfin.api import validation\nfrom delfin.api.validation import parameter_types\nfrom delfin import exception\nfrom delfin import test\n\n\nclass FakeRequest(object):\n    environ = {}\n\n\nclass ValidationRegex(test.TestCase):\n\n    def test_build_regex_range(self):\n\n        def _get_all_chars():\n            for i in range(0x7F):\n                yield six.unichr(i)\n\n        self.useFixture(fixtures.MonkeyPatch(\n            'delfin.api.validation.parameter_types._get_all_chars',\n            _get_all_chars))\n\n        r = parameter_types._build_regex_range(ws=False)\n        self.assertEqual(re.escape('!') + '-' + re.escape('~'), r)\n\n        # if we allow whitespace the range starts earlier\n        r = parameter_types._build_regex_range(ws=True)\n        self.assertEqual(re.escape(' ') + '-' + re.escape('~'), r)\n\n        # excluding a character will give us 2 ranges\n        r = parameter_types._build_regex_range(ws=True, exclude=['A'])\n        self.assertEqual(re.escape(' ') + '-' + re.escape('@') +\n                         'B' + '-' + re.escape('~'), r)\n\n        # inverting which gives us all the initial unprintable characters.\n        r = parameter_types._build_regex_range(ws=False, invert=True)\n        self.assertEqual(re.escape('\\x00') + '-' + re.escape(' '), r)\n\n        # excluding characters that create a singleton. Naively this would be:\n        # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural.\n        r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C'])\n        self.assertEqual(re.escape(' ') + '-' + re.escape('@') +\n                         'B' + 'D' + '-' + re.escape('~'), r)\n\n        # ws=True means the positive regex has printable whitespaces,\n        # so the inverse will not. 
The inverse will include things we\n        # exclude.\n        r = parameter_types._build_regex_range(\n            ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True)\n        self.assertEqual(re.escape('\\x00') + '-' + re.escape('\\x1f') +\n                         'A-CZ', r)\n\n\nclass APIValidationTestCase(test.TestCase):\n\n    def setUp(self, schema=None):\n        super(APIValidationTestCase, self).setUp()\n        self.post = None\n\n        if schema is not None:\n            @validation.schema(request_body_schema=schema)\n            def post(req, body):\n                return 'Validation succeeded.'\n\n            self.post = post\n\n    def check_validation_error(self, method, body, expected_detail, req=None):\n        if not req:\n            req = FakeRequest()\n        try:\n            method(body=body, req=req)\n        except exception.InvalidInput as ex:\n            self.assertEqual(http.BAD_REQUEST, ex.code)\n            if isinstance(expected_detail, list):\n                self.assertEqual(expected_detail, ex.error_args,\n                                 'Exception details did not match expected')\n            else:\n                self.assertEqual(expected_detail, ex.error_args[0],\n                                 'Exception details did not match expected')\n        except Exception as ex:\n            self.fail('An unexpected exception occurred: %s' % ex)\n        else:\n            self.fail('No exception was raised.')\n\n\nclass RequiredDisableTestCase(APIValidationTestCase):\n\n    def setUp(self):\n        schema = {\n            'type': 'object',\n            'properties': {\n                'foo': {\n                    'type': 'integer',\n                },\n            },\n        }\n        super(RequiredDisableTestCase, self).setUp(schema=schema)\n\n    def test_validate_required_disable(self):\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 1}, req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'abc': 1}, req=FakeRequest()))\n\n\nclass RequiredEnableTestCase(APIValidationTestCase):\n\n    def setUp(self):\n        schema = {\n            'type': 'object',\n            'properties': {\n                'foo': {\n                    'type': 'integer',\n                },\n            },\n            'required': ['foo']\n        }\n        super(RequiredEnableTestCase, self).setUp(schema=schema)\n\n    def test_validate_required_enable(self):\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 1}, req=FakeRequest()))\n\n    def test_validate_required_enable_fails(self):\n        detail = \"'foo' is a required property\"\n        self.check_validation_error(self.post, body={'abc': 1},\n                                    expected_detail=detail)\n\n\nclass AdditionalPropertiesEnableTestCase(APIValidationTestCase):\n\n    def setUp(self):\n        schema = {\n            'type': 'object',\n            'properties': {\n                'foo': {\n                    'type': 'integer',\n                },\n            },\n            'required': ['foo'],\n        }\n        super(AdditionalPropertiesEnableTestCase, self).setUp(schema=schema)\n\n    def test_validate_additionalProperties_enable(self):\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 1}, req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         
self.post(body={'foo': 1, 'ext': 1},\n                                   req=FakeRequest()))\n\n\nclass AdditionalPropertiesDisableTestCase(APIValidationTestCase):\n\n    def setUp(self):\n        schema = {\n            'type': 'object',\n            'properties': {\n                'foo': {\n                    'type': 'integer',\n                },\n            },\n            'required': ['foo'],\n            'additionalProperties': False,\n        }\n        super(AdditionalPropertiesDisableTestCase, self).setUp(schema=schema)\n\n    def test_validate_additionalProperties_disable(self):\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 1}, req=FakeRequest()))\n\n    def test_validate_additionalProperties_disable_fails(self):\n        detail = \"Additional properties are not allowed ('ext' was unexpected)\"\n        self.check_validation_error(self.post, body={'foo': 1, 'ext': 1},\n                                    expected_detail=detail)\n\n\nclass PatternPropertiesTestCase(APIValidationTestCase):\n\n    def setUp(self):\n        schema = {\n            'patternProperties': {\n                '^[a-zA-Z0-9]{1,10}$': {\n                    'type': 'string'\n                },\n            },\n            'additionalProperties': False,\n        }\n        super(PatternPropertiesTestCase, self).setUp(schema=schema)\n\n    def test_validate_patternProperties(self):\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 'bar'}, req=FakeRequest()))\n\n    def test_validate_patternProperties_fails(self):\n        details = \"'__' does not match any of the regexes: \" \\\n                  \"'^[a-zA-Z0-9]{1,10}$'\"\n        self.check_validation_error(self.post, body={'__': 'bar'},\n                                    expected_detail=details)\n\n        details = \"'' does not match any of the regexes: \" \\\n                  \"'^[a-zA-Z0-9]{1,10}$'\"\n        self.check_validation_error(self.post, body={'': 'bar'},\n                                    expected_detail=details)\n\n        details = \"'0123456789a' does not match any of the regexes: \" \\\n                  \"'^[a-zA-Z0-9]{1,10}$'\"\n        self.check_validation_error(self.post, body={'0123456789a': 'bar'},\n                                    expected_detail=details)\n\n        ver_info = sys.version_info\n        if ver_info.major == 3 and ver_info.minor >= 5:\n            detail = \"expected string or bytes-like object\"\n        else:\n            detail = \"expected string or buffer\"\n        self.check_validation_error(self.post, body={None: 'bar'},\n                                    expected_detail=detail)\n\n\nclass StringTestCase(APIValidationTestCase):\n\n    def setUp(self):\n        schema = {\n            'type': 'object',\n            'properties': {\n                'foo': {\n                    'type': 'string',\n                },\n            },\n        }\n        super(StringTestCase, self).setUp(schema=schema)\n\n    def test_validate_string(self):\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 'abc'}, req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': '0'}, req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': ''}, req=FakeRequest()))\n\n    def test_validate_string_fails(self):\n        detail = (\"Invalid input for 
field/attribute foo. \"\n                  \"1 is not of type 'string'\")\n        self.check_validation_error(self.post, body={'foo': 1},\n                                    expected_detail=detail)\n\n        detail = (\"Invalid input for field/attribute foo. \"\n                  \"1.5 is not of type 'string'\")\n        self.check_validation_error(self.post, body={'foo': 1.5},\n                                    expected_detail=detail)\n\n        detail = (\"Invalid input for field/attribute foo. \"\n                  \"True is not of type 'string'\")\n        self.check_validation_error(self.post, body={'foo': True},\n                                    expected_detail=detail)\n\n\nclass StringLengthTestCase(APIValidationTestCase):\n\n    def setUp(self):\n        schema = {\n            'type': 'object',\n            'properties': {\n                'foo': {\n                    'type': 'string',\n                    'minLength': 1,\n                    'maxLength': 10,\n                },\n            },\n        }\n        super(StringLengthTestCase, self).setUp(schema=schema)\n\n    def test_validate_string_length(self):\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': '0'}, req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': '0123456789'},\n                                   req=FakeRequest()))\n\n    def test_validate_string_length_fails(self):\n        detail = (\"Invalid input for field/attribute foo.\"\n                  \" '' is too short\")\n        self.check_validation_error(self.post, body={'foo': ''},\n                                    expected_detail=detail)\n\n        detail = (\"Invalid input for field/attribute foo.\"\n                  \" '0123456789a' is too long\")\n        self.check_validation_error(self.post, body={'foo': '0123456789a'},\n                                    expected_detail=detail)\n\n\nclass IntegerTestCase(APIValidationTestCase):\n\n    def setUp(self):\n        schema = {\n            'type': 'object',\n            'properties': {\n                'foo': {\n                    'type': ['integer', 'string'],\n                    'pattern': '^[0-9]+$',\n                },\n            },\n        }\n        super(IntegerTestCase, self).setUp(schema=schema)\n\n    def test_validate_integer(self):\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 1}, req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': '1'}, req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': '0123456789'},\n                                   req=FakeRequest()))\n\n    def test_validate_integer_fails(self):\n        detail = (\"Invalid input for field/attribute foo. \"\n                  \"'abc' does not match '^[0-9]+$'\")\n        self.check_validation_error(self.post, body={'foo': 'abc'},\n                                    expected_detail=detail)\n\n        detail = (\"Invalid input for field/attribute foo. \"\n                  \"True is not of type 'integer', 'string'\")\n        self.check_validation_error(self.post, body={'foo': True},\n                                    expected_detail=detail)\n\n        detail = (\"Invalid input for field/attribute foo. 
\"\n                  \"'0xffff' does not match '^[0-9]+$'\")\n        self.check_validation_error(self.post, body={'foo': '0xffff'},\n                                    expected_detail=detail)\n\n        detail = (\"Invalid input for field/attribute foo. \"\n                  \"1.0 is not of type 'integer', 'string'\")\n        self.check_validation_error(self.post, body={'foo': 1.0},\n                                    expected_detail=detail)\n\n        detail = (\"Invalid input for field/attribute foo. \"\n                  \"'1.0' does not match '^[0-9]+$'\")\n        self.check_validation_error(self.post, body={'foo': '1.0'},\n                                    expected_detail=detail)\n\n\nclass IntegerRangeTestCase(APIValidationTestCase):\n\n    def setUp(self):\n        schema = {\n            'type': 'object',\n            'properties': {\n                'foo': {\n                    'type': ['integer', 'string'],\n                    'pattern': '^[0-9]+$',\n                    'minimum': 1,\n                    'maximum': 10,\n                },\n            },\n        }\n        super(IntegerRangeTestCase, self).setUp(schema=schema)\n\n    def test_validate_integer_range(self):\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 1}, req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 10}, req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': '1'}, req=FakeRequest()))\n\n    def test_validate_integer_range_fails(self):\n        detail = (\"Invalid input for field/attribute foo. \"\n                  \"0 is less than the minimum of 1\")\n        self.check_validation_error(self.post, body={'foo': 0},\n                                    expected_detail=detail)\n\n        detail = (\"Invalid input for field/attribute foo. \"\n                  \"11 is greater than the maximum of 10\")\n        self.check_validation_error(self.post, body={'foo': 11},\n                                    expected_detail=detail)\n\n        detail = (\"Invalid input for field/attribute foo. \"\n                  \"0 is less than the minimum of 1\")\n        self.check_validation_error(self.post, body={'foo': '0'},\n                                    expected_detail=detail)\n\n        detail = (\"Invalid input for field/attribute foo. 
\"\n                  \"11 is greater than the maximum of 10\")\n        self.check_validation_error(self.post, body={'foo': '11'},\n                                    expected_detail=detail)\n\n\nclass NameTestCase(APIValidationTestCase):\n\n    def setUp(self):\n        schema = {\n            'type': 'object',\n            'properties': {\n                'foo': parameter_types.name,\n            },\n        }\n        super(NameTestCase, self).setUp(schema=schema)\n\n    def test_validate_name(self):\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 'volume.1'},\n                                   req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 'volume 1'},\n                                   req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': 'a'}, req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': u'\\u0434'}, req=FakeRequest()))\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={'foo': u'\\u0434\\u2006\\ufffd'},\n                                   req=FakeRequest()))\n\n\nclass DatetimeTestCase(APIValidationTestCase):\n\n    def setUp(self):\n        schema = {\n            'type': 'object',\n            'properties': {\n                'foo': {\n                    'type': 'string',\n                    'format': 'date-time',\n                },\n            },\n        }\n        super(DatetimeTestCase, self).setUp(schema=schema)\n\n    def test_validate_datetime(self):\n        self.assertEqual('Validation succeeded.',\n                         self.post(body={\n                             'foo': '2017-01-14T01:00:00Z'}, req=FakeRequest()\n                         ))\n"
  },
  {
    "path": "delfin/tests/unit/api/test_extensions.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.\n# Copyright 2011 OpenStack LLC.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport iso8601\nfrom oslo_config import cfg\nfrom oslo_serialization import jsonutils\nimport webob\n\nfrom delfin import test\nfrom delfin.api import extensions\nfrom delfin.api.v1 import router\n\nCONF = cfg.CONF\n\n\nclass ExtensionTestCase(test.TestCase):\n    def setUp(self):\n        super(ExtensionTestCase, self).setUp()\n        ext_list = CONF.delfin_api_extension[:]\n        fox = ('delfin.tests.unit.api.extensions.foxinsocks.Foxinsocks')\n        if fox not in ext_list:\n            ext_list.append(fox)\n            self.flags(delfin_api_extension=ext_list)\n\n\nclass ExtensionControllerTest(ExtensionTestCase):\n\n    def setUp(self):\n        super(ExtensionControllerTest, self).setUp()\n        self.ext_list = []\n        self.ext_list.sort()\n\n    def test_list_extensions_json(self):\n        app = router.APIRouter()\n        request = webob.Request.blank(\"/extensions\")\n        response = request.get_response(app)\n        self.assertEqual(200, response.status_int)\n\n        # Make sure we have all the extensions, extra extensions being OK.\n        data = jsonutils.loads(response.body)\n        names = [str(x['name']) for x in data['extensions']\n                 if str(x['name']) in self.ext_list]\n        names.sort()\n        self.assertEqual(self.ext_list, names)\n\n        # Ensure all the timestamps are valid according to iso8601\n        for ext in data['extensions']:\n            iso8601.parse_date(ext['updated'])\n\n        # Make sure that at least Fox in Sox is correct.\n        (fox_ext, ) = [\n            x for x in data['extensions'] if x['alias'] == 'FOXNSOX']\n        self.assertEqual(\n            {'name': 'Fox In Socks',\n             'updated': '2011-01-22T13:25:27-06:00',\n             'description': 'The Fox In Socks Extension.',\n             'alias': 'FOXNSOX',\n             'links': []},\n            fox_ext)\n\n        for ext in data['extensions']:\n            url = '/extensions/%s' % ext['alias']\n            request = webob.Request.blank(url)\n            response = request.get_response(app)\n            output = jsonutils.loads(response.body)\n            self.assertEqual(ext['alias'], output['extension']['alias'])\n\n    def test_get_extension_json(self):\n        app = router.APIRouter()\n        request = webob.Request.blank(\"/extensions/FOXNSOX\")\n        response = request.get_response(app)\n        self.assertEqual(200, response.status_int)\n\n        data = jsonutils.loads(response.body)\n        self.assertEqual(\n            {\"name\": \"Fox In Socks\",\n             \"updated\": \"2011-01-22T13:25:27-06:00\",\n             \"description\": \"The Fox In Socks Extension.\",\n             \"alias\": \"FOXNSOX\",\n             \"links\": []},\n            data['extension'])\n\n    
def test_get_non_existing_extension_json(self):\n        app = router.APIRouter()\n        request = webob.Request.blank(\"/extensions/4\")\n        response = request.get_response(app)\n        self.assertEqual(404, response.status_int)\n\n\nclass StubExtensionManager(object):\n    \"\"\"Provides access to Tweedle Beetles.\"\"\"\n\n    name = \"Tweedle Beetle Extension\"\n    alias = \"TWDLBETL\"\n\n    def __init__(self, resource_ext=None, action_ext=None, request_ext=None,\n                 controller_ext=None):\n        self.resource_ext = resource_ext\n        self.controller_ext = controller_ext\n        self.extra_resource_ext = None\n\n    def get_resources(self):\n        resource_exts = []\n        if self.resource_ext:\n            resource_exts.append(self.resource_ext)\n        if self.extra_resource_ext:\n            resource_exts.append(self.extra_resource_ext)\n        return resource_exts\n\n    def get_controller_extensions(self):\n        controller_extensions = []\n        if self.controller_ext:\n            controller_extensions.append(self.controller_ext)\n        return controller_extensions\n\n\nclass ExtensionControllerIdFormatTest(test.TestCase):\n\n    def _bounce_id(self, test_id):\n\n        class BounceController(object):\n            def show(self, req, id):\n                return id\n        res_ext = extensions.ResourceExtension('bounce',\n                                               BounceController())\n        manager = StubExtensionManager(res_ext)\n        app = router.APIRouter(manager)\n        request = webob.Request.blank(\"/bounce/%s\" % test_id)\n        response = request.get_response(app)\n        return response.body\n\n    def test_id_with_xml_format(self):\n        result = self._bounce_id('foo.xml')\n        self.assertEqual('foo', result.decode('UTF-8'))\n\n    def test_id_with_json_format(self):\n        result = self._bounce_id('foo.json')\n        self.assertEqual('foo', result.decode('UTF-8'))\n\n    def test_id_with_bad_format(self):\n        result = self._bounce_id('foo.bad')\n        self.assertEqual('foo.bad', result.decode('UTF-8'))\n"
  },
  {
    "path": "delfin/tests/unit/api/test_middlewares.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin import context\nfrom delfin import test\nfrom delfin.api import middlewares\nfrom delfin.wsgi import common\n\n\nclass TestContextWrapper(test.TestCase):\n\n    def _get_fake_req(self):\n        env = {}\n        req = common.Request(env)\n        return req\n\n    def test_context_wrapper(self):\n        context_wrapper = middlewares.ContextWrapper(common.Application())\n        req = self._get_fake_req()\n        context_wrapper(req)\n\n        self.assertIsInstance(req.environ['delfin.context'],\n                              context.RequestContext)\n"
  },
  {
    "path": "delfin/tests/unit/api/test_wsgi.py",
    "content": "#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport ddt\nimport six\nimport webob\n\nimport inspect\n\nfrom delfin.api.common import wsgi\nfrom delfin import exception\nfrom delfin import test\nfrom delfin.tests.unit.api import fakes\n\n\n@ddt.ddt\nclass RequestTest(test.TestCase):\n    def test_content_type_missing(self):\n        request = wsgi.Request.blank('/tests/123', method='POST')\n        request.body = six.b(\"<body />\")\n        self.assertIsNone(request.get_content_type())\n\n    def test_content_type_unsupported(self):\n        request = wsgi.Request.blank('/tests/123', method='POST')\n        request.headers[\"Content-Type\"] = \"text/html\"\n        request.body = six.b(\"asdf<br />\")\n        self.assertRaises(exception.InvalidContentType,\n                          request.get_content_type)\n\n    def test_content_type_with_charset(self):\n        request = wsgi.Request.blank('/tests/123')\n        request.headers[\"Content-Type\"] = \"application/json; charset=UTF-8\"\n        result = request.get_content_type()\n        self.assertEqual(\"application/json\", result)\n\n    def test_content_type_from_accept(self):\n        content_type = 'application/json'\n        request = wsgi.Request.blank('/tests/123')\n        request.headers[\"Accept\"] = content_type\n\n        result = request.best_match_content_type()\n\n        self.assertEqual(content_type, result)\n\n    def test_content_type_from_accept_best(self):\n        request = wsgi.Request.blank('/tests/123')\n        request.headers[\"Accept\"] = \"application/xml, application/json\"\n        result = request.best_match_content_type()\n        self.assertEqual(\"application/json\", result)\n\n        request = wsgi.Request.blank('/tests/123')\n        request.headers[\"Accept\"] = (\"application/json; q=0.3, \"\n                                     \"application/xml; q=0.9\")\n        result = request.best_match_content_type()\n        self.assertEqual(\"application/json\", result)\n\n    def test_content_type_from_query_extension(self):\n        request = wsgi.Request.blank('/tests/123.json')\n        result = request.best_match_content_type()\n        self.assertEqual(\"application/json\", result)\n\n        request = wsgi.Request.blank('/tests/123.invalid')\n        result = request.best_match_content_type()\n        self.assertEqual(\"application/json\", result)\n\n    def test_content_type_accept_default(self):\n        request = wsgi.Request.blank('/tests/123.unsupported')\n        request.headers[\"Accept\"] = \"application/unsupported1\"\n        result = request.best_match_content_type()\n        self.assertEqual(\"application/json\", result)\n\n    def test_cache_and_retrieve_resources(self):\n        request = wsgi.Request.blank('/foo')\n        # Test that trying to retrieve a cached object on\n        # an empty cache fails gracefully\n        self.assertIsNone(request.cached_resource())\n        
        self.assertIsNone(request.cached_resource_by_id('r-0'))\n\n        resources = [{'id': 'r-%s' % x} for x in range(3)]\n\n        # Cache an empty list of resources using the default name\n        request.cache_resource([])\n        self.assertEqual({}, request.cached_resource())\n        self.assertIsNone(request.cached_resource('r-0'))\n        # Cache some resources\n        request.cache_resource(resources[:2])\n        # Cache one resource\n        request.cache_resource(resources[2])\n        # Cache under a different resource name\n        other_resource = {'id': 'o-0'}\n        request.cache_resource(other_resource, name='other-resource')\n\n        self.assertEqual(resources[0], request.cached_resource_by_id('r-0'))\n        self.assertEqual(resources[1], request.cached_resource_by_id('r-1'))\n        self.assertEqual(resources[2], request.cached_resource_by_id('r-2'))\n        self.assertIsNone(request.cached_resource_by_id('r-3'))\n        self.assertEqual(\n            {'r-0': resources[0], 'r-1': resources[1], 'r-2': resources[2]},\n            request.cached_resource())\n        self.assertEqual(\n            other_resource,\n            request.cached_resource_by_id('o-0', name='other-resource'))\n\n    @ddt.data(\n        'share_type',\n    )\n    def test_cache_and_retrieve_resources_by_resource(self, resource_name):\n        cache_all_func = 'cache_db_%ss' % resource_name\n        cache_one_func = 'cache_db_%s' % resource_name\n        get_db_all_func = 'get_db_%ss' % resource_name\n        get_db_one_func = 'get_db_%s' % resource_name\n\n        r = wsgi.Request.blank('/foo')\n        amount = 5\n        res_range = range(amount)\n        resources = [{'id': 'id%s' % x} for x in res_range]\n\n        # Cache all but the last resource in bulk\n        getattr(r, cache_all_func)(resources[:amount - 1])\n        # Cache the last resource individually\n        getattr(r, cache_one_func)(resources[amount - 1])\n\n        for i in res_range:\n            self.assertEqual(\n                resources[i],\n                getattr(r, get_db_one_func)('id%s' % i),\n            )\n        self.assertIsNone(getattr(r, get_db_one_func)('id%s' % amount))\n        self.assertEqual(\n            {'id%s' % i: resources[i] for i in res_range},\n            getattr(r, get_db_all_func)())\n\n\nclass ActionDispatcherTest(test.TestCase):\n    def test_dispatch(self):\n        serializer = wsgi.ActionDispatcher()\n        serializer.create = lambda x: 'pants'\n        self.assertEqual('pants', serializer.dispatch({}, action='create'))\n\n    def test_dispatch_action_None(self):\n        serializer = wsgi.ActionDispatcher()\n        serializer.create = lambda x: 'pants'\n        serializer.default = lambda x: 'trousers'\n        self.assertEqual('trousers', serializer.dispatch({}, action=None))\n\n    def test_dispatch_default(self):\n        serializer = wsgi.ActionDispatcher()\n        serializer.create = lambda x: 'pants'\n        serializer.default = lambda x: 'trousers'\n        self.assertEqual('trousers', serializer.dispatch({}, action='update'))\n\n\nclass DictSerializerTest(test.TestCase):\n    def test_dispatch_default(self):\n        serializer = wsgi.DictSerializer()\n        self.assertEqual('', serializer.serialize({}, 'update'))\n\n\nclass JSONDictSerializerTest(test.TestCase):\n    def test_json(self):\n        input_dict = dict(servers=dict(a=(2, 3)))\n        expected_json = six.b('{\"servers\":{\"a\":[2,3]}}')\n        serializer = wsgi.JSONDictSerializer()\n        result = serializer.serialize(input_dict)\n        result = 
result.replace(six.b('\\n'),\n                                six.b('')).replace(six.b(' '), six.b(''))\n        self.assertEqual(expected_json, result)\n\n\nclass TextDeserializerTest(test.TestCase):\n    def test_dispatch_default(self):\n        deserializer = wsgi.TextDeserializer()\n        self.assertEqual({}, deserializer.deserialize({}, 'update'))\n\n\nclass JSONDeserializerTest(test.TestCase):\n    def test_json(self):\n        data = \"\"\"{\"a\": {\n                \"a1\": \"1\",\n                \"a2\": \"2\",\n                \"bs\": [\"1\", \"2\", \"3\", {\"c\": {\"c1\": \"1\"}}],\n                \"d\": {\"e\": \"1\"},\n                \"f\": \"1\"}}\"\"\"\n        as_dict = {\n            'body': {\n                'a': {\n                    'a1': '1',\n                    'a2': '2',\n                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],\n                    'd': {'e': '1'},\n                    'f': '1',\n                },\n            },\n        }\n        deserializer = wsgi.JSONDeserializer()\n        self.assertEqual(as_dict, deserializer.deserialize(data))\n\n\nclass ResourceTest(test.TestCase):\n    def test_resource_call(self):\n        class Controller(object):\n            def index(self, req):\n                return 'off'\n\n        req = webob.Request.blank('/tests')\n        app = fakes.TestRouter(Controller())\n        response = req.get_response(app)\n        self.assertEqual(six.b('off'), response.body)\n        self.assertEqual(200, response.status_int)\n\n    def test_resource_not_authorized(self):\n        class Controller(object):\n            def index(self, req):\n                raise exception.NotAuthorized()\n\n        req = webob.Request.blank('/tests')\n        app = fakes.TestRouter(Controller())\n        response = req.get_response(app)\n        self.assertEqual(403, response.status_int)\n\n    def test_dispatch(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n        method, extensions = resource.get_method(None, 'index', None, '')\n        actual = resource.dispatch(method, None, {'pants': 'off'})\n        expected = 'off'\n        self.assertEqual(expected, actual)\n\n    def test_get_method_undefined_controller_action(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n        self.assertRaises(AttributeError, resource.get_method,\n                          None, 'create', None, '')\n\n    def test_get_method_action_json(self):\n        class Controller(wsgi.Controller):\n            @wsgi.action('fooAction')\n            def _action_foo(self, req, id, body):\n                return body\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n        method, extensions = resource.get_method(None, 'action',\n                                                 'application/json',\n                                                 '{\"fooAction\": true}')\n        self.assertEqual(controller._action_foo, method)\n\n    def test_get_method_action_bad_body(self):\n        class Controller(wsgi.Controller):\n            @wsgi.action('fooAction')\n            def _action_foo(self, req, id, body):\n                return body\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n       
 self.assertRaises(exception.MalformedRequestBody, resource.get_method,\n                          None, 'action', 'application/json', '{}')\n\n    def test_get_method_unknown_controller_action(self):\n        class Controller(wsgi.Controller):\n            @wsgi.action('fooAction')\n            def _action_foo(self, req, id, body):\n                return body\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n        self.assertRaises(KeyError, resource.get_method,\n                          None, 'action', 'application/json',\n                          '{\"barAction\": true}')\n\n    def test_get_method_action_method(self):\n        class Controller(object):\n            def action(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n        method, extensions = resource.get_method(None, 'action',\n                                                 'application/xml',\n                                                 '<fooAction>true</fooAction>')\n        self.assertEqual(controller.action, method)\n\n    def test_get_action_args(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        env = {\n            'wsgiorg.routing_args': [None, {\n                'controller': None,\n                'format': None,\n                'action': 'update',\n                'id': 12,\n            }],\n        }\n\n        expected = {'action': 'update', 'id': 12}\n\n        self.assertEqual(expected, resource.get_action_args(env))\n\n    def test_get_body_bad_content(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        request = wsgi.Request.blank('/', method='POST')\n        request.headers['Content-Type'] = 'application/none'\n        request.body = six.b('foo')\n\n        content_type, body = resource.get_body(request)\n        self.assertIsNone(content_type)\n        self.assertEqual('', body)\n\n    def test_get_body_no_content_type(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        request = wsgi.Request.blank('/', method='POST')\n        request.body = six.b('foo')\n\n        content_type, body = resource.get_body(request)\n        self.assertIsNone(content_type)\n        self.assertEqual('', body)\n\n    def test_get_body_no_content_body(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        request = wsgi.Request.blank('/', method='POST')\n        request.headers['Content-Type'] = 'application/json'\n        request.body = six.b('')\n\n        content_type, body = resource.get_body(request)\n        self.assertIsNone(content_type)\n        self.assertEqual('', body)\n\n    def test_get_body(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        request = wsgi.Request.blank('/', method='POST')\n
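        # A JSON body should be returned along with its content type.\n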
        request.headers['Content-Type'] = 'application/json'\n        request.body = six.b('foo')\n\n        content_type, body = resource.get_body(request)\n        self.assertEqual('application/json', content_type)\n        self.assertEqual(six.b('foo'), body)\n\n    def test_deserialize_badtype(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n        self.assertRaises(exception.InvalidContentType,\n                          resource.deserialize,\n                          controller.index, 'application/none', 'foo')\n\n    def test_deserialize_default(self):\n        class JSONDeserializer(object):\n            def deserialize(self, body):\n                return 'json'\n\n        class XMLDeserializer(object):\n            def deserialize(self, body):\n                return 'xml'\n\n        class Controller(object):\n            @wsgi.deserializers(xml=XMLDeserializer)\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller, json=JSONDeserializer)\n\n        obj = resource.deserialize(controller.index, 'application/json', 'foo')\n        self.assertEqual('json', obj)\n\n    def test_deserialize_decorator(self):\n        class JSONDeserializer(object):\n            def deserialize(self, body):\n                return 'json'\n\n        class Controller(object):\n            @wsgi.deserializers(json=JSONDeserializer)\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        obj = resource.deserialize(controller.index, 'application/json', 'foo')\n        self.assertEqual('json', obj)\n\n    def test_register_actions(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        class ControllerExtended(wsgi.Controller):\n            @wsgi.action('fooAction')\n            def _action_foo(self, req, id, body):\n                return body\n\n            @wsgi.action('barAction')\n            def _action_bar(self, req, id, body):\n                return body\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n        self.assertEqual({}, resource.wsgi_actions)\n        extended = ControllerExtended()\n        resource.register_actions(extended)\n        self.assertEqual({'fooAction': extended._action_foo,\n                          'barAction': extended._action_bar, },\n                         resource.wsgi_actions)\n\n    def test_register_extensions(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        class ControllerExtended(wsgi.Controller):\n            @wsgi.extends\n            def index(self, req, resp_obj, pants=None):\n                return None\n\n            @wsgi.extends(action='fooAction')\n            def _action_foo(self, req, resp, id, body):\n                return None\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n        self.assertEqual({}, resource.wsgi_extensions)\n        self.assertEqual({}, resource.wsgi_action_extensions)\n\n        extended = ControllerExtended()\n        resource.register_extensions(extended)\n        self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions)\n        self.assertEqual({'fooAction': 
[extended._action_foo]},\n                         resource.wsgi_action_extensions)\n\n    def test_get_method_extensions(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        class ControllerExtended(wsgi.Controller):\n            @wsgi.extends\n            def index(self, req, resp_obj, pants=None):\n                return None\n\n        controller = Controller()\n        extended = ControllerExtended()\n        resource = wsgi.Resource(controller)\n        resource.register_extensions(extended)\n        method, extensions = resource.get_method(None, 'index', None, '')\n        self.assertEqual(controller.index, method)\n        self.assertEqual([extended.index], extensions)\n\n    def test_get_method_action_extensions(self):\n        class Controller(wsgi.Controller):\n            def index(self, req, pants=None):\n                return pants\n\n            @wsgi.action('fooAction')\n            def _action_foo(self, req, id, body):\n                return body\n\n        class ControllerExtended(wsgi.Controller):\n            @wsgi.extends(action='fooAction')\n            def _action_foo(self, req, resp_obj, id, body):\n                return None\n\n        controller = Controller()\n        extended = ControllerExtended()\n        resource = wsgi.Resource(controller)\n        resource.register_extensions(extended)\n        method, extensions = resource.get_method(None, 'action',\n                                                 'application/json',\n                                                 '{\"fooAction\": true}')\n        self.assertEqual(controller._action_foo, method)\n        self.assertEqual([extended._action_foo], extensions)\n\n    def test_get_method_action_whitelist_extensions(self):\n        class Controller(wsgi.Controller):\n            def index(self, req, pants=None):\n                return pants\n\n        class ControllerExtended(wsgi.Controller):\n            @wsgi.action('create')\n            def _create(self, req, body):\n                pass\n\n            @wsgi.action('delete')\n            def _delete(self, req, id):\n                pass\n\n        controller = Controller()\n        extended = ControllerExtended()\n        resource = wsgi.Resource(controller)\n        resource.register_actions(extended)\n\n        method, extensions = resource.get_method(None, 'create',\n                                                 'application/json',\n                                                 '{\"create\": true}')\n        self.assertEqual(extended._create, method)\n        self.assertEqual([], extensions)\n\n        method, extensions = resource.get_method(None, 'delete', None, None)\n        self.assertEqual(extended._delete, method)\n        self.assertEqual([], extensions)\n\n    def test_pre_process_extensions_regular(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        called = []\n\n        def extension1(req, resp_obj):\n            called.append(1)\n            return None\n\n        def extension2(req, resp_obj):\n            called.append(2)\n            return None\n\n        extensions = [extension1, extension2]\n        response, post = resource.pre_process_extensions(extensions, None, {})\n        self.assertEqual([], called)\n        self.assertIsNone(response)\n        self.assertEqual([extension2, 
extension1], list(post))\n\n    def test_pre_process_extensions_generator(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        called = []\n\n        def extension1(req):\n            called.append('pre1')\n            resp_obj = yield\n            self.assertIsNone(resp_obj)\n            called.append('post1')\n\n        def extension2(req):\n            called.append('pre2')\n            resp_obj = yield\n            self.assertIsNone(resp_obj)\n            called.append('post2')\n\n        extensions = [extension1, extension2]\n        response, post = resource.pre_process_extensions(extensions, None, {})\n        post = list(post)\n        self.assertEqual(['pre1', 'pre2'], called)\n        self.assertIsNone(response)\n        self.assertEqual(2, len(post))\n        self.assertTrue(inspect.isgenerator(post[0]))\n        self.assertTrue(inspect.isgenerator(post[1]))\n\n        for gen in post:\n            try:\n                gen.send(None)\n            except StopIteration:\n                continue\n\n        self.assertEqual(['pre1', 'pre2', 'post2', 'post1'], called)\n\n    def test_pre_process_extensions_generator_response(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        called = []\n\n        def extension1(req):\n            called.append('pre1')\n            yield 'foo'\n\n        def extension2(req):\n            called.append('pre2')\n\n        extensions = [extension1, extension2]\n        response, post = resource.pre_process_extensions(extensions, None, {})\n        self.assertEqual(['pre1'], called)\n        self.assertEqual('foo', response)\n        self.assertEqual([], post)\n\n    def test_post_process_extensions_regular(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        called = []\n\n        def extension1(req, resp_obj):\n            called.append(1)\n            return None\n\n        def extension2(req, resp_obj):\n            called.append(2)\n            return None\n\n        response = resource.post_process_extensions([extension2, extension1],\n                                                    None, None, {})\n        self.assertEqual([2, 1], called)\n        self.assertIsNone(response)\n\n    def test_post_process_extensions_regular_response(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        called = []\n\n        def extension1(req, resp_obj):\n            called.append(1)\n            return None\n\n        def extension2(req, resp_obj):\n            called.append(2)\n            return 'foo'\n\n        response = resource.post_process_extensions([extension2, extension1],\n                                                    None, None, {})\n        self.assertEqual([2], called)\n        self.assertEqual('foo', response)\n\n    def test_post_process_extensions_generator(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = 
Controller()\n        resource = wsgi.Resource(controller)\n\n        called = []\n\n        def extension1(req):\n            resp_obj = yield\n            self.assertIsNone(resp_obj)\n            called.append(1)\n\n        def extension2(req):\n            resp_obj = yield\n            self.assertIsNone(resp_obj)\n            called.append(2)\n\n        ext1 = extension1(None)\n        next(ext1)\n        ext2 = extension2(None)\n        next(ext2)\n\n        response = resource.post_process_extensions([ext2, ext1],\n                                                    None, None, {})\n\n        self.assertEqual([2, 1], called)\n        self.assertIsNone(response)\n\n    def test_post_process_extensions_generator_response(self):\n        class Controller(object):\n            def index(self, req, pants=None):\n                return pants\n\n        controller = Controller()\n        resource = wsgi.Resource(controller)\n\n        called = []\n\n        def extension1(req):\n            resp_obj = yield\n            self.assertIsNone(resp_obj)\n            called.append(1)\n\n        def extension2(req):\n            resp_obj = yield\n            self.assertIsNone(resp_obj)\n            called.append(2)\n            yield 'foo'\n\n        ext1 = extension1(None)\n        next(ext1)\n        ext2 = extension2(None)\n        next(ext2)\n\n        response = resource.post_process_extensions([ext2, ext1],\n                                                    None, None, {})\n\n        self.assertEqual([2], called)\n        self.assertEqual('foo', response)\n\n\nclass ResponseObjectTest(test.TestCase):\n    def test_default_code(self):\n        robj = wsgi.ResponseObject({})\n        self.assertEqual(200, robj.code)\n\n    def test_modified_code(self):\n        robj = wsgi.ResponseObject({})\n        robj._default_code = 202\n        self.assertEqual(202, robj.code)\n\n    def test_override_default_code(self):\n        robj = wsgi.ResponseObject({}, code=404)\n        self.assertEqual(404, robj.code)\n\n    def test_override_modified_code(self):\n        robj = wsgi.ResponseObject({}, code=404)\n        robj._default_code = 202\n        self.assertEqual(404, robj.code)\n\n    def test_set_header(self):\n        robj = wsgi.ResponseObject({})\n        robj['Header'] = 'foo'\n        self.assertEqual({'header': 'foo'}, robj.headers)\n\n    def test_get_header(self):\n        robj = wsgi.ResponseObject({})\n        robj['Header'] = 'foo'\n        self.assertEqual('foo', robj['hEADER'])\n\n    def test_del_header(self):\n        robj = wsgi.ResponseObject({})\n        robj['Header'] = 'foo'\n        del robj['hEADER']\n        self.assertNotIn('header', robj.headers)\n\n    def test_header_isolation(self):\n        robj = wsgi.ResponseObject({})\n        robj['Header'] = 'foo'\n        hdrs = robj.headers\n        hdrs['hEADER'] = 'bar'\n        self.assertEqual('foo', robj['hEADER'])\n\n    def test_default_serializers(self):\n        robj = wsgi.ResponseObject({})\n        self.assertEqual({}, robj.serializers)\n\n    def test_bind_serializers(self):\n        robj = wsgi.ResponseObject({}, json='foo')\n        robj._bind_method_serializers(dict(xml='bar', json='baz'))\n        self.assertEqual(dict(xml='bar', json='foo'), robj.serializers)\n\n    def test_get_serializer(self):\n        robj = wsgi.ResponseObject({}, json='json', xml='xml', atom='atom')\n        for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():\n            _mtype, serializer = robj.get_serializer(content_type)\n           
 self.assertEqual(mtype, serializer)\n\n    def test_get_serializer_defaults(self):\n        robj = wsgi.ResponseObject({})\n        default_serializers = dict(json='json', xml='xml', atom='atom')\n        for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():\n            self.assertRaises(exception.InvalidContentType,\n                              robj.get_serializer, content_type)\n            _mtype, serializer = robj.get_serializer(content_type,\n                                                     default_serializers)\n            self.assertEqual(mtype, serializer)\n\n    def test_serialize(self):\n        class JSONSerializer(object):\n            def serialize(self, obj):\n                return six.b('json')\n\n        class XMLSerializer(object):\n            def serialize(self, obj):\n                return six.b('xml')\n\n        class AtomSerializer(object):\n            def serialize(self, obj):\n                return six.b('atom')\n\n        robj = wsgi.ResponseObject({}, code=202,\n                                   json=JSONSerializer,\n                                   xml=XMLSerializer,\n                                   atom=AtomSerializer)\n        robj['X-header1'] = 'header1'\n        robj['X-header2'] = 'header2'\n\n        for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():\n            request = wsgi.Request.blank('/tests/123')\n            response = robj.serialize(request, content_type)\n\n            self.assertEqual(content_type, response.headers['Content-Type'])\n            self.assertEqual('header1', response.headers['X-header1'])\n            self.assertEqual('header2', response.headers['X-header2'])\n            self.assertEqual(202, response.status_int)\n            self.assertEqual(six.b(mtype), response.body)\n\n\nclass ValidBodyTest(test.TestCase):\n\n    def setUp(self):\n        super(ValidBodyTest, self).setUp()\n        self.controller = wsgi.Controller()\n\n    def test_is_valid_body(self):\n        body = {'foo': {}}\n        self.assertTrue(self.controller.is_valid_body(body, 'foo'))\n\n    def test_is_valid_body_none(self):\n        self.assertFalse(self.controller.is_valid_body(None, 'foo'))\n\n    def test_is_valid_body_empty(self):\n        self.assertFalse(self.controller.is_valid_body({}, 'foo'))\n\n    def test_is_valid_body_no_entity(self):\n        body = {'bar': {}}\n        self.assertFalse(self.controller.is_valid_body(body, 'foo'))\n\n    def test_is_valid_body_malformed_entity(self):\n        body = {'foo': 'bar'}\n        self.assertFalse(self.controller.is_valid_body(body, 'foo'))\n"
  },
  {
    "path": "delfin/tests/unit/api/v1/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/api/v1/test_access_info.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\n\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import test\nfrom delfin.api.v1.access_info import AccessInfoController\nfrom delfin.tests.unit.api import fakes\n\n\nclass TestAccessInfoController(test.TestCase):\n\n    def setUp(self):\n        super(TestAccessInfoController, self).setUp()\n        self.driver_api = mock.Mock()\n        self.controller = AccessInfoController()\n        self.mock_object(self.controller, 'driver_api', self.driver_api)\n\n    def test_show(self):\n        self.mock_object(\n            db, 'access_info_get',\n            fakes.fake_access_info_show)\n        req = fakes.HTTPRequest.blank(\n            '/storages/865ffd4d-f1f7-47de-abc3-5541ef44d0c1/access-info')\n\n        res_dict = self.controller.show(\n            req, '865ffd4d-f1f7-47de-abc3-5541ef44d0c1')\n        expctd_dict = {\n            \"model\": \"fake_driver\",\n            \"vendor\": \"fake_storage\",\n            \"storage_id\": \"865ffd4d-f1f7-47de-abc3-5541ef44d0c1\",\n            \"storage_name\": None,\n            \"rest\": {\n                \"host\": \"10.0.0.0\",\n                \"port\": 1234,\n                \"username\": \"admin\"\n            },\n            \"ssh\": None,\n            \"cli\": None,\n            \"smis\": None,\n            \"extra_attributes\": {\n                \"array_id\": \"0001234567897\"\n            },\n            \"created_at\": \"2020-06-15T09:50:31.698956\",\n            \"updated_at\": \"2020-06-15T09:50:31.698956\"\n        }\n\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_show_with_invalid_id(self):\n        self.mock_object(\n            db, 'access_info_get',\n            mock.Mock(side_effect=exception.AccessInfoNotFound('fake_id')))\n        req = fakes.HTTPRequest.blank('/storages/fake_id/access-info')\n        self.assertRaises(exception.AccessInfoNotFound,\n                          self.controller.show,\n                          req, 'fake_id')\n\n    def test_access_info_update(self):\n        self.mock_object(\n            db, 'access_info_get',\n            fakes.fake_access_info_show)\n\n        fake_access_info = fakes.fake_update_access_info(None, None)\n        self.mock_object(\n            self.controller.driver_api, 'update_access_info',\n            mock.Mock(return_value=fake_access_info))\n\n        body = {\n            'rest': {\n                'username': 'admin_modified',\n                'password': 'abcd_modified',\n                'host': '10.0.0.0',\n                'port': 1234\n            },\n            'extra_attributes': {'array_id': '0001234567891'}\n        }\n        req = fakes.HTTPRequest.blank(\n            '/storages/865ffd4d-f1f7-47de-abc3-5541ef44d0c1/access-info')\n        res_dict = self.controller.update(\n            req, '865ffd4d-f1f7-47de-abc3-5541ef44d0c1', body=body)\n        expctd_dict = {\n            \"model\": 
\"fake_driver\",\n            \"vendor\": \"fake_storage\",\n            \"storage_id\": \"865ffd4d-f1f7-47de-abc3-5541ef44d0c1\",\n            \"storage_name\": None,\n            \"rest\": {\n                \"username\": \"admin_modified\",\n                \"host\": \"10.0.0.0\",\n                \"port\": 1234\n            },\n            \"ssh\": None,\n            \"cli\": None,\n            \"smis\": None,\n            \"extra_attributes\": {\n                \"array_id\": \"0001234567897\"\n            },\n            \"created_at\": \"2020-06-15T09:50:31.698956\",\n            \"updated_at\": \"2020-06-15T09:50:31.698956\"\n        }\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_show_all(self):\n        self.mock_object(\n            db, 'access_info_get_all',\n            fakes.fake_access_infos_show_all\n        )\n        req = fakes.HTTPRequest.blank('/storages/access-infos')\n\n        res_dict = self.controller.show_all(req)\n        expctd_dict = {\n            'access_infos': [{\n                \"model\": \"fake_driver\",\n                \"vendor\": \"fake_storage\",\n                \"storage_id\": \"865ffd4d-f1f7-47de-abc3-5541ef44d0c1\",\n                \"storage_name\": None,\n                \"rest\": {\n                    \"host\": \"10.0.0.0\",\n                    \"port\": 1234,\n                    \"username\": \"admin\"\n                },\n                \"ssh\": None,\n                \"cli\": None,\n                \"smis\": None,\n                \"extra_attributes\": {\n                    \"array_id\": \"0001234567897\"\n                },\n                \"created_at\": \"2020-06-15T09:50:31.698956\",\n                \"updated_at\": \"2020-06-15T09:50:31.698956\"\n            }]\n        }\n\n        self.assertDictEqual(expctd_dict, res_dict)\n"
  },
  {
    "path": "delfin/tests/unit/api/v1/test_alert_source.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom unittest import mock\n\nfrom oslo_utils import importutils\n\nfrom delfin.common import config # noqa\nfrom delfin import cryptor\nfrom delfin import exception\nfrom delfin.tests.unit.api import fakes\n\n\nclass AlertSourceControllerTestCase(unittest.TestCase):\n    ALERT_CONTROLLER_CLASS = 'delfin.api.v1.alert_source.AlertSourceController'\n\n    @mock.patch('delfin.alert_manager.rpcapi.AlertAPI', mock.Mock())\n    def _get_alert_controller(self):\n        alert_controller_class = importutils.import_class(\n            self.ALERT_CONTROLLER_CLASS)\n        alert_controller = alert_controller_class()\n        return alert_controller\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_put_v3_authpriv_config_create_success(self, mock_alert_source_get,\n                                                   mock_alert_source_update):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        expected_alert_source = {'storage_id': 'abcd-1234-5678',\n                                 'host': '127.0.0.1',\n                                 'version': 'snmpv3',\n                                 'engine_id': '800000d30300000e112245',\n                                 'security_level': None,\n                                 'username': 'test1',\n                                 'auth_protocol': 'HMACMD5',\n                                 'privacy_protocol': 'DES',\n                                 'port': 161,\n                                 'context_name': \"\",\n                                 'retry_num': 1,\n                                 'expiration': 1,\n                                 \"created_at\": '2020-06-15T09:50:31.698956',\n                                 \"updated_at\": '2020-06-15T09:50:31.698956'\n                                 }\n        mock_alert_source_update.return_value = fakes.fake_v3_alert_source()\n        mock_alert_source_get.return_value = fakes.fake_v3_alert_source()\n\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v3_alert_source_config()\n\n        output_alert_source = alert_controller_inst.put(req, fake_storage_id,\n                                                        body=body)\n        self.assertDictEqual(expected_alert_source, output_alert_source)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_put_v3_config_noauthnopriv_create_success(self,\n                                                       mock_alert_source_get,\n                                                       mock_alert_source_update\n                                                       ):\n        req = 
fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_update.return_value = fakes. \\\n            fake_v3_alert_source_noauth_nopriv()\n        mock_alert_source_get.return_value = fakes. \\\n            fake_v3_alert_source_noauth_nopriv()\n        expected_alert_source = {'storage_id': 'abcd-1234-5678',\n                                 'host': '127.0.0.1',\n                                 'version': 'snmpv3',\n                                 'engine_id': '800000d30300000e112245',\n                                 'security_level': 'noAuthnoPriv',\n                                 'username': 'test1',\n                                 'auth_protocol': None,\n                                 'privacy_protocol': None,\n                                 'port': 161,\n                                 'context_name': \"\",\n                                 'retry_num': 1,\n                                 'expiration': 1,\n                                 \"created_at\": '2020-06-15T09:50:31.698956',\n                                 \"updated_at\": '2020-06-15T09:50:31.698956'\n                                 }\n\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v3_alert_source_config()\n        body['security_level'] = 'noAuthnoPriv'\n\n        output_alert_source = alert_controller_inst.put(req, fake_storage_id,\n                                                        body=body)\n        self.assertDictEqual(expected_alert_source, output_alert_source)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_put_v3_config_authnopriv_create_success(self,\n                                                     mock_alert_source_get,\n                                                     mock_alert_source_update):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_update.return_value = fakes. \\\n            fake_v3_alert_source_auth_nopriv()\n        mock_alert_source_get.return_value = fakes. 
\\\n            fake_v3_alert_source_auth_nopriv()\n        expected_alert_source = {'storage_id': 'abcd-1234-5678',\n                                 'host': '127.0.0.1',\n                                 'version': 'snmpv3',\n                                 'engine_id': '800000d30300000e112245',\n                                 'security_level': 'authNoPriv',\n                                 'username': 'test1',\n                                 'auth_protocol': 'HMACMD5',\n                                 'privacy_protocol': None,\n                                 'port': 161,\n                                 'context_name': \"\",\n                                 'retry_num': 1,\n                                 'expiration': 1,\n                                 \"created_at\": '2020-06-15T09:50:31.698956',\n                                 \"updated_at\": '2020-06-15T09:50:31.698956'\n                                 }\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v3_alert_source_config()\n        body['security_level'] = 'authNoPriv'\n\n        output_alert_source = alert_controller_inst.put(req, fake_storage_id,\n                                                        body=body)\n        self.assertDictEqual(expected_alert_source, output_alert_source)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_put_v2_config_success(self, mock_alert_source_get,\n                                   mock_alert_source_update):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        return_v2_alert_source = fakes.fake_v2_alert_source()\n        return_v2_alert_source['community_string'] = cryptor.encode(\n            return_v2_alert_source['community_string'])\n        mock_alert_source_update.return_value = return_v2_alert_source\n        mock_alert_source_get.return_value = return_v2_alert_source\n        expected_alert_source = {'storage_id': 'abcd-1234-5678',\n                                 'host': '127.0.0.1',\n                                 'community_string': 'public',\n                                 'version': 'snmpv2c',\n                                 'port': 161,\n                                 \"retry_num\": 1,\n                                 \"expiration\": 1,\n                                 \"created_at\": '2020-06-15T09:50:31.698956',\n                                 \"updated_at\": '2020-06-15T09:50:31.698956'\n                                 }\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v2_alert_source_config()\n\n        output_alert_source = alert_controller_inst.put(req, fake_storage_id,\n                                                        body=body)\n        self.assertDictEqual(expected_alert_source, output_alert_source)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_delete')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_delete_v3_config_success(self, mock_alert_source_get,\n                                      mock_alert_source_delete):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_delete.return_value = {}\n        mock_alert_source_get.return_value = fakes.fake_v3_alert_source()\n\n        alert_controller_inst = 
self._get_alert_controller()\n        alert_controller_inst.delete(req, fake_storage_id)\n        self.assertTrue(mock_alert_source_delete.called)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_delete')\n    @mock.patch('delfin.db.alert_source_get',\n                fakes.alert_source_get_exception)\n    def test_delete_v3_config_failure(self, mock_alert_source_delete):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_delete.return_value = {}\n\n        alert_controller_inst = self._get_alert_controller()\n        self.assertRaisesRegex(exception.AlertSourceNotFound, \"Alert source \"\n                                                              \"for storage \"\n                                                              \"abcd-1234-5678 \"\n                                                              \"could not be \"\n                                                              \"found\",\n                               alert_controller_inst.delete, req,\n                               fake_storage_id)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_get')\n    def test_show_v3_config(self, mock_alert_source_get):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_get.return_value = fakes.fake_v3_alert_source()\n        expected_alert_source = {'storage_id': 'abcd-1234-5678',\n                                 'host': '127.0.0.1',\n                                 'version': 'snmpv3',\n                                 'engine_id': '800000d30300000e112245',\n                                 'security_level': None,\n                                 'username': 'test1',\n                                 'auth_protocol': 'HMACMD5',\n                                 'privacy_protocol': 'DES',\n                                 'port': 161,\n                                 'context_name': \"\",\n                                 'retry_num': 1,\n                                 'expiration': 1,\n                                 \"created_at\": '2020-06-15T09:50:31.698956',\n                                 \"updated_at\": '2020-06-15T09:50:31.698956'\n                                 }\n        alert_controller_inst = self._get_alert_controller()\n        output_alert_source = alert_controller_inst.show(req, fake_storage_id)\n        self.assertDictEqual(expected_alert_source, output_alert_source)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_put_v3_authpriv_no_priv_key(self,\n                                         mock_alert_source_get,\n                                         mock_alert_source_update):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_update.return_value = {}\n        mock_alert_source_get.return_value = fakes.fake_v3_alert_source()\n\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v3_alert_source_config()\n        body['security_level'] = 'authPriv'\n        body['privacy_key'] = ''\n\n        self.assertRaisesRegex(exception.InvalidInput, \"Invalid input for \"\n                                                       \"field/attribute 
\"\n                                                       \"privacy_key\",\n                               alert_controller_inst.put, req, fake_storage_id,\n                               body=body)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_put_v3_authpriv_no_priv_protocol(self,\n                                              mock_alert_source_get,\n                                              mock_alert_source_update):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_update.return_value = {}\n        mock_alert_source_get.return_value = fakes.fake_v3_alert_source()\n\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v3_alert_source_config()\n        body['security_level'] = 'authPriv'\n        body['privacy_protocol'] = ''\n\n        self.assertRaisesRegex(exception.InvalidInput, \"Invalid input for \"\n                                                       \"field/attribute \"\n                                                       \"privacy_protocol\",\n                               alert_controller_inst.put, req, fake_storage_id,\n                               body=body)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_put_v3_authnopriv_no_auth_protocol(self,\n                                                mock_alert_source_get,\n                                                mock_alert_source_update):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_update.return_value = {}\n        mock_alert_source_get.return_value = fakes.fake_v3_alert_source()\n\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v3_alert_source_config()\n        body['security_level'] = 'authNoPriv'\n        body['auth_protocol'] = ''\n\n        self.assertRaisesRegex(exception.InvalidInput, \"Invalid input for \"\n                                                       \"field/attribute \"\n                                                       \"auth_protocol\",\n                               alert_controller_inst.put, req, fake_storage_id,\n                               body=body)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_put_v3_authnopriv_no_auth_key(self,\n                                           mock_alert_source_get,\n                                           mock_alert_source_update):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_update.return_value = {}\n        mock_alert_source_get.return_value = fakes.fake_v3_alert_source()\n\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v3_alert_source_config()\n        body['security_level'] = 'authNoPriv'\n        body['auth_key'] = ''\n\n        self.assertRaisesRegex(exception.InvalidInput, \"Invalid input for \"\n                                                       \"field/attribute \"\n                                                       \"auth_key\",\n                               
alert_controller_inst.put, req, fake_storage_id,\n                               body=body)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_put_without_username(self, mock_alert_source_get,\n                                  mock_alert_source_update):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_update.return_value = {}\n        mock_alert_source_get.return_value = fakes.fake_v3_alert_source()\n\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v3_alert_source_config()\n        body['username'] = ''\n\n        self.assertRaisesRegex(exception.InvalidInput, \"Invalid input \"\n                                                       \"received. Invalid \"\n                                                       \"input for \"\n                                                       \"field/attribute \"\n                                                       \"username.\",\n                               alert_controller_inst.put, req, fake_storage_id,\n                               body=body)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_put_without_engine_id(self, mock_alert_source_get,\n                                   mock_alert_source_update):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_update.return_value = {}\n        mock_alert_source_get.return_value = fakes.fake_v3_alert_source()\n\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v3_alert_source_config()\n        body['engine_id'] = ''\n\n        self.assertRaisesRegex(exception.InvalidInput, \"Invalid input \"\n                                                       \"received. Invalid \"\n                                                       \"input for \"\n                                                       \"field/attribute \"\n                                                       \"engine_id.\",\n                               alert_controller_inst.put, req, fake_storage_id,\n                               body=body)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    def test_put_without_community_str(self, mock_alert_source_get,\n                                       mock_alert_source_update):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_update.return_value = {}\n        mock_alert_source_get.return_value = fakes.fake_v2_alert_source()\n\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v2_alert_source_config()\n        body['community_string'] = ''\n\n        self.assertRaisesRegex(exception.InvalidInput, \"Invalid input \"\n                                                       \"received. 
Invalid \"\n                                                       \"input for \"\n                                                       \"field/attribute \"\n                                                       \"community_string.\",\n                               alert_controller_inst.put, req, fake_storage_id,\n                               body=body)\n\n    @mock.patch('delfin.db.storage_get', mock.Mock())\n    @mock.patch('delfin.db.alert_source_update')\n    @mock.patch('delfin.db.alert_source_get')\n    @mock.patch('pysnmp.entity.rfc3413.oneliner.cmdgen.CommandGenerator'\n                '.getCmd', fakes.fake_getcmd_success)\n    def test_put_v3_snmp_validation_success(self,\n                                            mock_alert_source_get,\n                                            mock_alert_source_update):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/snmp-config')\n        fake_storage_id = 'abcd-1234-5678'\n        mock_alert_source_update.return_value = fakes. \\\n            fake_v3_alert_source_auth_nopriv()\n        mock_alert_source_get.return_value = fakes. \\\n            fake_v3_alert_source_auth_nopriv()\n        expected_alert_source = {'storage_id': 'abcd-1234-5678',\n                                 'host': '127.0.0.1',\n                                 'version': 'snmpv3',\n                                 'engine_id': '800000d30300000e112245',\n                                 'security_level': 'authNoPriv',\n                                 'username': 'test1',\n                                 'auth_protocol': 'HMACMD5',\n                                 'privacy_protocol': None,\n                                 'port': 161,\n                                 'context_name': \"\",\n                                 'retry_num': 1,\n                                 'expiration': 1,\n                                 \"created_at\": '2020-06-15T09:50:31.698956',\n                                 \"updated_at\": '2020-06-15T09:50:31.698956'\n                                 }\n        alert_controller_inst = self._get_alert_controller()\n        body = fakes.fake_v3_alert_source_config()\n        body['security_level'] = 'authNoPriv'\n\n        output_alert_source = alert_controller_inst.put(req, fake_storage_id,\n                                                        body=body)\n        self.assertDictEqual(expected_alert_source, output_alert_source)\n\n    @mock.patch('delfin.db.alert_source_get_all')\n    def test_show_all_snmp_configs(self, mock_alert_source_get_all):\n        req = fakes.HTTPRequest.blank('/storages/snmp-configs')\n        mock_alert_source_get_all.return_value = fakes.fake_all_snmp_configs()\n        expected_alert_source = {\n            'snmp_configs': [{'storage_id': 'abcd-1234-5678',\n                              'host': '127.0.0.1',\n                              'version': 'snmpv3',\n                              'engine_id': '800000d30300000e112245',\n                              'security_level': None,\n                              'username': 'test1',\n                              'auth_protocol': 'HMACMD5',\n                              'privacy_protocol': 'DES',\n                              'port': 161,\n                              'context_name': \"\",\n                              'retry_num': 1,\n                              'expiration': 1,\n                              \"created_at\": '2020-06-15T09:50:31.698956',\n                              \"updated_at\": '2020-06-15T09:50:31.698956'\n       
                       }]\n        }\n        alert_controller_inst = self._get_alert_controller()\n        output_alert_source = alert_controller_inst.show_all(req)\n        self.assertDictEqual(expected_alert_source, output_alert_source)\n"
  },
  {
    "path": "delfin/tests/unit/api/v1/test_alerts.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom unittest import mock\n\nfrom oslo_utils import importutils\n\nfrom delfin import context\nfrom delfin import exception\nfrom delfin.tests.unit.api import fakes\n\n\ndef fake_alert_list():\n    return [{\n        'alert_id': '19660818',\n        'sequence_number': 10,\n        'alert_name': 'SNMP connect failed',\n        'category': 'Fault',\n        'severity': 'Major',\n        'type': 'OperationalViolation',\n        'location': 'NetworkEntity=storage1',\n        'description': \"SNMP connection to the storage failed. \"\n                       \"SNMP traps from storage will not be received.\",\n        'recovery_advice': \"1. The network connection is abnormal. \"\n                           \"2. SNMP authentication parameters \"\n                           \"are invalid.\",\n        'occur_time': 13445566900\n    }]\n\n\ndef fake_storage_info():\n    return {\n        'id': 'abcd-1234-56789',\n        'name': 'storage1',\n        'vendor': 'fake vendor',\n        'model': 'fake model',\n        'serial_number': 'serial-1234'\n    }\n\n\nclass AlertControllerTestCase(unittest.TestCase):\n    ALERT_CONTROLLER_CLASS = 'delfin.api.v1.alerts.AlertController'\n\n    @mock.patch('delfin.alert_manager.rpcapi.AlertAPI', mock.Mock())\n    def _get_alert_controller(self):\n        alert_controller_class = importutils.import_class(\n            self.ALERT_CONTROLLER_CLASS)\n        alert_controller = alert_controller_class()\n        return alert_controller\n\n    @mock.patch('delfin.db.storage_get', fakes.fake_storages_get_all)\n    @mock.patch('delfin.drivers.api.API.clear_alert')\n    @mock.patch('delfin.task_manager.rpcapi.TaskAPI', mock.Mock())\n    def test_delete_alert_success(self, mock_clear_alert):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/alerts'\n                                      '/fake_sequence_number')\n        fake_storage_id = 'abcd-1234-5678'\n        fake_sequence_number = 'abcd-1234'\n\n        alert_controller_inst = self._get_alert_controller()\n        alert_controller_inst.delete(req, fake_storage_id,\n                                     fake_sequence_number)\n        self.assertTrue(mock_clear_alert.called_with(context, fake_storage_id,\n                                                     fake_sequence_number))\n\n    @mock.patch('delfin.db.storage_get', fakes.fake_storage_get_exception)\n    @mock.patch('delfin.drivers.api.API.clear_alert', mock.Mock())\n    @mock.patch('delfin.task_manager.rpcapi.TaskAPI', mock.Mock())\n    def test_delete_alert_failure_storage_not_found(self):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/alerts'\n                                      '/fake_sequence_number')\n        fake_storage_id = 'abcd-1234-5678'\n        fake_sequence_number = 'abcd-1234'\n\n        alert_controller_inst = self._get_alert_controller()\n        self.assertRaisesRegex(exception.StorageNotFound, \"Storage \"\n 
                                                         \"abcd-1234-5678 \"\n                                                          \"could not be \"\n                                                          \"found\",\n                               alert_controller_inst.delete, req,\n                               fake_storage_id, fake_sequence_number)\n\n    @mock.patch('delfin.db.storage_get')\n    @mock.patch('delfin.drivers.api.API.list_alerts')\n    @mock.patch('delfin.task_manager.rpcapi.TaskAPI', mock.Mock())\n    @mock.patch('delfin.api.views.alerts.build_alerts')\n    def test_list_alert_success(self, mock_build_alerts, mock_fake_alerts,\n                                mock_fake_storage):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/alerts')\n        req.GET['begin_time'] = '123400000'\n        req.GET['end_time'] = '123500000'\n        fake_storage_id = 'abcd-1234-5678'\n\n        expected_alert_output = {\n            'alert_id': '19660818',\n            'sequence_number': 10,\n            'alert_name': 'SNMP connect failed',\n            'category': 'Fault',\n            'severity': 'Major',\n            'type': 'OperationalViolation',\n            'location': 'NetworkEntity=storage1',\n            'description': \"SNMP connection to the storage failed. \"\n                           \"SNMP traps from storage will not be received.\",\n            'recovery_advice': \"1. The network connection is abnormal. \"\n                               \"2. SNMP authentication parameters \"\n                               \"are invalid.\",\n            'occur_time': 13445566900,\n            'storage_id': fake_storage_id,\n            'storage_name': 'storage1',\n            'vendor': 'fake vendor',\n            'model': 'fake model',\n            'serial_number': 'serial-1234'\n        }\n        mock_fake_alerts.return_value = fake_alert_list()\n        mock_fake_storage.return_value = fake_storage_info()\n\n        alert_controller_inst = self._get_alert_controller()\n        alert_controller_inst.show(req, fake_storage_id)\n        self.assertTrue(mock_build_alerts.called_with(expected_alert_output))\n\n    @mock.patch('delfin.task_manager.rpcapi.TaskAPI', mock.Mock())\n    def test_list_alert_invalid_querypara(self):\n        req = fakes.HTTPRequest.blank('/storages/fake_id/alerts')\n        req.GET['begin_time'] = '123400000'\n        req.GET['end_time'] = '120400000'\n        fake_storage_id = 'abcd-1234-5678'\n        alert_controller_inst = self._get_alert_controller()\n        self.assertRaisesRegex(exception.InvalidInput, \"end_time should be \"\n                                                       \"greater than \"\n                                                       \"begin_time\",\n                               alert_controller_inst.show, req,\n                               fake_storage_id)\n"
  },
  {
    "path": "delfin/tests/unit/api/v1/test_storage_pools.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\n\n\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import test\nfrom delfin.api.v1.storage_pools import StoragePoolController\nfrom delfin.tests.unit.api import fakes\n\n\nclass TestStoragePoolController(test.TestCase):\n\n    def setUp(self):\n        super(TestStoragePoolController, self).setUp()\n        self.controller = StoragePoolController()\n\n    def test_list(self):\n        self.mock_object(\n            db, 'storage_pool_get_all',\n            fakes.fake_storage_pool_get_all)\n        req = fakes.HTTPRequest.blank('/storage-pools')\n\n        res_dict = self.controller.index(req)\n\n        expctd_dict = {\n            \"storage_pools\": [\n                {\n                    \"created_at\": \"2020-06-10T07:17:08.707356\",\n                    \"updated_at\": \"2020-06-10T07:17:08.707356\",\n                    \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n                    \"name\": \"SRP_1\",\n                    \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n                    \"native_storage_pool_id\": \"SRP_1\",\n                    \"description\": \"fake storage Pool\",\n                    \"status\": \"normal\",\n                    \"storage_type\": \"block\",\n                    \"total_capacity\": 26300318136401,\n                    \"used_capacity\": 19054536509358,\n                    \"free_capacity\": 7245781627043,\n                    'subscribed_capacity': 219902325555200\n                }\n            ]\n        }\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_list_with_filter(self):\n        self.mock_object(\n            db, 'storage_pool_get_all',\n            fakes.fake_storage_pool_get_all)\n        req = fakes.HTTPRequest.blank(\n            '/storage-pools/'\n            '?storage_id=12c2d52f-01bc-41f5-b73f-7abf6f38a2a6'\n            '&sort=name:asc&wrongfilter=remove')\n        res_dict = self.controller.index(req)\n        expctd_dict = {\n            \"storage_pools\": [\n                {\n                    \"created_at\": \"2020-06-10T07:17:08.707356\",\n                    \"updated_at\": \"2020-06-10T07:17:08.707356\",\n                    \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n                    \"name\": \"SRP_1\",\n                    \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n                    \"native_storage_pool_id\": \"SRP_1\",\n                    \"description\": \"fake storage Pool\",\n                    \"status\": \"normal\",\n                    \"storage_type\": \"block\",\n                    \"total_capacity\": 26300318136401,\n                    \"used_capacity\": 19054536509358,\n                    \"free_capacity\": 7245781627043,\n                    'subscribed_capacity': 219902325555200\n                }\n            ]\n        }\n\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    
def test_show(self):\n        self.mock_object(\n            db, 'storage_pool_get',\n            fakes.fake_storage_pool_show)\n        req = fakes.HTTPRequest.blank(\n            '/storage-pools/14155a1f-f053-4ccb-a846-ed67e4387428')\n\n        res_dict = self.controller.show(\n            req, '14155a1f-f053-4ccb-a846-ed67e4387428')\n        expctd_dict = {\n            \"created_at\": \"2020-06-10T07:17:08.707356\",\n            \"updated_at\": \"2020-06-10T07:17:08.707356\",\n            \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n            \"name\": \"SRP_1\",\n            \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n            \"native_storage_pool_id\": \"SRP_1\",\n            \"description\": \"fake storage Pool\",\n            \"status\": \"normal\",\n            \"storage_type\": \"block\",\n            \"total_capacity\": 26300318136401,\n            \"used_capacity\": 19054536509358,\n            \"free_capacity\": 7245781627043,\n            'subscribed_capacity': 219902325555200\n        }\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_show_with_invalid_id(self):\n        self.mock_object(\n            db, 'storage_pool_get',\n            mock.Mock(side_effect=exception.StoragePoolNotFound('fake_id')))\n        req = fakes.HTTPRequest.blank('/storage-pools/fake_id')\n        self.assertRaises(exception.StoragePoolNotFound,\n                          self.controller.show,\n                          req, 'fake_id')\n"
  },
  {
    "path": "delfin/tests/unit/api/v1/test_storages.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\n\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import test\nfrom delfin.api.v1.storages import StorageController\nfrom delfin.common import constants\nfrom delfin.tests.unit.api import fakes\n\n\nclass TestStorageController(test.TestCase):\n\n    def setUp(self):\n        super(TestStorageController, self).setUp()\n        self.task_rpcapi = mock.Mock()\n        self.metrics_task_rpcapi = mock.Mock()\n        self.driver_api = mock.Mock()\n        self.controller = StorageController()\n        self.mock_object(self.controller, 'task_rpcapi', self.task_rpcapi)\n        self.mock_object(self.controller, 'driver_api', self.driver_api)\n\n    @mock.patch.object(db, 'storage_get',\n                       mock.Mock(return_value={'id': 'fake_id'}))\n    @mock.patch('delfin.task_manager.perf_job_controller.delete_perf_job')\n    def test_delete(self, perf_job_controller):\n        req = fakes.HTTPRequest.blank('/storages/fake_id')\n        self.controller.delete(req, 'fake_id')\n        ctxt = req.environ['delfin.context']\n        db.storage_get.assert_called_once_with(ctxt, 'fake_id')\n        self.task_rpcapi.remove_storage_resource.assert_called_with(\n            ctxt, 'fake_id', mock.ANY)\n        self.assertEqual(perf_job_controller.call_count, 1)\n        self.task_rpcapi.remove_storage_in_cache.assert_called_once_with(\n            ctxt, 'fake_id')\n\n    def test_delete_with_invalid_id(self):\n        self.mock_object(\n            db, 'storage_get',\n            mock.Mock(side_effect=exception.StorageNotFound('fake_id')))\n        req = fakes.HTTPRequest.blank('/storages/fake_id')\n        self.assertRaises(exception.StorageNotFound,\n                          self.controller.delete,\n                          req, 'fake_id')\n\n    def test_list(self):\n        self.mock_object(\n            db, 'storage_get_all',\n            fakes.fake_storages_get_all)\n        req = fakes.HTTPRequest.blank('/storages')\n\n        res_dict = self.controller.index(req)\n\n        expctd_dict = {\n            \"storages\": [\n                {\n                    \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n                    \"created_at\": \"2020-06-09T08:59:48.710890\",\n                    \"free_capacity\": 1045449,\n                    \"updated_at\": \"2020-06-09T08:59:48.769470\",\n                    \"name\": \"fake_driver\",\n                    \"location\": \"HK\",\n                    \"firmware_version\": \"1.0.0\",\n                    \"vendor\": \"fake_vendor\",\n                    \"status\": \"normal\",\n                    \"sync_status\": \"SYNCED\",\n                    \"model\": \"fake_model\",\n                    \"description\": \"it is a fake driver.\",\n                    \"serial_number\": \"2102453JPN12KA0000113\",\n                    \"used_capacity\": 3126,\n                    \"total_capacity\": 
1048576,\n                    'raw_capacity': 1610612736000,\n                    'subscribed_capacity': 219902325555200\n                },\n                {\n                    \"id\": \"277a1d8f-a36e-423e-bdd9-db154f32c289\",\n                    \"created_at\": \"2020-06-09T08:58:23.008821\",\n                    \"free_capacity\": 1045449,\n                    \"updated_at\": \"2020-06-09T08:58:23.033601\",\n                    \"name\": \"fake_driver\",\n                    \"location\": \"HK\",\n                    \"firmware_version\": \"1.0.0\",\n                    \"vendor\": \"fake_vendor\",\n                    \"status\": \"normal\",\n                    \"sync_status\": \"SYNCED\",\n                    \"model\": \"fake_model\",\n                    \"description\": \"it is a fake driver.\",\n                    \"serial_number\": \"2102453JPN12KA0000112\",\n                    \"used_capacity\": 3126,\n                    \"total_capacity\": 1048576,\n                    'raw_capacity': 1610612736000,\n                    'subscribed_capacity': 219902325555200\n                }\n            ]\n        }\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_list_with_filter(self):\n        self.mock_object(\n            db, 'storage_get_all',\n            fakes.fake_storages_get_all_with_filter)\n        req = fakes.HTTPRequest.blank(\n            '/storages/?name=fake_driver&sort=name:asc&wrongfilter=remove')\n        res_dict = self.controller.index(req)\n        expctd_dict = {\n            \"storages\": [\n                {\n                    \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n                    \"created_at\": \"2020-06-09T08:59:48.710890\",\n                    \"free_capacity\": 1045449,\n                    \"updated_at\": \"2020-06-09T08:59:48.769470\",\n                    \"name\": \"fake_driver\",\n                    \"location\": \"HK\",\n                    \"firmware_version\": \"1.0.0\",\n                    \"vendor\": \"fake_vendor\",\n                    \"status\": \"normal\",\n                    \"sync_status\": \"SYNCED\",\n                    \"model\": \"fake_model\",\n                    \"description\": \"it is a fake driver.\",\n                    \"serial_number\": \"2102453JPN12KA0000113\",\n                    \"used_capacity\": 3126,\n                    \"total_capacity\": 1048576,\n                    'raw_capacity': 1610612736000,\n                    'subscribed_capacity': 219902325555200\n                }\n            ]\n        }\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_show(self):\n        self.mock_object(\n            db, 'storage_get',\n            fakes.fake_storages_show)\n        req = fakes.HTTPRequest.blank(\n            '/storages/12c2d52f-01bc-41f5-b73f-7abf6f38a2a6')\n\n        res_dict = self.controller.show(req,\n                                        '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6')\n        expctd_dict = {\n            \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n            \"created_at\": \"2020-06-09T08:59:48.710890\",\n            \"free_capacity\": 1045449,\n            \"updated_at\": \"2020-06-09T08:59:48.769470\",\n            \"name\": \"fake_driver\",\n            \"location\": \"HK\",\n            \"firmware_version\": \"1.0.0\",\n            \"vendor\": \"fake_vendor\",\n            \"status\": \"normal\",\n            \"sync_status\": \"SYNCED\",\n            \"model\": \"fake_model\",\n            \"description\": \"it is a 
fake driver.\",\n            \"serial_number\": \"2102453JPN12KA0000113\",\n            \"used_capacity\": 3126,\n            \"total_capacity\": 1048576,\n            'raw_capacity': 1610612736000,\n            'subscribed_capacity': 219902325555200\n        }\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_show_with_invalid_id(self):\n        self.mock_object(\n            db, 'storage_get',\n            mock.Mock(side_effect=exception.StorageNotFound('fake_id')))\n        req = fakes.HTTPRequest.blank('/storages/fake_id')\n        self.assertRaises(exception.StorageNotFound,\n                          self.controller.show,\n                          req, 'fake_id')\n\n    def test_create(self):\n        self.mock_object(\n            self.controller.driver_api, 'discover_storage',\n            mock.Mock(return_value={\n                \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n                'name': 'fake_driver',\n                'description': 'it is a fake driver.',\n                'vendor': 'fake_vendor',\n                'model': 'fake_model',\n                'status': 'normal',\n                'serial_number': '2102453JPN12KA000011',\n                'firmware_version': '1.0.0',\n                'location': 'HK',\n                'total_capacity': 1024 * 1024,\n                'used_capacity': 3126,\n                'free_capacity': 1045449,\n                \"sync_status\": constants.SyncStatus.SYNCED,\n                'raw_capacity': 1610612736000,\n                'subscribed_capacity': 219902325555200\n            }))\n        self.mock_object(\n            db, 'access_info_get_all',\n            fakes.fake_access_info_get_all)\n        self.mock_object(\n            db, 'storage_get',\n            mock.Mock(side_effect=exception.StorageNotFound('fake_id')))\n        self.mock_object(\n            self.controller, 'sync',\n            fakes.fake_sync)\n        body = {\n            'model': 'fake_driver',\n            'vendor': 'fake_storage',\n            'rest': {\n                'username': 'admin',\n                'password': 'abcd',\n                'host': '10.0.0.76',\n                'port': 1234\n            },\n            'extra_attributes': {'array_id': '0001234567891'}\n        }\n        req = fakes.HTTPRequest.blank(\n            '/storages')\n        res_dict = self.controller.create(req,\n                                          body=body)\n        expctd_dict = {\n            \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n            'name': 'fake_driver',\n            'description': 'it is a fake driver.',\n            'vendor': 'fake_vendor',\n            'model': 'fake_model',\n            'status': 'normal',\n            'serial_number': '2102453JPN12KA000011',\n            'firmware_version': '1.0.0',\n            'location': 'HK',\n            'total_capacity': 1024 * 1024,\n            'used_capacity': 3126,\n            'free_capacity': 1045449,\n            \"sync_status\": \"SYNCED\",\n            'raw_capacity': 1610612736000,\n            'subscribed_capacity': 219902325555200\n        }\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_create_when_storage_already_exists(self):\n        self.mock_object(\n            self.controller.driver_api, 'discover_storage',\n            mock.Mock(return_value={\n                \"id\": \"5f5c806d-2e65-473c-b612-345ef43f0642\",\n                'name': 'fake_driver',\n                'description': 'it is a fake driver.',\n                'vendor': 
'fake_vendor',\n                'model': 'fake_model',\n                'status': 'normal',\n                'serial_number': '2102453JPN12KA000011',\n                'firmware_version': '1.0.0',\n                'location': 'HK',\n                'total_capacity': 1024 * 1024,\n                'used_capacity': 3126,\n                'free_capacity': 1045449,\n                \"sync_status\": constants.SyncStatus.SYNCED,\n                'raw_capacity': 1610612736000,\n                'subscribed_capacity': 219902325555200\n            }))\n        self.mock_object(\n            db, 'access_info_get_all',\n            fakes.fake_access_info_get_all)\n        self.mock_object(\n            db, 'storage_get',\n            fakes.fake_storages_show)\n        self.mock_object(\n            self.controller, 'sync',\n            fakes.fake_sync)\n        body = {\n            'model': 'fake_driver',\n            'vendor': 'fake_storage',\n            'rest': {\n                'username': 'admin',\n                'password': 'abcd',\n                'host': '10.0.0.76',\n                'port': 1234\n            },\n            'extra_attributes': {'array_id': '0001234567891'}\n        }\n        req = fakes.HTTPRequest.blank(\n            '/storages')\n        self.assertRaises(exception.StorageAlreadyExists,\n                          self.controller.create,\n                          req, body=body)\n\n    def test_get_capabilities(self):\n        self.mock_object(\n            db, 'storage_get',\n            fakes.fake_storages_show)\n        req = fakes.HTTPRequest.blank(\n            '/storages/12c2d52f-01bc-41f5-b73f-7abf6f38a2a6/capability')\n        self.mock_object(\n            self.driver_api, 'get_capabilities',\n            fakes.fake_get_capabilities)\n\n        resp = self.controller.get_capabilities(\n            req, \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\")\n\n        self.assertEqual(resp['metadata']['vendor'], 'fake_vendor')\n        self.assertEqual(resp['metadata'][\"model\"], 'fake_model')\n\n    def test_get_capabilities_with_invalid_storage_id(self):\n        self.mock_object(\n            db, 'storage_get',\n            fakes.fake_storage_get_exception)\n        req = fakes.HTTPRequest.blank(\n            '/storages/12c2d52f-01bc-41f5-b73f-7abf6f38a2a6/capability')\n\n        storage_id = '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6'\n\n        self.assertRaisesRegex(exception.StorageNotFound, \"Storage \" +\n                                                          storage_id + \" \"\n                                                          \"could not be \"\n                                                          \"found.\",\n                               self.controller.get_capabilities, req,\n                               storage_id)\n\n    def test_get_capabilities_with_none(self):\n        self.mock_object(\n            db, 'storage_get',\n            fakes.fake_storages_show)\n        req = fakes.HTTPRequest.blank(\n            '/storages/12c2d52f-01bc-41f5-b73f-7abf6f38a2a6/capability')\n        self.mock_object(\n            self.driver_api, 'get_capabilities',\n            fakes.custom_fake_get_capabilities(None))\n\n        storage_id = '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6'\n\n        self.assertRaises(exception.StorageCapabilityNotSupported,\n                          self.controller.get_capabilities, req,\n                          storage_id)\n\n    def test_get_capabilities_with_invalid_capabilities(self):\n        self.mock_object(\n            db, 
'storage_get',\n            fakes.fake_storages_show)\n        req = fakes.HTTPRequest.blank(\n            '/storages/12c2d52f-01bc-41f5-b73f-7abf6f38a2a6/capability')\n\n        storage_id = '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6'\n\n        cap = fakes.fake_get_capabilities('fake_context', storage_id)\n        cap['additional_field'] = True\n\n        self.mock_object(\n            self.driver_api, 'get_capabilities',\n            fakes.custom_fake_get_capabilities(cap))\n\n        self.assertRaises(exception.InvalidStorageCapability,\n                          self.controller.get_capabilities, req,\n                          storage_id)\n\n    def test_create_with_performance_monitoring(self):\n        self.mock_object(\n            self.controller.driver_api, 'discover_storage',\n            mock.Mock(return_value={\n                \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n                'name': 'fake_driver',\n                'description': 'it is a fake driver.',\n                'vendor': 'fake_vendor',\n                'model': 'fake_model',\n                'status': 'normal',\n                'serial_number': '2102453JPN12KA000011',\n                'firmware_version': '1.0.0',\n                'location': 'HK',\n                'total_capacity': 1024 * 1024,\n                'used_capacity': 3126,\n                'free_capacity': 1045449,\n                \"sync_status\": constants.SyncStatus.SYNCED,\n                'raw_capacity': 1610612736000,\n                'subscribed_capacity': 219902325555200\n            }))\n        self.mock_object(\n            db, 'access_info_get_all',\n            fakes.fake_access_info_get_all)\n        self.mock_object(\n            db, 'storage_get',\n            mock.Mock(side_effect=exception.StorageNotFound('fake_id')))\n        self.mock_object(\n            self.controller, 'sync',\n            fakes.fake_sync)\n        body = {\n            'model': 'fake_driver',\n            'vendor': 'fake_storage',\n            'rest': {\n                'username': 'admin',\n                'password': 'abcd',\n                'host': '10.0.0.76',\n                'port': 1234\n            },\n            'extra_attributes': {'array_id': '0001234567891'}\n        }\n        req = fakes.HTTPRequest.blank(\n            '/storages')\n\n        resource_metrics = {\n            \"storage\": {\n                \"throughput\": {\n                    \"unit\": \"MB/s\",\n                    \"description\": \"Represents how much data is \"\n                                   \"successfully transferred in MB/s\"\n                },\n            }\n        }\n\n        self.mock_object(\n            self.controller.driver_api, 'get_capabilities',\n            mock.Mock(return_value={\n                'is_historic': False,\n                'resource_metrics': resource_metrics\n            }))\n\n        def test_task_create(context, values):\n            self.assertEqual(values['resource_metrics'], resource_metrics)\n\n        # Patch via mock_object so the stub is removed after the test\n        # instead of leaking into other test cases.\n        self.mock_object(db, 'task_create', test_task_create)\n\n        res_dict = self.controller.create(req, body=body)\n        expctd_dict = {\n            \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n            'name': 'fake_driver',\n            'description': 'it is a fake driver.',\n            
'vendor': 'fake_vendor',\n            'model': 'fake_model',\n            'status': 'normal',\n            'serial_number': '2102453JPN12KA000011',\n            'firmware_version': '1.0.0',\n            'location': 'HK',\n            'total_capacity': 1024 * 1024,\n            'used_capacity': 3126,\n            'free_capacity': 1045449,\n            \"sync_status\": \"SYNCED\",\n            'raw_capacity': 1610612736000,\n            'subscribed_capacity': 219902325555200\n        }\n\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_create_with_performance_monitoring_with_empty_metric(self):\n        self.mock_object(\n            self.controller.driver_api, 'discover_storage',\n            mock.Mock(return_value={\n                \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n                'name': 'fake_driver',\n                'description': 'it is a fake driver.',\n                'vendor': 'fake_vendor',\n                'model': 'fake_model',\n                'status': 'normal',\n                'serial_number': '2102453JPN12KA000011',\n                'firmware_version': '1.0.0',\n                'location': 'HK',\n                'total_capacity': 1024 * 1024,\n                'used_capacity': 3126,\n                'free_capacity': 1045449,\n                \"sync_status\": constants.SyncStatus.SYNCED,\n                'raw_capacity': 1610612736000,\n                'subscribed_capacity': 219902325555200\n            }))\n        self.mock_object(\n            db, 'access_info_get_all',\n            fakes.fake_access_info_get_all)\n        self.mock_object(\n            db, 'storage_get',\n            mock.Mock(side_effect=exception.StorageNotFound('fake_id')))\n\n        self.mock_object(self.controller, 'sync', fakes.fake_sync)\n        body = {\n            'model': 'fake_driver',\n            'vendor': 'fake_storage',\n            'rest': {\n                'username': 'admin',\n                'password': 'abcd',\n                'host': '10.0.0.76',\n                'port': 1234\n            },\n            'extra_attributes': {'array_id': '0001234567891'}\n        }\n\n        req = fakes.HTTPRequest.blank(\n            '/storages')\n\n        resource_metrics = {}\n\n        self.mock_object(\n            self.controller.driver_api, 'get_capabilities',\n            mock.Mock(return_value={\n                'is_historic': False,\n                'resource_metrics': resource_metrics\n            }))\n\n        res_dict = self.controller.create(req, body=body)\n        expctd_dict = {\n            \"id\": \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\",\n            'name': 'fake_driver',\n            'description': 'it is a fake driver.',\n            'vendor': 'fake_vendor',\n            'model': 'fake_model',\n            'status': 'normal',\n            'serial_number': '2102453JPN12KA000011',\n            'firmware_version': '1.0.0',\n            'location': 'HK',\n            'total_capacity': 1024 * 1024,\n            'used_capacity': 3126,\n            'free_capacity': 1045449,\n            \"sync_status\": \"SYNCED\",\n            'raw_capacity': 1610612736000,\n            'subscribed_capacity': 219902325555200\n        }\n\n        self.assertDictEqual(expctd_dict, res_dict)\n"
  },
  {
    "path": "delfin/tests/unit/api/v1/test_volumes.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\n\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import test\nfrom delfin.api.v1.volumes import VolumeController\nfrom delfin.tests.unit.api import fakes\n\n\nclass TestVolumeController(test.TestCase):\n\n    def setUp(self):\n        super(TestVolumeController, self).setUp()\n        self.controller = VolumeController()\n\n    def test_list(self):\n        self.mock_object(\n            db, 'volume_get_all',\n            fakes.fake_volume_get_all)\n        req = fakes.HTTPRequest.blank('/volumes')\n\n        res_dict = self.controller.index(req)\n\n        expctd_dict = {\n            \"volumes\": [\n                {\n                    \"created_at\": \"2020-06-10T07:17:31.157079\",\n                    \"updated_at\": \"2020-06-10T07:17:31.157079\",\n                    \"id\": \"d7fe425b-fddc-4ba4-accb-4343c142dc47\",\n                    \"name\": \"004DF\",\n                    \"storage_id\": \"5f5c806d-2e65-473c-b612-345ef43f0642\",\n                    \"native_storage_pool_id\": \"SRP_1\",\n                    \"description\": \"fake_storage 'thin device' volume\",\n                    \"status\": \"available\",\n                    \"native_volume_id\": \"004DF\",\n                    \"wwn\": \"60000970000297801855533030344446\",\n                    \"type\": 'thin',\n                    \"total_capacity\": 1075838976,\n                    \"used_capacity\": 0,\n                    \"free_capacity\": 1075838976,\n                    \"compressed\": True,\n                    \"deduplicated\": False\n                },\n                {\n                    \"created_at\": \"2020-06-10T07:17:31.157079\",\n                    \"updated_at\": \"2020-06-10T07:17:31.157079\",\n                    \"id\": \"dad84a1f-db8d-49ab-af40-048fc3544c12\",\n                    \"name\": \"004E0\",\n                    \"storage_id\": \"5f5c806d-2e65-473c-b612-345ef43f0642\",\n                    \"native_storage_pool_id\": \"SRP_1\",\n                    \"description\": \"fake_storage 'thin device' volume\",\n                    \"status\": \"available\",\n                    \"native_volume_id\": \"004E0\",\n                    \"wwn\": \"60000970000297801855533030344530\",\n                    \"type\": 'thin',\n                    \"total_capacity\": 1075838976,\n                    \"used_capacity\": 0,\n                    \"free_capacity\": 1075838976,\n                    \"compressed\": True,\n                    \"deduplicated\": False\n                }\n            ]\n        }\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_list_with_filter(self):\n        self.mock_object(\n            db, 'volume_get_all',\n            fakes.fake_volume_get_all)\n        req = fakes.HTTPRequest.blank(\n            '/volumes/'\n            '?storage_id=12c2d52f-01bc-41f5-b73f-7abf6f38a2a6'\n            
'&sort=name:asc&wrongfilter=remove')\n        res_dict = self.controller.index(req)\n        expctd_dict = {\n            \"volumes\": [\n                {\n                    \"created_at\": \"2020-06-10T07:17:31.157079\",\n                    \"updated_at\": \"2020-06-10T07:17:31.157079\",\n                    \"id\": \"d7fe425b-fddc-4ba4-accb-4343c142dc47\",\n                    \"name\": \"004DF\",\n                    \"storage_id\": \"5f5c806d-2e65-473c-b612-345ef43f0642\",\n                    \"native_storage_pool_id\": \"SRP_1\",\n                    \"description\": \"fake_storage 'thin device' volume\",\n                    \"status\": \"available\",\n                    \"native_volume_id\": \"004DF\",\n                    \"wwn\": \"60000970000297801855533030344446\",\n                    \"type\": 'thin',\n                    \"total_capacity\": 1075838976,\n                    \"used_capacity\": 0,\n                    \"free_capacity\": 1075838976,\n                    \"compressed\": True,\n                    \"deduplicated\": False\n                },\n                {\n                    \"created_at\": \"2020-06-10T07:17:31.157079\",\n                    \"updated_at\": \"2020-06-10T07:17:31.157079\",\n                    \"id\": \"dad84a1f-db8d-49ab-af40-048fc3544c12\",\n                    \"name\": \"004E0\",\n                    \"storage_id\": \"5f5c806d-2e65-473c-b612-345ef43f0642\",\n                    \"native_storage_pool_id\": \"SRP_1\",\n                    \"description\": \"fake_storage 'thin device' volume\",\n                    \"status\": \"available\",\n                    \"native_volume_id\": \"004E0\",\n                    \"wwn\": \"60000970000297801855533030344530\",\n                    \"type\": 'thin',\n                    \"total_capacity\": 1075838976,\n                    \"used_capacity\": 0,\n                    \"free_capacity\": 1075838976,\n                    \"compressed\": True,\n                    \"deduplicated\": False\n                }\n            ]\n        }\n\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_show(self):\n        self.mock_object(\n            db, 'volume_get',\n            fakes.fake_volume_show)\n        req = fakes.HTTPRequest.blank(\n            '/volumes/d7fe425b-fddc-4ba4-accb-4343c142dc47')\n\n        res_dict = self.controller.show(\n            req, 'd7fe425b-fddc-4ba4-accb-4343c142dc47')\n        expctd_dict = {\n            \"created_at\": \"2020-06-10T07:17:31.157079\",\n            \"updated_at\": \"2020-06-10T07:17:31.157079\",\n            \"id\": \"d7fe425b-fddc-4ba4-accb-4343c142dc47\",\n            \"name\": \"004DF\",\n            \"storage_id\": \"5f5c806d-2e65-473c-b612-345ef43f0642\",\n            \"native_storage_pool_id\": \"SRP_1\",\n            \"description\": \"fake_storage 'thin device' volume\",\n            \"status\": \"available\",\n            \"native_volume_id\": \"004DF\",\n            \"wwn\": \"60000970000297801855533030344446\",\n            \"type\": 'thin',\n            \"total_capacity\": 1075838976,\n            \"used_capacity\": 0,\n            \"free_capacity\": 1075838976,\n            \"compressed\": True,\n            \"deduplicated\": False\n        }\n\n        self.assertDictEqual(expctd_dict, res_dict)\n\n    def test_show_with_invalid_id(self):\n        self.mock_object(\n            db, 'volume_get',\n            mock.Mock(side_effect=exception.VolumeNotFound('fake_id')))\n        req = 
fakes.HTTPRequest.blank('/volumes/fake_id')\n        self.assertRaises(exception.VolumeNotFound,\n                          self.controller.show,\n                          req, 'fake_id')\n"
  },
  {
    "path": "delfin/tests/unit/conf_fixture.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport os\n\nfrom oslo_service import wsgi\n\nfrom delfin.common import config\n\nCONF = config.CONF\n\n\ndef set_defaults(conf):\n    _safe_set_of_opts(conf, 'verbose', True)\n    _safe_set_of_opts(conf, 'state_path', os.path.abspath(\n        os.path.join(os.path.dirname(__file__),\n                     '..',\n                     '..')))\n    _safe_set_of_opts(conf, 'connection', \"sqlite://\", group='database')\n    _safe_set_of_opts(conf, 'sqlite_synchronous', False)\n    _API_PASTE_PATH = os.path.abspath(\n        os.path.join(CONF.state_path,\n                     'etc/delfin/api-paste.ini'))\n    wsgi.register_opts(conf)\n    _safe_set_of_opts(conf, 'api_paste_config', _API_PASTE_PATH)\n\n\ndef _safe_set_of_opts(conf, *args, **kwargs):\n    try:\n        conf.set_default(*args, **kwargs)\n    except config.cfg.NoSuchOptError:\n        # Assumed that opt is not imported and not used\n        pass\n"
  },
  {
    "path": "delfin/tests/unit/db/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/db/test_db_api.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\n\nfrom delfin import context, exception\nfrom delfin import test\nfrom delfin.db import api as db_api\nfrom delfin.db.sqlalchemy import api, models\nfrom delfin.tests.unit import fake_data, utils\n\nctxt = context.get_admin_context()\n\n\nclass TestIMDBAPIStoragePool(test.TestCase):\n    @mock.patch('sqlalchemy.create_engine', mock.Mock())\n    def test_register_db(self):\n        db_api.register_db()\n\n    def test_get_session(self):\n        api.get_session()\n\n    def test_get_engine(self):\n        api.get_engine()\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_basic_storage_pool_create(self, mock_session):\n        storage_pool_model_lst = fake_data.fake_storage_pool_create()\n        expected = fake_data.fake_expected_storage_pool_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        got = db_api.storage_pools_create(ctxt, storage_pool_model_lst)\n        utils.validate_db_schema_model(got[0], models.StoragePool)\n        utils.validate_db_schema_model(expected[0], models.StoragePool)\n        self.assertDictMatch(got[0], expected[0])\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_unknown_attribute_storage_pool_model_create(self, mock_session):\n        storage_pool_model_lst = fake_data.fake_storage_pool_create()\n        expected = fake_data.fake_expected_storage_pool_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        got = db_api.storage_pools_create(ctxt, storage_pool_model_lst)\n        self.assertRaisesRegex(AssertionError, \"\",\n                               utils.validate_db_schema_model,\n                               got[1], models.StoragePool)\n\n\nclass TestSIMDBAPI(test.TestCase):\n\n    @mock.patch('sqlalchemy.create_engine', mock.Mock())\n    def test_register_db(self):\n        db_api.register_db()\n\n    def test_get_session(self):\n        api.get_session()\n\n    def test_get_engine(self):\n        api.get_engine()\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_get(self, mock_session):\n        fake_storage = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_storage\n        result = db_api.storage_get(ctxt,\n                                    'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_update(self, mock_session):\n        fake_storage = models.Storage()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_storage\n        result = db_api.storage_update(ctxt,\n                                       'c5c91c98-91aa-40e6-85ac-37a1d3b32bda',\n                                       fake_storage)\n        
assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_delete(self, mock_session):\n        fake_storage = models.Storage()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_storage\n        result = db_api.storage_delete(ctxt,\n                                       'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_create(self, mock_session):\n        fake_storage = models.Storage()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_storage\n        result = db_api.storage_create(ctxt, fake_storage)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_get_all(self, mock_session):\n        fake_storage = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_storage\n        result = db_api.storage_get_all(ctxt)\n        assert len(result) == 0\n\n        mock_session.return_value.__enter__.return_value.query = fake_storage\n        result = db_api.storage_get_all(ctxt, filters={'status': 'Normal'})\n        assert len(result) == 0\n\n        result = db_api.storage_get_all(ctxt, limit=1)\n        assert len(result) == 0\n\n        result = db_api.storage_get_all(ctxt, offset=3)\n        assert len(result) == 0\n\n        result = db_api.storage_get_all(ctxt, sort_dirs=['desc'],\n                                        sort_keys=['name'])\n        assert len(result) == 0\n\n        self.assertRaises(exception.InvalidInput, api.storage_get_all,\n                          ctxt, sort_dirs=['desc', 'asc'],\n                          sort_keys=['name'])\n\n        self.assertRaises(exception.InvalidInput, api.storage_get_all,\n                          ctxt, sort_dirs=['desc_err'],\n                          sort_keys=['name'])\n\n        result = db_api.storage_get_all(ctxt, sort_dirs=['desc'],\n                                        sort_keys=['name', 'id'])\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_pool_get(self, mock_session):\n        fake_storage_pool = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_storage_pool\n        result = db_api.storage_pool_get(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_pool_get_all(self, mock_session):\n        fake_storage_pool = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_storage_pool\n        result = api.storage_pool_get_all(context)\n        assert len(result) == 0\n\n        result = db_api.storage_pool_get_all(context,\n                                             filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_pools_update(self, mock_session):\n        storage_pools = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = storage_pools\n        result = db_api.storage_pools_update(context, storage_pools)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def 
test_storage_pool_update(self, mock_session):\n        values = {'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = values\n        result = db_api.storage_pool_update(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd', values)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_pools_delete(self, mock_session):\n        fake_storage_pools = [models.StoragePool().id]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_storage_pools\n        result = db_api.storage_pools_delete(context, fake_storage_pools)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_pools_create(self, mock_session):\n        fake_storage_pools = [models.StoragePool()]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_storage_pools\n        result = db_api.storage_pools_create(context, fake_storage_pools)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_pool_create(self, mock_session):\n        fake_storage_pool = models.StoragePool()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_storage_pool\n        result = db_api.storage_pool_create(context, fake_storage_pool)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_volume_get(self, mock_session):\n        fake_volume = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_volume\n        result = db_api.volume_get(ctxt,\n                                   'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_volumes_update(self, mock_session):\n        volumes = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = volumes\n        result = db_api.volumes_update(ctxt, volumes)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_volume_update(self, mock_session):\n        volumes = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = volumes\n        result = db_api.volume_update(ctxt,\n                                      'c5c91c98-91aa-40e6-85ac-37a1d3b32bd',\n                                      volumes)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_volumes_delete(self, mock_session):\n        fake_volume = ['c5c91c98-91aa-40e6-85ac-37a1d3b32bd']\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_volume\n        result = db_api.volumes_delete(ctxt, fake_volume)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_volumes_create(self, mock_session):\n        fake_volume = [models.Volume()]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_volume\n        result = db_api.volumes_create(ctxt, fake_volume)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def 
test_volume_create(self, mock_session):\n        fake_volume = models.Volume()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_volume\n        result = db_api.volume_create(ctxt, fake_volume)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_volume_get_all(self, mock_session):\n        fake_volume = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_volume\n        result = db_api.volume_get_all(ctxt)\n        assert len(result) == 0\n\n        result = db_api.volume_get_all(ctxt, filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_controller_get(self, mock_session):\n        fake_controller = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_controller\n        result = db_api.controller_get(ctxt,\n                                       'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_controllers_update(self, mock_session):\n        controllers = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = controllers\n        result = db_api.controllers_update(ctxt, controllers)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_controller_update(self, mock_session):\n        controllers = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = controllers\n        result = db_api.controller_update(\n            ctxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd', controllers)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_controllers_delete(self, mock_session):\n        fake_controller = ['c5c91c98-91aa-40e6-85ac-37a1d3b32bd']\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_controller\n        result = db_api.controllers_delete(ctxt, fake_controller)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_controllers_create(self, mock_session):\n        fake_controller = [models.Volume()]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_controller\n        result = db_api.controllers_create(ctxt, fake_controller)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_controller_create(self, mock_session):\n        fake_controller = models.Volume()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_controller\n        result = db_api.controller_create(ctxt, fake_controller)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_controller_get_all(self, mock_session):\n        fake_controller = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_controller\n        result = db_api.controller_get_all(ctxt)\n        assert len(result) == 0\n\n        result = db_api.controller_get_all(ctxt, filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    
    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_port_get(self, mock_session):\n        fake_port = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_port\n        result = db_api.port_get(ctxt,\n                                 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_ports_update(self, mock_session):\n        ports = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = ports\n        result = db_api.ports_update(ctxt, ports)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_port_update(self, mock_session):\n        ports = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = ports\n        result = db_api.port_update(ctxt,\n                                    'c5c91c98-91aa-40e6-85ac-37a1d3b32bd',\n                                    ports)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_ports_delete(self, mock_session):\n        fake_port = ['c5c91c98-91aa-40e6-85ac-37a1d3b32bd']\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_port\n        result = db_api.ports_delete(ctxt, fake_port)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_ports_create(self, mock_session):\n        fake_port = [models.Port()]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_port\n        result = db_api.ports_create(ctxt, fake_port)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_port_create(self, mock_session):\n        fake_port = models.Port()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_port\n        result = db_api.port_create(ctxt, fake_port)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_port_get_all(self, mock_session):\n        fake_port = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_port\n        result = db_api.port_get_all(ctxt)\n        assert len(result) == 0\n\n        result = db_api.port_get_all(ctxt, filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_disk_get(self, mock_session):\n        fake_disk = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_disk\n        result = db_api.disk_get(ctxt,\n                                 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_disks_update(self, mock_session):\n        disks = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = disks\n        result = db_api.disks_update(ctxt, disks)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_disk_update(self, mock_session):\n        disks = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]
\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = disks\n        result = db_api.disk_update(ctxt,\n                                    'c5c91c98-91aa-40e6-85ac-37a1d3b32bd',\n                                    disks)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_disks_delete(self, mock_session):\n        fake_disk = ['c5c91c98-91aa-40e6-85ac-37a1d3b32bd']\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_disk\n        result = db_api.disks_delete(ctxt, fake_disk)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_disks_create(self, mock_session):\n        fake_disk = [models.Disk()]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_disk\n        result = db_api.disks_create(ctxt, fake_disk)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_disk_create(self, mock_session):\n        fake_disk = models.Disk()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_disk\n        result = db_api.disk_create(ctxt, fake_disk)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_disk_get_all(self, mock_session):\n        fake_disk = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_disk\n        result = db_api.disk_get_all(ctxt)\n        assert len(result) == 0\n\n        result = db_api.disk_get_all(ctxt, filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_filesystem_get(self, mock_session):\n        fake_filesystem = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_filesystem\n        result = db_api.filesystem_get(ctxt,\n                                       'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_filesystems_update(self, mock_session):\n        filesystems = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = filesystems\n        result = db_api.filesystems_update(ctxt, filesystems)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_filesystem_update(self, mock_session):\n        filesystems = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = filesystems\n        result = db_api.filesystem_update(\n            ctxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd', filesystems)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_filesystems_delete(self, mock_session):\n        fake_filesystem = ['c5c91c98-91aa-40e6-85ac-37a1d3b32bd']\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_filesystem\n        result = db_api.filesystems_delete(ctxt, fake_filesystem)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_filesystems_create(self, mock_session):\n        fake_filesystem = [models.Filesystem()]
\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_filesystem\n        result = db_api.filesystems_create(ctxt, fake_filesystem)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_filesystem_create(self, mock_session):\n        fake_filesystem = models.Filesystem()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_filesystem\n        result = db_api.filesystem_create(ctxt, fake_filesystem)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_filesystem_get_all(self, mock_session):\n        fake_filesystem = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_filesystem\n        result = db_api.filesystem_get_all(ctxt)\n        assert len(result) == 0\n\n        result = db_api.filesystem_get_all(ctxt, filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_qtree_get(self, mock_session):\n        fake_qtree = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_qtree\n        result = db_api.qtree_get(ctxt,\n                                  'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_qtrees_update(self, mock_session):\n        qtrees = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = qtrees\n        result = db_api.qtrees_update(ctxt, qtrees)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_qtree_update(self, mock_session):\n        qtrees = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = qtrees\n        result = db_api.qtree_update(ctxt,\n                                     'c5c91c98-91aa-40e6-85ac-37a1d3b32bd',\n                                     qtrees)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_qtrees_delete(self, mock_session):\n        fake_qtree = ['c5c91c98-91aa-40e6-85ac-37a1d3b32bd']\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_qtree\n        result = db_api.qtrees_delete(ctxt, fake_qtree)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_qtrees_create(self, mock_session):\n        fake_qtree = [models.Qtree()]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_qtree\n        result = db_api.qtrees_create(ctxt, fake_qtree)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_qtree_create(self, mock_session):\n        fake_qtree = models.Qtree()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_qtree\n        result = db_api.qtree_create(ctxt, fake_qtree)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_qtree_get_all(self, mock_session):\n        fake_qtree = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_qtree\n
        result = db_api.qtree_get_all(ctxt)\n        assert len(result) == 0\n\n        result = db_api.qtree_get_all(ctxt, filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_quota_get(self, mock_session):\n        fake_quota = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_quota\n        result = db_api.quota_get(ctxt,\n                                  'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_quotas_update(self, mock_session):\n        quotas = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = quotas\n        result = db_api.quotas_update(ctxt, quotas)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_quota_update(self, mock_session):\n        quotas = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = quotas\n        result = db_api.quota_update(ctxt,\n                                     'c5c91c98-91aa-40e6-85ac-37a1d3b32bd',\n                                     quotas)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_quotas_delete(self, mock_session):\n        fake_quota = ['c5c91c98-91aa-40e6-85ac-37a1d3b32bd']\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_quota\n        result = db_api.quotas_delete(ctxt, fake_quota)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_quotas_create(self, mock_session):\n        fake_quota = [models.Quota()]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_quota\n        result = db_api.quotas_create(ctxt, fake_quota)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_quota_create(self, mock_session):\n        fake_quota = models.Quota()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_quota\n        result = db_api.quota_create(ctxt, fake_quota)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_quota_get_all(self, mock_session):\n        fake_quota = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_quota\n        result = db_api.quota_get_all(ctxt)\n        assert len(result) == 0\n\n        result = db_api.quota_get_all(ctxt, filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_share_get(self, mock_session):\n        fake_share = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_share\n        result = db_api.share_get(ctxt,\n                                  'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_shares_update(self, mock_session):\n        shares = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = shares\n
        result = db_api.shares_update(ctxt, shares)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_share_update(self, mock_session):\n        shares = [{'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = shares\n        result = db_api.share_update(ctxt,\n                                     'c5c91c98-91aa-40e6-85ac-37a1d3b32bd',\n                                     shares)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_shares_delete(self, mock_session):\n        fake_share = ['c5c91c98-91aa-40e6-85ac-37a1d3b32bd']\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_share\n        result = db_api.shares_delete(ctxt, fake_share)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_shares_create(self, mock_session):\n        fake_share = [models.Share()]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_share\n        result = db_api.shares_create(ctxt, fake_share)\n        assert len(result) == 1\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_share_create(self, mock_session):\n        fake_share = models.Share()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_share\n        result = db_api.share_create(ctxt, fake_share)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_share_get_all(self, mock_session):\n        fake_share = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_share\n        result = db_api.share_get_all(ctxt)\n        assert len(result) == 0\n\n        result = db_api.share_get_all(ctxt, filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_access_info_get_all(self, mock_session):\n        fake_access_info = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_access_info\n        result = db_api.access_info_get_all(ctxt)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_access_info_get(self, mock_session):\n        fake_access_info = models.AccessInfo()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_access_info\n        result = db_api.access_info_get(ctxt,\n                                        'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_access_info_create(self, mock_session):\n        fake_access_info = models.AccessInfo()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_access_info\n        result = db_api.access_info_create(ctxt, fake_access_info)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_access_info_update(self, mock_session):\n        fake_access_info = models.AccessInfo()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_access_info\n        result = db_api.access_info_update(\n            ctxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd', fake_access_info)
\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_alert_source_get_all(self, mock_session):\n        fake_alert_source = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_alert_source\n        result = db_api.alert_source_get_all(ctxt)\n        assert len(result) == 0\n\n        result = db_api.alert_source_get_all(ctxt,\n                                             filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_alert_source_update(self, mock_session):\n        fake_alert_source = models.AlertSource()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_alert_source\n        result = db_api.alert_source_update(\n            ctxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd', fake_alert_source)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_alert_source_delete(self, mock_session):\n        fake_alert_source = models.AlertSource()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_alert_source\n        result = db_api.alert_source_delete(\n            ctxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_alert_source_create(self, mock_session):\n        fake_alert_source = models.AlertSource()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_alert_source\n        result = db_api.alert_source_create(ctxt, fake_alert_source)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_task_create(self, mock_session):\n        fake_task = models.Task()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_task\n        result = db_api.task_create(context, fake_task)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_task_update(self, mock_session):\n        values = {'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = values\n        result = db_api.task_update(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd', values)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_task_get(self, mock_session):\n        fake_task = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_task\n        result = db_api.task_get(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_task_get_all(self, mock_session):\n        fake_task = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_task\n        result = db_api.task_get_all(context)\n        assert len(result) == 0\n\n        result = db_api.task_get_all(context,\n                                     filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_task_delete(self, mock_session):\n        fake_task = [models.Task().id]\n
        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_task\n        result = db_api.task_delete(context, fake_task)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_task_delete_by_storage(self, mock_session):\n        fake_task_storage_id = [models.Task().storage_id]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_task_storage_id\n        result = db_api \\\n            .task_delete_by_storage(context, fake_task_storage_id)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_failed_task_create(self, mock_session):\n        fake_failed_task = models.FailedTask()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_failed_task\n        result = db_api.failed_task_create(context, fake_failed_task)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_failed_task_update(self, mock_session):\n        values = {'id': 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd'}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = values\n        result = db_api.failed_task_update(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd', values)\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_failed_task_get(self, mock_session):\n        fake_failed_task = {}\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_failed_task\n        result = db_api.failed_task_get(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bd')\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_failed_task_get_all(self, mock_session):\n        fake_failed_task = []\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_failed_task\n        result = db_api.failed_task_get_all(context)\n        assert len(result) == 0\n\n        result = db_api.failed_task_get_all(context,\n                                            filters={'status': 'Normal'})\n        assert len(result) == 0\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_failed_task_delete(self, mock_session):\n        fake_failed_task = [models.FailedTask().id]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_failed_task\n        result = db_api.failed_task_delete(context, fake_failed_task)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_failed_task_delete_by_task_id(self, mock_session):\n        fake_failed_task_id \\\n            = [models.FailedTask().task_id]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_failed_task_id\n        result = db_api \\\n            .failed_task_delete_by_task_id(context,\n                                           fake_failed_task_id)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_failed_task_delete_by_storage(self, mock_session):\n        fake_failed_task_storage_id = [models.FailedTask().storage_id]\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = fake_failed_task_storage_id\n        result = db_api \\\n
            .failed_task_delete_by_storage(context,\n                                           fake_failed_task_storage_id)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_basic_storage_pool_create(self, mock_session):\n        storage_pool_model_lst = fake_data.fake_storage_pool_create()\n        expected = fake_data.fake_expected_storage_pool_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        got = db_api.storage_pools_create(ctxt, storage_pool_model_lst)\n        utils.validate_db_schema_model(got[0], models.StoragePool)\n        utils.validate_db_schema_model(expected[0], models.StoragePool)\n        self.assertDictMatch(got[0], expected[0])\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_unknown_attribute_storage_pool_model_create(self, mock_session):\n        storage_pool_model_lst = fake_data.fake_storage_pool_create()\n        expected = fake_data.fake_expected_storage_pool_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        got = db_api.storage_pools_create(ctxt, storage_pool_model_lst)\n        self.assertRaisesRegex(AssertionError, \"\",\n                               utils.validate_db_schema_model,\n                               got[1], models.StoragePool)\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_initiator_create(self, mock_session):\n        storage_host_initiator_model_lst \\\n            = fake_data.fake_storage_host_initiator_create()\n        expected = fake_data.fake_expected_storage_host_initiator_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        got = db_api.storage_host_initiators_create(\n            ctxt, storage_host_initiator_model_lst)\n        utils.validate_db_schema_model(got[0], models.StorageHostInitiator)\n        utils.validate_db_schema_model(expected[0],\n                                       models.StorageHostInitiator)\n        self.assertDictMatch(got[0], expected[0])\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_initiator_update(self, mock_session):\n        storage_host_initiator_model_lst \\\n            = fake_data.fake_storage_host_initiator_create()\n        expected = fake_data.fake_expected_storage_host_initiator_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        result = db_api.storage_host_initiators_update(\n            ctxt, storage_host_initiator_model_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_initiator_delete(self, mock_session):\n        storage_host_initiator_model_lst \\\n            = fake_data.fake_storage_host_initiator_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = storage_host_initiator_model_lst\n        result = db_api.storage_host_initiators_delete(\n            ctxt, storage_host_initiator_model_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_initiator_delete_by_storage(self, mock_session):\n        storage_host_initiator_model_lst \\\n            = fake_data.fake_storage_host_initiator_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = storage_host_initiator_model_lst\n        result = 
db_api.storage_host_initiators_delete_by_storage(\n            ctxt, storage_host_initiator_model_lst[0]['storage_id'])\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_create(self, mock_session):\n        storage_host_model_lst \\\n            = fake_data.fake_storage_host_create()\n        expected = fake_data.fake_expected_storage_host_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        got = db_api.storage_hosts_create(\n            ctxt, storage_host_model_lst)\n        utils.validate_db_schema_model(got[0], models.StorageHost)\n        utils.validate_db_schema_model(expected[0], models.StorageHost)\n        self.assertDictMatch(got[0], expected[0])\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_update(self, mock_session):\n        storage_host_model_lst \\\n            = fake_data.fake_storage_host_create()\n        expected = fake_data.fake_expected_storage_host_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        result = db_api.storage_hosts_update(ctxt, storage_host_model_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_delete(self, mock_session):\n        storage_host_model_lst \\\n            = fake_data.fake_storage_host_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = storage_host_model_lst\n        result = db_api.storage_hosts_delete(ctxt, storage_host_model_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_delete_by_storage(self, mock_session):\n        storage_host_model_lst \\\n            = fake_data.fake_storage_host_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = storage_host_model_lst\n        result = db_api.storage_hosts_delete_by_storage(\n            ctxt, storage_host_model_lst[0]['storage_id'])\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_groups_create(self, mock_session):\n        storage_host_group_lst \\\n            = fake_data.fake_storage_host_group_create()\n        expected = fake_data.fake_expected_storage_host_group_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        got = db_api.storage_host_groups_create(\n            ctxt, storage_host_group_lst)\n        utils.validate_db_schema_model(got[0], models.StorageHostGroup)\n        utils.validate_db_schema_model(expected[0], models.StorageHostGroup)\n        self.assertDictMatch(got[0], expected[0])\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_group_update(self, mock_session):\n        storage_host_group_lst \\\n            = fake_data.fake_storage_host_group_create()\n        expected = fake_data.fake_expected_storage_host_group_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        result = db_api.storage_host_groups_update(ctxt,\n                                                   storage_host_group_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_group_delete(self, mock_session):\n        
storage_host_group_lst \\\n            = fake_data.fake_storage_host_group_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = storage_host_group_lst\n        result = db_api.storage_host_groups_delete(ctxt,\n                                                   storage_host_group_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_storage_host_group_delete_by_storage(self, mock_session):\n        storage_host_group_lst \\\n            = fake_data.fake_storage_host_group_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = storage_host_group_lst\n        result = db_api.storage_host_groups_delete_by_storage(\n            ctxt, storage_host_group_lst[0]['storage_id'])\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_port_groups_create(self, mock_session):\n        port_group_lst \\\n            = fake_data.fake_port_group_create()\n        expected = fake_data.fake_expected_port_group_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        got = db_api.port_groups_create(\n            ctxt, port_group_lst)\n        utils.validate_db_schema_model(got[0], models.PortGroup)\n        utils.validate_db_schema_model(expected[0], models.PortGroup)\n        self.assertDictMatch(got[0], expected[0])\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_port_group_update(self, mock_session):\n        port_group_lst \\\n            = fake_data.fake_port_group_create()\n        expected = fake_data.fake_expected_port_group_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        result = db_api.port_groups_update(ctxt, port_group_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_port_group_delete(self, mock_session):\n        port_group_lst \\\n            = fake_data.fake_port_group_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = port_group_lst\n        result = db_api.port_groups_delete(ctxt, port_group_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_port_group_delete_by_storage(self, mock_session):\n        port_group_lst \\\n            = fake_data.fake_port_group_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = port_group_lst\n        result = db_api.port_groups_delete_by_storage(\n            ctxt, port_group_lst[0]['storage_id'])\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_volume_groups_create(self, mock_session):\n        volume_group_lst \\\n            = fake_data.fake_volume_group_create()\n        expected = fake_data.fake_expected_volume_groups_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        got = db_api.volume_groups_create(\n            ctxt, volume_group_lst)\n        utils.validate_db_schema_model(got[0], models.VolumeGroup)\n        utils.validate_db_schema_model(expected[0], models.VolumeGroup)\n        self.assertDictMatch(got[0], expected[0])\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_volume_group_update(self, mock_session):\n        
volume_group_lst \\\n            = fake_data.fake_volume_group_create()\n        expected = fake_data.fake_expected_volume_groups_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        result = db_api.volume_groups_update(ctxt, volume_group_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_volume_group_delete(self, mock_session):\n        volume_group_lst \\\n            = fake_data.fake_volume_group_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = volume_group_lst\n        result = db_api.volume_groups_delete(ctxt, volume_group_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_volume_group_delete_by_storage(self, mock_session):\n        volume_group_lst \\\n            = fake_data.fake_volume_group_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = volume_group_lst\n        result = db_api.volume_groups_delete_by_storage(\n            ctxt, volume_group_lst[0]['storage_id'])\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_masking_views_create(self, mock_session):\n        masking_view_lst \\\n            = fake_data.fake_masking_view_create()\n        expected = fake_data.fake_expected_masking_views_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        got = db_api.masking_views_create(\n            ctxt, masking_view_lst)\n        utils.validate_db_schema_model(got[0], models.MaskingView)\n        utils.validate_db_schema_model(expected[0], models.MaskingView)\n        self.assertDictMatch(got[0], expected[0])\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_masking_view_update(self, mock_session):\n        masking_view_lst \\\n            = fake_data.fake_masking_view_create()\n        expected = fake_data.fake_expected_masking_views_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = expected\n        result = db_api.masking_views_update(ctxt, masking_view_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_masking_view_delete(self, mock_session):\n        masking_view_lst \\\n            = fake_data.fake_masking_view_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = masking_view_lst\n        result = db_api.masking_views_delete(ctxt, masking_view_lst)\n        assert result is None\n\n    @mock.patch('delfin.db.sqlalchemy.api.get_session')\n    def test_masking_view_delete_by_storage(self, mock_session):\n        masking_view_lst \\\n            = fake_data.fake_masking_view_create()\n        mock_session.return_value.__enter__.return_value.query.return_value \\\n            = masking_view_lst\n        result = db_api.masking_views_delete_by_storage(\n            ctxt, masking_view_lst[0]['storage_id'])\n        assert result is None\n"
  },
  {
    "path": "delfin/tests/unit/drivers/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/power_store/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/power_store/test_power_store.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport sys\nfrom unittest import mock, TestCase\n\nimport six\n\nfrom delfin import context\nfrom delfin.common import constants\nfrom delfin.drivers.dell_emc.power_store import consts\nfrom delfin.drivers.utils.rest_client import RestClient\n\nsys.modules['delfin.cryptor'] = mock.Mock()\nfrom delfin.drivers.dell_emc.power_store.power_store import PowerStoreDriver\nfrom delfin.drivers.dell_emc.power_store.rest_handler import RestHandler\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"rest\": {\n        \"host\": \"192.168.33.241\",\n        \"port\": 443,\n        \"username\": \"admin\",\n        \"password\": \"password\"\n    }\n}\nclusters = [\n    {\n        \"id\": \"0\",\n        \"global_id\": \"PS0234fd139f29\",\n        \"name\": \"Powerstore1000T\",\n        \"physical_mtu\": 1500,\n        \"master_appliance_id\": \"A1\",\n        \"primary_appliance_id\": \"A1\",\n        \"state\": \"Configured\",\n        \"appliance_count\": 1,\n        \"management_address\": \"192.168.3.241\",\n        \"compatibility_level\": 15,\n        \"system_time\": \"2022-11-23T03:46:47.317Z\",\n        \"state_l10n\": \"Configured\"\n    }\n]\nappliance = [\n    {\n        \"id\": \"A1\",\n        \"name\": \"Powerstore1000T-appliance-1\",\n        \"model\": \"PowerStore 1000T\",\n        \"drive_failure_tolerance_level\": \"Single\",\n        \"service_tag\": \"4YBRFR3\",\n        \"nodes\": [\n            {\n                \"id\": \"N1\"\n            },\n            {\n                \"id\": \"N2\"\n            }\n        ],\n        \"software_installed\": [\n            {\n                \"id\": \"f412271a-987b-47b5-ae8f-46cb895b822b\"\n            }\n        ],\n        \"volumes\": [\n            {\n                \"id\": \"c557beb6-8112-4aa5-8be6-178d34dfc961\"\n            }\n        ]\n    }\n]\nappliance_capacity = [{\n    \"appliance_id\": \"A1\",\n    \"timestamp\": \"2022-11-23T06:05:00Z\",\n    \"logical_provisioned\": 644245094400,\n    \"logical_used\": 0,\n    \"logical_used_volume\": 0,\n    \"logical_used_file_system\": 0,\n    \"logical_used_vvol\": 0,\n    \"shared_logical_used_volume\": 0,\n    \"shared_logical_used_file_system\": 0,\n    \"shared_logical_used_vvol\": 0,\n    \"physical_total\": 6969013934489,\n    \"physical_used\": 5990644187,\n    \"data_physical_used\": 0,\n    \"efficiency_ratio\": 0.0,\n    \"data_reduction\": 0.0,\n    \"snapshot_savings\": 0.0,\n    \"thin_savings\": 0.0,\n    \"shared_logical_used\": 0,\n    \"repeat_count\": 1,\n    \"response_definition\": \"space_metrics_by_appliance\",\n    \"entity\": \"space_metrics_by_appliance\"\n}]\npools_data = [{'name': 'Powerstore1000T-appliance-1', 'storage_id': '12345',\n               'native_storage_pool_id': 'A1', 'status': 'normal',\n               'storage_type': 'block', 'total_capacity': 6969013934489,\n               'used_capacity': 5990644187, 'free_capacity': 
6963023290302}]\nnode_info = [\n    {\n        \"appliance_id\": \"A1\",\n        \"id\": \"N1\",\n        \"slot\": 0,\n    },\n    {\n        \"appliance_id\": \"A1\",\n        \"id\": \"N2\",\n        \"slot\": 1,\n    }\n]\nip_pool_address = [\n    {\n        \"id\": \"IP1\",\n        \"name\": \"Default Management Network (192.168.3.241)\",\n        \"address\": \"192.168.3.241\",\n        \"appliance_id\": None,\n        \"node_id\": None,\n        \"purposes\": [\n            \"Mgmt_Cluster_Floating\"\n        ],\n    },\n    {\n        \"id\": \"IP16\",\n        \"name\": \"Default Management Network (192.168.3.245)\",\n        \"address\": \"192.168.3.245\",\n        \"appliance_id\": None,\n        \"node_id\": None,\n        \"purposes\": [\n            \"Unused\"\n        ],\n    },\n    {\n        \"id\": \"IP17\",\n        \"name\": \"Default Management Network (192.168.3.246)\",\n        \"address\": \"192.168.3.246\",\n        \"appliance_id\": None,\n        \"network_id\": \"NW1\",\n        \"node_id\": None,\n        \"purposes\": [\n            \"Unused\"\n        ],\n    },\n    {\n        \"id\": \"IP2\",\n        \"name\": \"Default Management Network (192.168.3.242)\",\n        \"address\": \"192.168.3.242\",\n        \"appliance_id\": \"A1\",\n        \"network_id\": \"NW1\",\n        \"node_id\": None,\n        \"purposes\": [\n            \"Mgmt_Appliance_Floating\"\n        ],\n    },\n    {\n        \"id\": \"IP3\",\n        \"name\": \"Default Management Network (192.168.3.243)\",\n        \"address\": \"192.168.3.243\",\n        \"appliance_id\": \"A1\",\n        \"node_id\": \"N1\",\n        \"purposes\": [\n            \"Mgmt_Node_CoreOS\"\n        ],\n    },\n    {\n        \"id\": \"IP4\",\n        \"name\": \"Default Management Network (192.168.3.244)\",\n        \"address\": \"192.168.3.244\",\n        \"appliance_id\": \"A1\",\n        \"node_id\": \"N2\",\n        \"purposes\": [\n            \"Mgmt_Node_CoreOS\"\n        ],\n    },\n    {\n        \"id\": \"IP8\",\n        \"name\": \"Default ICM Network (fd4e:c5b3:1db3::201:44f0:49e5:21c3)\",\n        \"address\": \"fd4e:c5b3:1db3::201:44f0:49e5:21c3\",\n        \"appliance_id\": \"A1\",\n        \"node_id\": \"N1\",\n        \"purposes\": [\n            \"ICM_Node_CoreOS\"\n        ],\n    }\n]\nhardware_info = [\n    {\n        \"appliance_id\": \"A1\",\n        \"name\": \"Drive_0_0_24\",\n        \"parent_id\": \"c8e719e6d68a4fcc9cb9cf5a808db37f\",\n        \"stale_state\": \"Not_Stale\",\n        \"status_led_state\": \"Off\",\n        \"extra_details\": {\n            \"firmware_version\": \"3.1.11.0\",\n            \"drive_type\": \"NVMe_NVRAM\",\n            \"size\": 8484552704,\n            \"encryption_status\": \"Disabled\",\n            \"fips_status\": \"FIPS_Compliance_None\"\n        },\n        \"id\": \"0bd7eaefeaf141b7b1c041aa439e65d9\",\n        \"is_marked\": False,\n        \"lifecycle_state\": \"Healthy\",\n        \"part_number\": \"005053655\",\n        \"serial_number\": \"1A48F300ADB\",\n        \"slot\": 24,\n        \"type\": \"Drive\",\n        \"appliance\": {\n            \"id\": \"A1\"\n        },\n        \"children\": [],\n        \"parent\": {\n            \"id\": \"c8e719e6d68a4fcc9cb9cf5a808db37f\"\n        },\n        \"hardware_parent_eth_ports\": [],\n        \"hardware_parent_fc_ports\": [],\n        \"hardware_parent_sas_ports\": [],\n        \"io_module_eth_ports\": []\n    }, {\n        \"appliance_id\": \"A1\",\n        \"name\": \"Drive_0_0_4\",\n       
 \"parent_id\": \"c8e719e6d68a4fcc9cb9cf5a808db37f\",\n        \"stale_state\": \"Not_Stale\",\n        \"status_led_state\": \"Off\",\n        \"extra_details\": {\n            \"firmware_version\": \"VPV1ET0K\",\n            \"drive_type\": \"NVMe_SSD\",\n            \"size\": 1920383410176,\n            \"encryption_status\": \"Disabled\",\n            \"fips_status\": \"FIPS_Compliance_None\"\n        },\n        \"id\": \"3dd4393290cc4665bb0649cb06fbc8ea\",\n        \"is_marked\": False,\n        \"lifecycle_state\": \"Healthy\",\n        \"part_number\": \"005052920\",\n        \"serial_number\": \"PHLP040101AK2P0A\",\n        \"slot\": 4,\n        \"type\": \"Drive\",\n        \"appliance\": {\n            \"id\": \"A1\"\n        },\n        \"children\": [],\n        \"parent\": {\n            \"id\": \"c8e719e6d68a4fcc9cb9cf5a808db37f\"\n        },\n        \"hardware_parent_eth_ports\": [],\n        \"hardware_parent_fc_ports\": [],\n        \"hardware_parent_sas_ports\": [],\n        \"io_module_eth_ports\": []\n    }, {\n        \"appliance_id\": \"A1\",\n        \"name\": \"BaseEnclosure-NodeA\",\n        \"parent_id\": \"c8e719e6d68a4fcc9cb9cf5a808db37f\",\n        \"stale_state\": \"Not_Stale\",\n        \"status_led_state\": \"Off\",\n        \"extra_details\": {\n            \"physical_memory_size_gb\": 192,\n            \"cpu_model\": \"Intel(R) Xeon(R) Silver 4108 CPU @ 1.80GHz\",\n            \"cpu_cores\": 8,\n            \"cpu_sockets\": 2\n        },\n        \"id\": \"3f75d31ca1dc4d8fb95707c7f996a063\",\n        \"is_marked\": False,\n        \"lifecycle_state\": \"Healthy\",\n        \"part_number\": \"110-558-301A-00\",\n        \"serial_number\": \"FXTSP221300674\",\n        \"slot\": 0,\n        \"type\": \"Node\",\n        \"appliance\": {\n            \"id\": \"A1\"\n        },\n        \"children\": [\n            {\n                \"id\": \"8ae8e0207f9b4145ab3acaf6963941db\"\n            }\n        ],\n        \"parent\": {\n            \"id\": \"c8e719e6d68a4fcc9cb9cf5a808db37f\"\n        },\n        \"hardware_parent_eth_ports\": [],\n        \"hardware_parent_fc_ports\": [],\n        \"hardware_parent_sas_ports\": [],\n        \"io_module_eth_ports\": []\n    }, {\n        \"appliance_id\": \"A1\",\n        \"name\": \"BaseEnclosure-NodeB\",\n        \"parent_id\": \"c8e719e6d68a4fcc9cb9cf5a808db37f\",\n        \"stale_state\": \"Not_Stale\",\n        \"status_led_state\": \"Off\",\n        \"extra_details\": {\n            \"physical_memory_size_gb\": 192,\n            \"cpu_model\": \"Intel(R) Xeon(R) Silver 4108 CPU @ 1.80GHz\",\n            \"cpu_cores\": 8,\n            \"cpu_sockets\": 2\n        },\n        \"id\": \"5c93d9044ea94afea7d0ac853b89886c\",\n        \"is_marked\": False,\n        \"lifecycle_state\": \"Healthy\",\n        \"part_number\": \"110-558-301A-00\",\n        \"serial_number\": \"FXTSP221300970\",\n        \"slot\": 1,\n        \"type\": \"Node\",\n        \"appliance\": {\n            \"id\": \"A1\"\n        },\n        \"children\": [\n            {\n                \"id\": \"ef01e2005bcd4f9a8af4fd076a49e34c\"\n            }\n        ],\n        \"parent\": {\n            \"id\": \"c8e719e6d68a4fcc9cb9cf5a808db37f\"\n        },\n        \"hardware_parent_eth_ports\": [],\n        \"hardware_parent_fc_ports\": [],\n        \"hardware_parent_sas_ports\": [],\n        \"io_module_eth_ports\": []\n    },\n]\ndisk_data = [{'name': 'Drive_0_0_4', 'storage_id': '12345',\n              'native_disk_id': 
'3dd4393290cc4665bb0649cb06fbc8ea',\n              'serial_number': 'PHLP040101AK2P0A', 'manufacturer': 'DELL EMC',\n              'firmware': 'VPV1ET0K', 'capacity': 1920383410176,\n              'status': 'normal', 'physical_type': 'nvme-ssd',\n              'logical_type': 'unknown', 'location': '4'}]\nsoftware_installed = [\n    {\n        \"id\": \"f412271a-987b-47b5-ae8f-46cb895b822b\",\n        \"build_version\": \"2.1.1.1\",\n        \"release_version\": \"2.1.1.1\",\n        \"appliance\": {\n            \"id\": \"A1\"\n        }\n    },\n    {\n        \"id\": \"f9c0b631-a14f-4d1b-bb28-831de6e78242\",\n        \"build_version\": \"2.1.1.1\",\n        \"release_version\": \"2.1.1.1\",\n        \"appliance\": None\n    }\n]\nstorage_data = {'model': 'PowerStore 1000T', 'total_capacity': 6969013934489,\n                'raw_capacity': 1920383410176, 'used_capacity': 5990644187,\n                'free_capacity': 6963023290302, 'vendor': 'DELL EMC',\n                'name': 'Powerstore1000T', 'serial_number': 'PS0234fd139f29',\n                'firmware_version': '2.1.1.1', 'status': 'normal'}\nvolume_info = [{\n    \"app_type\": \"Business_Applications_ERP_SAP\",\n    \"app_type_l10n\": \"ERP / SAP\",\n    \"appliance_id\": \"A1\",\n    \"description\": \"nothing at all\",\n    \"state\": \"Ready\",\n    \"type\": \"Primary\",\n    \"wwn\": \"naa.68ccf0980048d7ab86ec9f7fdfa9945d\",\n    \"size\": 3221225472,\n    \"name\": \"wu-003\",\n    \"id\": \"022ece9c-4921-46ba-ba4f-91c167a90cbe\",\n    \"appliance\": {\n        \"id\": \"A1\"\n    }}, {\n    \"app_type\": \"Business_Applications_ERP_SAP\",\n    \"app_type_l10n\": \"ERP / SAP\",\n    \"appliance_id\": \"A1\",\n    \"description\": \"nothing at all\",\n    \"state\": \"Ready\",\n    \"type\": \"Snapshot\",\n    \"wwn\": \"naa.68ccf0980084d02c7649966d51f07a2d\",\n    \"size\": 3221225472,\n    \"name\": \"wu-013\",\n    \"id\": \"030c4053-ccd5-40e7-a96a-5d32ea507468\",\n    \"appliance\": {\n        \"id\": \"A1\"\n    }},\n]\nvolume_generate = [\n    {\n        \"volume_id\": \"022ece9c-4921-46ba-ba4f-91c167a90cbe\",\n        \"appliance_id\": \"A1\",\n        \"timestamp\": \"2022-11-22T06:55:00Z\",\n        \"logical_provisioned\": 3221225472,\n        \"logical_used\": 0,\n        \"thin_savings\": 0.0,\n        \"repeat_count\": 288,\n        \"response_definition\": \"space_metrics_by_volume\",\n        \"entity\": \"space_metrics_by_volume\"\n    }\n]\nvolume_data = [{'name': 'wu-003', 'storage_id': '12345',\n                'description': 'nothing at all', 'status': 'normal',\n                'native_volume_id': '022ece9c-4921-46ba-ba4f-91c167a90cbe',\n                'native_storage_pool_id': 'A1',\n                'wwn': 'naa.68ccf0980048d7ab86ec9f7fdfa9945d', 'type': 'thin',\n                'total_capacity': 3221225472, 'used_capacity': 0,\n                'free_capacity': 3221225472}]\nalerts_info = [\n    {\n        \"id\": \"0032b92b-e0bc-4259-b572-db562238b4b4\",\n        \"description_l10n\": \"Management ports are properly connected to\"\n                            \" different management switches.\",\n        \"severity\": \"Info\",\n        \"resource_name\": \"BaseEnclosure-NodeB\",\n        \"resource_type\": \"hardware\",\n        \"acknowledged_timestamp\": None,\n        \"generated_timestamp\": \"2022-11-09T06:58:14.479496+00:00\",\n        \"cleared_timestamp\": \"2022-11-09T06:58:14.479496+00:00\",\n        \"resource_id\": \"5c93d9044ea94afea7d0ac853b89886c\",\n        \"state\": \"ACTIVE\",\n        \"raised_timestamp\": 
\"2022-11-06T06:18:14.446541+00:00\",\n        \"email_sent_timestamp\": None,\n        \"called_home_timestamp\": None,\n        \"is_acknowledged\": False,\n        \"snmp_sent_timestamp\": None\n    }, {\n        \"id\": \"0032b92b-e0bc-4259-b572-db562238b4b5\",\n        \"description_l10n\": \"Management ports are properly connected to\"\n                            \" different management switches.\",\n        \"severity\": \"Info\",\n        \"resource_name\": \"BaseEnclosure-NodeA\",\n        \"resource_type\": \"hardware\",\n        \"acknowledged_timestamp\": None,\n        \"generated_timestamp\": \"2022-11-09T06:58:14.479496+00:00\",\n        \"cleared_timestamp\": \"2022-11-09T06:58:14.479496+00:00\",\n        \"resource_id\": \"5c93d9044ea94afea7d0ac853b89886c\",\n        \"state\": \"CLEARED\",\n        \"raised_timestamp\": \"2022-11-06T06:18:14.446541+00:00\",\n        \"email_sent_timestamp\": None,\n        \"called_home_timestamp\": None,\n        \"is_acknowledged\": False,\n        \"snmp_sent_timestamp\": None\n    }\n]\nalerts_data = [{\n    'alert_id': '0032b92b-e0bc-4259-b572-db562238b4b4',\n    'occur_time': 1667715494446, 'severity': 'Informational',\n    'category': 'Fault', 'location': 'hardware:BaseEnclosure-NodeB',\n    'type': 'EquipmentAlarm', 'resource_type': 'hardware',\n    'alert_name': 'Management ports are properly connected to different '\n                  'management switches.',\n    'match_key': '0042968d19d8788229f78abb4f842121',\n    'description': 'Management ports are properly connected to different '\n                   'management switches.'}]\nsnmp_alert_data = {\n    'alert_id': 'b89d0e0a9cec32fc20a21b05071c9d5e',\n    'occur_time': 1667708609278, 'severity': 'Major',\n    'category': 'Fault', 'location': 'appliance:Powerstore1000T-appliance-1',\n    'type': 'EquipmentAlarm', 'resource_type': 'appliance',\n    'alert_name': 'All configured DNS servers are unavailable.',\n    'match_key': 'b89d0e0a9cec32fc20a21b05071c9d5e',\n    'description': 'All configured DNS servers are unavailable.'\n}\ncontrollers_data = [\n    {'name': 'NodeA', 'storage_id': '12345',\n     'native_controller_id': '3f75d31ca1dc4d8fb95707c7f996a063',\n     'status': 'normal', 'location': 'NodeA:Slot-0',\n     'mgmt_ip': '192.168.3.243',\n     'cpu_info': 'Intel(R) Xeon(R) Silver 4108 CPU @ 1.80GHz',\n     'cpu_count': 1, 'memory_size': 206158430208},\n    {'name': 'NodeB', 'storage_id': '12345',\n     'native_controller_id': '5c93d9044ea94afea7d0ac853b89886c',\n     'status': 'normal', 'location': 'NodeB:Slot-1',\n     'mgmt_ip': '192.168.3.244',\n     'cpu_info': 'Intel(R) Xeon(R) Silver 4108 CPU @ 1.80GHz',\n     'cpu_count': 1, 'memory_size': 206158430208}]\nalert_sources_data = [{'host': '192.168.3.243'}, {'host': '192.168.3.244'}]\nfc_info = [{\n    \"appliance_id\": \"A1\",\n    \"current_speed\": \"16_Gbps\",\n    \"id\": \"090d75723f4147808fd5624582708381\",\n    \"is_link_up\": False,\n    \"name\": \"BaseEnclosure-NodeA-IoModule0-FEPort2\",\n    \"partner_id\": \"ed8e44cab9484f8c92d488a12e5bc6d8\",\n    \"port_connector_type\": \"LC\",\n    \"supported_speeds\": [\n        \"Auto\",\n        \"8_Gbps\",\n        \"16_Gbps\"\n    ],\n    \"wwn\": \"58:cc:f0:90:4d:22:19:fb\",\n    \"stale_state\": \"Not_Stale\",\n    \"node_id\": \"3f75d31ca1dc4d8fb95707c7f996a063\",\n    \"sfp_id\": \"343b016544744268b383294867424252\",\n}, {\n    \"appliance_id\": \"A1\",\n    \"current_speed\": None,\n    \"id\": \"210d046f161b4c2ebbcfd3090d706370\",\n    \"is_link_up\": 
False,\n    \"name\": \"BaseEnclosure-NodeA-IoModule0-FEPort0\",\n    \"partner_id\": \"78d83f317dc446a3992b24e7cbe4d9bc\",\n    \"port_connector_type\": \"LC\",\n    \"supported_speeds\": [\n        \"Auto\",\n        \"8_Gbps\",\n        \"16_Gbps\"\n    ],\n    \"wwn\": \"58:cc:f0:90:4d:20:19:fb\",\n    \"stale_state\": \"Not_Stale\",\n    \"node_id\": \"3f75d31ca1dc4d8fb95707c7f996a063\",\n    \"sfp_id\": \"16416450ec514ca089cdf83764b1cfec\",\n}]\nhardware_port_info = [{\n    \"appliance_id\": \"A1\",\n    \"name\": \"BaseEnclosure-NodeB-IoModule0-SFP2\",\n    \"parent_id\": \"ffc6bc05bcd84d7095d380d72c88fc0b\",\n    \"stale_state\": \"Not_Stale\",\n    \"status_led_state\": None,\n    \"extra_details\": {\n        \"mode\": \"Multi_Mode\",\n        \"supported_protocol\": \"FC\",\n        \"connector_type\": \"LC\",\n        \"supported_speeds\": [\n            \"4_Gbps\",\n            \"8_Gbps\",\n            \"16_Gbps\"\n        ]\n    },\n    \"id\": \"343b016544744268b383294867424252\",\n    \"is_marked\": None,\n    \"lifecycle_state\": \"Healthy\",\n    \"part_number\": \"019-078-045\",\n    \"serial_number\": \"P66DEY1         \",\n    \"slot\": 2,\n    \"type\": \"SFP\",\n    \"appliance\": {\n        \"id\": \"A1\"\n    },\n    \"children\": [],\n    \"parent\": {\n        \"id\": \"ffc6bc05bcd84d7095d380d72c88fc0b\"\n    },\n    \"hardware_parent_eth_ports\": [],\n    \"hardware_parent_fc_ports\": [],\n    \"hardware_parent_sas_ports\": [],\n    \"io_module_eth_ports\": []\n}, {\n    \"appliance_id\": \"A1\",\n    \"name\": \"BaseEnclosure-NodeA-IoModule0-SFP0\",\n    \"parent_id\": \"2f44d24b4332475d90edab9d316c6d9b\",\n    \"stale_state\": \"Not_Stale\",\n    \"status_led_state\": None,\n    \"extra_details\": {\n        \"mode\": \"Multi_Mode\",\n        \"supported_protocol\": \"FC\",\n        \"connector_type\": \"LC\",\n        \"supported_speeds\": [\n            \"4_Gbps\",\n            \"8_Gbps\",\n            \"16_Gbps\"\n        ]\n    },\n    \"id\": \"16416450ec514ca089cdf83764b1cfec\",\n    \"is_marked\": None,\n    \"lifecycle_state\": \"Healthy\",\n    \"part_number\": \"019-078-045\",\n    \"serial_number\": \"P66DD3H         \",\n    \"slot\": 0,\n    \"type\": \"SFP\",\n    \"appliance\": {\n        \"id\": \"A1\"\n    },\n    \"children\": [],\n    \"parent\": {\n        \"id\": \"2f44d24b4332475d90edab9d316c6d9b\"\n    },\n    \"hardware_parent_eth_ports\": [],\n    \"hardware_parent_fc_ports\": [],\n    \"hardware_parent_sas_ports\": [],\n    \"io_module_eth_ports\": []\n}, {\n    \"appliance_id\": \"A1\",\n    \"name\": \"BaseEnclosure-NodeA-4PortCard-SFP1\",\n    \"parent_id\": \"fd5cb1c32a434ecbba63f79060fa605b\",\n    \"stale_state\": \"Not_Stale\",\n    \"status_led_state\": None,\n    \"extra_details\": {\n        \"mode\": \"Unknown\",\n        \"supported_protocol\": \"Unknown\",\n        \"connector_type\": \"Unknown\",\n        \"supported_speeds\": []\n    },\n    \"id\": \"e387bf548a8b45a1831982857407adb1\",\n    \"is_marked\": None,\n    \"lifecycle_state\": \"Empty\",\n    \"part_number\": None,\n    \"serial_number\": None,\n    \"slot\": 1,\n    \"type\": \"SFP\",\n    \"appliance\": {\n        \"id\": \"A1\"\n    },\n    \"children\": [],\n    \"parent\": {\n        \"id\": \"fd5cb1c32a434ecbba63f79060fa605b\"\n    },\n    \"hardware_parent_eth_ports\": [],\n    \"hardware_parent_fc_ports\": [],\n    \"hardware_parent_sas_ports\": [],\n    \"io_module_eth_ports\": []\n}, {\n    \"appliance_id\": \"A1\",\n    \"name\": 
\"BaseEnclosure-NodeA-EmbeddedModule-SFP0\",\n    \"parent_id\": \"9ba194e115454c3db22a7da88164379e\",\n    \"stale_state\": \"Not_Stale\",\n    \"status_led_state\": None,\n    \"extra_details\": {\n        \"mode\": \"Unknown\",\n        \"supported_protocol\": \"Unknown\",\n        \"connector_type\": \"Unknown\",\n        \"supported_speeds\": []\n    },\n    \"id\": \"66696ff8755b4e8ca8c853021b7e668f\",\n    \"is_marked\": None,\n    \"lifecycle_state\": \"Empty\",\n    \"part_number\": None,\n    \"serial_number\": None,\n    \"slot\": 0,\n    \"type\": \"SFP\",\n    \"appliance\": {\n        \"id\": \"A1\"\n    },\n    \"children\": [],\n    \"parent\": {\n        \"id\": \"9ba194e115454c3db22a7da88164379e\"\n    },\n    \"hardware_parent_eth_ports\": [],\n    \"hardware_parent_fc_ports\": [],\n    \"hardware_parent_sas_ports\": [],\n    \"io_module_eth_ports\": []\n}, {\n    \"appliance_id\": \"A1\",\n    \"name\": \"BaseEnclosure-NodeB-EmbeddedModule-SFP0\",\n    \"parent_id\": \"0585c10b69de44258bee4a23ec91c601\",\n    \"stale_state\": \"Not_Stale\",\n    \"status_led_state\": None,\n    \"extra_details\": {\n        \"mode\": \"Unknown\",\n        \"supported_protocol\": \"Unknown\",\n        \"connector_type\": \"Unknown\",\n        \"supported_speeds\": []\n    },\n    \"id\": \"ea2b7ebba6784d35bfd123e255078947\",\n    \"is_marked\": None,\n    \"lifecycle_state\": \"Empty\",\n    \"part_number\": None,\n    \"serial_number\": None,\n    \"slot\": 0,\n    \"type\": \"SFP\",\n    \"appliance\": {\n        \"id\": \"A1\"\n    },\n    \"children\": [],\n    \"parent\": {\n        \"id\": \"0585c10b69de44258bee4a23ec91c601\"\n    },\n    \"hardware_parent_eth_ports\": [],\n    \"hardware_parent_fc_ports\": [],\n    \"hardware_parent_sas_ports\": [],\n    \"io_module_eth_ports\": []\n}]\nperf_fc_info = [{\n    \"appliance_id\": \"A1\",\n    \"current_speed\": None,\n    \"id\": \"090d75723f4147808fd5624582708381\",\n    \"is_link_up\": False,\n    \"name\": \"BaseEnclosure-NodeA-IoModule0-FEPort2\",\n    \"partner_id\": \"ed8e44cab9484f8c92d488a12e5bc6d8\",\n    \"port_connector_type\": \"LC\",\n    \"supported_speeds\": [\n        \"Auto\",\n        \"8_Gbps\",\n        \"16_Gbps\"\n    ],\n    \"wwn\": \"58:cc:f0:90:4d:22:19:fb\",\n    \"stale_state\": \"Not_Stale\",\n    \"node_id\": \"3f75d31ca1dc4d8fb95707c7f996a063\",\n}]\neth_info = [{\n    \"id\": \"0133e5496e6b4671b37bb2fc94be1a25\",\n    \"name\": \"BaseEnclosure-NodeA-4PortCard-FEPort1\",\n    \"appliance_id\": \"A1\",\n    \"current_mtu\": 1500,\n    \"current_speed\": None,\n    \"hardware_parent_id\": \"fd5cb1c32a434ecbba63f79060fa605b\",\n    \"is_link_up\": False,\n    \"mac_address\": \"0c48c6c9d455\",\n    \"node_id\": \"3f75d31ca1dc4d8fb95707c7f996a063\",\n    \"partner_id\": \"dddae67b03fc49b796bc27cb23932c84\",\n    \"sfp_id\": \"e387bf548a8b45a1831982857407adb1\",\n    \"port_connector_type\": \"Unknown\",\n    \"stale_state\": \"Not_Stale\",\n    \"supported_speeds\": [\n        \"Auto\"\n    ],\n}, {\n    \"id\": \"0235963690ab404c8e6a3485d5a58198\",\n    \"name\": \"BaseEnclosure-NodeB-EmbeddedModule-ServicePort\",\n    \"appliance_id\": \"A1\",\n    \"current_mtu\": 1500,\n    \"current_speed\": None,\n    \"hardware_parent_id\": \"0585c10b69de44258bee4a23ec91c601\",\n    \"is_link_up\": False,\n    \"mac_address\": \"006016d7a783\",\n    \"node_id\": \"5c93d9044ea94afea7d0ac853b89886c\",\n    \"sfp_id\": None,\n    \"partner_id\": \"de32885d015e4922a571db515a1b8659\",\n    \"port_connector_type\": 
\"RJ45\",\n    \"port_index\": 1,\n    \"stale_state\": \"Not_Stale\",\n    \"supported_speeds\": [\n        \"Auto\",\n        \"10_Mbps\",\n        \"100_Mbps\",\n        \"1_Gbps\"\n    ],\n}]\nsas_info = [\n    {\n        \"appliance_id\": \"A1\",\n        \"id\": \"7774e11064704ccc9b90d1d2c3c7da2c\",\n        \"is_link_up\": False,\n        \"name\": \"BaseEnclosure-NodeA-EmbeddedModule-BEPort0\",\n        \"partner_id\": \"87f9ddbef894406aa7c43501f3d6a008\",\n        \"speed\": None,\n        \"node_id\": \"3f75d31ca1dc4d8fb95707c7f996a063\",\n        \"sfp_id\": \"66696ff8755b4e8ca8c853021b7e668f\"\n    },\n    {\n        \"appliance_id\": \"A1\",\n        \"id\": \"87f9ddbef894406aa7c43501f3d6a008\",\n        \"is_link_up\": False,\n        \"name\": \"BaseEnclosure-NodeB-EmbeddedModule-BEPort0\",\n        \"partner_id\": \"7774e11064704ccc9b90d1d2c3c7da2c\",\n        \"speed\": None,\n        \"node_id\": \"5c93d9044ea94afea7d0ac853b89886c\",\n        \"sfp_id\": \"ea2b7ebba6784d35bfd123e255078947\"\n    }]\nports_data = [\n    {'name': 'BaseEnclosure-NodeA-IoModule0-FEPort2', 'storage_id': '12345',\n     'native_port_id': '090d75723f4147808fd5624582708381',\n     'location': 'Powerstore1000T-appliance-1:BaseEnclosure-'\n                 'NodeA-IoModule0-FEPort2',\n     'connection_status': 'disconnected', 'health_status': 'normal',\n     'type': 'fc', 'speed': 16000000000, 'max_speed': 16000000000,\n     'native_parent_id': '3f75d31ca1dc4d8fb95707c7f996a063',\n     'wwn': '58:cc:f0:90:4d:22:19:fb'},\n    {'name': 'BaseEnclosure-NodeA-IoModule0-FEPort0', 'storage_id': '12345',\n     'native_port_id': '210d046f161b4c2ebbcfd3090d706370',\n     'location': 'Powerstore1000T-appliance-1:BaseEnclosure-'\n                 'NodeA-IoModule0-FEPort0',\n     'connection_status': 'disconnected', 'health_status': 'normal',\n     'type': 'fc', 'speed': None, 'max_speed': 16000000000,\n     'native_parent_id': '3f75d31ca1dc4d8fb95707c7f996a063',\n     'wwn': '58:cc:f0:90:4d:20:19:fb'},\n    {'name': 'BaseEnclosure-NodeA-4PortCard-FEPort1', 'storage_id': '12345',\n     'native_port_id': '0133e5496e6b4671b37bb2fc94be1a25',\n     'location': 'Powerstore1000T-appliance-1:BaseEnclosure-'\n                 'NodeA-4PortCard-FEPort1',\n     'connection_status': 'disconnected', 'health_status': 'unknown',\n     'type': 'eth', 'speed': None, 'max_speed': None,\n     'native_parent_id': '3f75d31ca1dc4d8fb95707c7f996a063',\n     'mac_address': '0c48c6c9d455'},\n    {'name': 'BaseEnclosure-NodeB-EmbeddedModule-ServicePort',\n     'storage_id': '12345',\n     'native_port_id': '0235963690ab404c8e6a3485d5a58198',\n     'location': 'Powerstore1000T-appliance-1:BaseEnclosure-NodeB-'\n                 'EmbeddedModule-ServicePort',\n     'connection_status': 'disconnected', 'health_status': 'unknown',\n     'type': 'eth', 'speed': None, 'max_speed': 1000000000,\n     'native_parent_id': '5c93d9044ea94afea7d0ac853b89886c',\n     'mac_address': '006016d7a783'},\n    {'name': 'BaseEnclosure-NodeA-EmbeddedModule-BEPort0',\n     'storage_id': '12345',\n     'native_port_id': '7774e11064704ccc9b90d1d2c3c7da2c',\n     'location': 'Powerstore1000T-appliance-1:BaseEnclosure-NodeA-'\n                 'EmbeddedModule-BEPort0',\n     'connection_status': 'disconnected', 'health_status': 'unknown',\n     'type': 'sas', 'speed': None,\n     'native_parent_id': '3f75d31ca1dc4d8fb95707c7f996a063'},\n    {'name': 'BaseEnclosure-NodeB-EmbeddedModule-BEPort0',\n     'storage_id': '12345',\n     'native_port_id': 
'87f9ddbef894406aa7c43501f3d6a008',\n     'location': 'Powerstore1000T-appliance-1:BaseEnclosure-NodeB-'\n                 'EmbeddedModule-BEPort0',\n     'connection_status': 'disconnected', 'health_status': 'unknown',\n     'type': 'sas', 'speed': None,\n     'native_parent_id': '5c93d9044ea94afea7d0ac853b89886c'}]\nalert = {\n    '1.3.6.1.2.1.1.3.0': '1669372584',\n    '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.1139.205.1.2.2',\n    '1.3.6.1.4.1.1139.205.1.1.1': '0x01800103',\n    '1.3.6.1.4.1.1139.205.1.1.2':\n        'All configured DNS servers are unavailable.',\n    '1.3.6.1.4.1.1139.205.1.1.3':\n        'DNS servers availability status. (fully_unavailable)',\n    '1.3.6.1.4.1.1139.205.1.1.4': 'appliance',\n    '1.3.6.1.4.1.1139.205.1.1.5': 'A1',\n    '1.3.6.1.4.1.1139.205.1.1.6': 'Powerstore1000T-appliance-1',\n    '1.3.6.1.4.1.1139.205.1.1.7': 'ACTIVE',\n    '1.3.6.1.4.1.1139.205.1.1.10': '2022-11-06T04:23:29.278Z',\n    '1.3.6.1.4.1.1139.205.1.1.11': '',\n    '1.3.6.1.4.1.1139.205.1.1.8': 'A1',\n    '1.3.6.1.4.1.1139.205.1.1.9': '2022-11-25T10:36:16.822Z',\n    'transport_address': '192.168.3.241',\n    'storage_id': '38825735-0c48-481f-9551-95076950eebf',\n    'controller_name': 'NodeA'\n}\nresource_metrics = {\n    constants.ResourceType.STORAGE: consts.STORAGE_CAP,\n    constants.ResourceType.STORAGE_POOL: consts.STORAGE_POOL_CAP,\n    constants.ResourceType.VOLUME: consts.VOLUME_CAP,\n    constants.ResourceType.CONTROLLER: consts.CONTROLLER_CAP,\n    constants.ResourceType.PORT: consts.PORT_CAP\n}\nhost_info = [\n    {\n        \"id\": \"52c74385-b5a1-4af0-b9c6-18c47a04e27d\",\n        \"name\": \"hg02\",\n        \"host_initiators\": [\n            {\n                \"port_name\": \"11:00:22:a4:24:b5:32:25\",\n                \"port_type\": \"FC\",\n                \"active_sessions\": [],\n                \"chap_mutual_username\": None,\n                \"chap_single_username\": None\n            }\n        ],\n        \"os_type\": \"HP-UX\",\n        \"description\": \"hp\",\n        \"host_group_id\": None,\n        \"type\": \"External\",\n        \"host_virtual_volume_mappings\": [],\n        \"mapped_hosts\": [\n            {\n                \"id\": \"36cb8bf8-f9c1-4592-a7fd-fed5b0e29885\"\n            },\n            {\n                \"id\": \"7ea3ce7d-9d0f-4bc2-a207-b9283dcacb81\"\n            },\n            {\n                \"id\": \"db678691-7bfa-49e4-81a1-53dfb2188f8a\"\n            }\n        ]\n    },\n    {\n        \"id\": \"aa0793ff-5ee5-4593-8fdd-c28a58f7cf4f\",\n        \"name\": \"host01\",\n        \"host_initiators\": [\n            {\n                \"port_name\": \"iqn.2001-05.com.exampld:name2\",\n                \"port_type\": \"iSCSI\",\n                \"active_sessions\": [],\n                \"chap_mutual_username\": None,\n                \"chap_single_username\": None\n            },\n            {\n                \"port_name\": \"iqn.2001-05.com.exampld:name1\",\n                \"port_type\": \"iSCSI\",\n                \"active_sessions\": [],\n                \"chap_mutual_username\": None,\n                \"chap_single_username\": None\n            }\n        ],\n        \"os_type\": \"ESXi\",\n        \"description\": \"host\",\n        \"host_group_id\": \"ea01240f-4692-44f4-817b-924efd2c8519\",\n        \"type\": \"External\",\n        \"mapped_hosts\": []\n    }\n]\ninitiators_info = [\n    {'port_name': '11:00:22:a4:24:b5:32:25',\n     'id': '11:00:22:a4:24:b5:32:25',\n     'host_id': '52c74385-b5a1-4af0-b9c6-18c47a04e27d',\n     
'port_type': 'FC'\n     },\n    {'port_name': 'iqn.2001-05.com.exampld:name2',\n     'id': 'iqn.2001-05.com.exampld:name2',\n     'host_id': 'aa0793ff-5ee5-4593-8fdd-c28a58f7cf4f',\n     'port_type': 'iSCSI'\n     }\n]\ninitiators_data = [\n    {'native_storage_host_initiator_id': '11:00:22:a4:24:b5:32:25',\n     'native_storage_host_id': '52c74385-b5a1-4af0-b9c6-18c47a04e27d',\n     'name': '11:00:22:a4:24:b5:32:25', 'type': 'fc', 'status': 'unknown',\n     'wwn': '11:00:22:a4:24:b5:32:25', 'storage_id': '12345'}, {\n        'native_storage_host_initiator_id': 'iqn.2001-05.com.exampld:name2',\n        'native_storage_host_id': 'aa0793ff-5ee5-4593-8fdd-c28a58f7cf4f',\n        'name': 'iqn.2001-05.com.exampld:name2', 'type': 'iscsi',\n        'status': 'unknown', 'wwn': 'iqn.2001-05.com.exampld:name2',\n        'storage_id': '12345'}, {\n        'native_storage_host_initiator_id': 'iqn.2001-05.com.exampld:name1',\n        'native_storage_host_id': 'aa0793ff-5ee5-4593-8fdd-c28a58f7cf4f',\n        'name': 'iqn.2001-05.com.exampld:name1', 'type': 'iscsi',\n        'status': 'unknown', 'wwn': 'iqn.2001-05.com.exampld:name1',\n        'storage_id': '12345'}]\ninitiators_upgrade_data = [\n    {'native_storage_host_initiator_id': '11:00:22:a4:24:b5:32:25',\n     'native_storage_host_id': '52c74385-b5a1-4af0-b9c6-18c47a04e27d',\n     'name': '11:00:22:a4:24:b5:32:25', 'type': 'fc', 'status': 'unknown',\n     'wwn': '11:00:22:a4:24:b5:32:25', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': 'iqn.2001-05.com.exampld:name2',\n     'native_storage_host_id': 'aa0793ff-5ee5-4593-8fdd-c28a58f7cf4f',\n     'name': 'iqn.2001-05.com.exampld:name2', 'type': 'iscsi',\n     'status': 'unknown', 'wwn': 'iqn.2001-05.com.exampld:name2',\n     'storage_id': '12345'}]\nhost_data = [{'name': 'hg02', 'storage_id': '12345',\n              'native_storage_host_id': '52c74385-b5a1-4af0-b9c6-18c47a04e27d',\n              'description': 'hp', 'os_type': 'HP-UX', 'status': 'normal'},\n             {'name': 'host01', 'storage_id': '12345',\n              'native_storage_host_id': 'aa0793ff-5ee5-4593-8fdd-c28a58f7cf4f',\n              'description': 'host', 'os_type': 'VMware ESX',\n              'status': 'normal'}]\nhost_group_info = [\n    {\n        \"id\": \"ea01240f-4692-44f4-817b-924efd2c8519\",\n        \"name\": \"hg01\",\n        \"description\": \"hg\",\n        \"hosts\": [\n            {\n                \"id\": \"aa0793ff-5ee5-4593-8fdd-c28a58f7cf4f\"\n            }\n        ]\n    }\n]\nhost_group_data = {\n    'storage_host_groups': [\n        {\n            'native_storage_host_group_id':\n                'ea01240f-4692-44f4-817b-924efd2c8519',\n            'name': 'hg01', 'description': 'hg', 'storage_id': '12345'\n        }],\n    'storage_host_grp_host_rels': [{\n        'native_storage_host_group_id': 'ea01240f-4692-44f4-817b-924efd2c8519',\n        'storage_id': '12345',\n        'native_storage_host_id': 'aa0793ff-5ee5-4593-8fdd-c28a58f7cf4f'}\n    ]\n}\nvolume_groups_info = [\n    {\n        \"description\": \"null_volume_g\",\n        \"name\": \"null_vg\",\n        \"id\": \"0d434c72-5f1c-43b3-8a63-6a0cb5fd7cd9\",\n        \"volumes\": []\n    },\n    {\n        \"description\": \"vg\",\n        \"name\": \"vg02\",\n        \"id\": \"2018dec2-bb56-48aa-a2d3-c4402d57faf4\",\n        \"volumes\": [\n            {\n                \"id\": \"1e387f96-ec3a-4bba-8298-ab6764f7d772\"\n            },\n            {\n                \"id\": \"c73a7790-cd04-4ac1-b01d-32a7ac5d84d7\"\n            
},\n            {\n                \"id\": \"286e0694-800e-47e8-b0de-5262c57e9a30\"\n            }\n        ]\n    }]\nvolume_group_data = {\n    'volume_groups': [\n        {'name': 'null_vg', 'storage_id': '12345',\n         'native_volume_group_id': '0d434c72-5f1c-43b3-8a63-6a0cb5fd7cd9',\n         'description': 'null_volume_g'},\n        {'name': 'vg02', 'storage_id': '12345',\n         'native_volume_group_id': '2018dec2-bb56-48aa-a2d3-c4402d57faf4',\n         'description': 'vg'}],\n    'vol_grp_vol_rels': [\n        {'storage_id': '12345',\n         'native_volume_group_id': '2018dec2-bb56-48aa-a2d3-c4402d57faf4',\n         'native_volume_id': '1e387f96-ec3a-4bba-8298-ab6764f7d772'},\n        {'storage_id': '12345',\n         'native_volume_group_id': '2018dec2-bb56-48aa-a2d3-c4402d57faf4',\n         'native_volume_id': 'c73a7790-cd04-4ac1-b01d-32a7ac5d84d7'},\n        {'storage_id': '12345',\n         'native_volume_group_id': '2018dec2-bb56-48aa-a2d3-c4402d57faf4',\n         'native_volume_id': '286e0694-800e-47e8-b0de-5262c57e9a30'}]}\nmasking_info = [\n    {\n        \"host_group_id\": None,\n        \"host_id\": \"52c74385-b5a1-4af0-b9c6-18c47a04e27d\",\n        \"id\": \"36cb8bf8-f9c1-4592-a7fd-fed5b0e29885\",\n        \"logical_unit_number\": 3,\n        \"volume_id\": \"40ce0f3c-d250-4efc-b78a-1b1c768788f4\",\n    },\n    {\n        \"host_group_id\": \"ea01240f-4692-44f4-817b-924efd2c8519\",\n        \"host_id\": None,\n        \"id\": \"3d5c3954-853e-481e-b7de-821854697ed2\",\n        \"logical_unit_number\": 2,\n        \"volume_id\": \"40ce0f3c-d250-4efc-b78a-1b1c768788f4\",\n    }\n]\nmasking_data = [\n    {'native_masking_view_id': '36cb8bf8-f9c1-4592-a7fd-fed5b0e29885',\n     'name': '36cb8bf8-f9c1-4592-a7fd-fed5b0e29885',\n     'native_volume_id': '40ce0f3c-d250-4efc-b78a-1b1c768788f4',\n     'storage_id': '12345',\n     'native_storage_host_id': '52c74385-b5a1-4af0-b9c6-18c47a04e27d'},\n    {'native_masking_view_id': '3d5c3954-853e-481e-b7de-821854697ed2',\n     'name': '3d5c3954-853e-481e-b7de-821854697ed2',\n     'native_volume_id': '40ce0f3c-d250-4efc-b78a-1b1c768788f4',\n     'storage_id': '12345',\n     'native_storage_host_group_id': 'ea01240f-4692-44f4-817b-924efd2c8519'}]\ncluster_perf_info = [\n    {\n        \"timestamp\": \"2022-11-28T02:59:20Z\",\n        \"cluster_id\": \"0\",\n        \"avg_read_latency\": 0.0,\n        \"avg_latency\": 0.0,\n        \"avg_write_latency\": 0.0,\n        \"avg_read_size\": 0.0,\n        \"avg_write_size\": 0.0,\n        \"avg_io_size\": 0.0,\n        \"read_iops\": 0.0,\n        \"read_bandwidth\": 0.0,\n        \"total_iops\": 0.0,\n        \"total_bandwidth\": 0.0,\n        \"write_iops\": 0.0,\n        \"write_bandwidth\": 0.0,\n        \"repeat_count\": 1,\n        \"response_definition\": \"performance_metrics_by_cluster\",\n        \"entity\": \"performance_metrics_by_cluster\"\n    },\n    {\n        \"timestamp\": \"2022-11-28T02:59:40Z\",\n        \"cluster_id\": \"0\",\n        \"avg_read_latency\": 0.0,\n        \"avg_latency\": 0.0,\n        \"avg_write_latency\": 0.0,\n        \"avg_read_size\": 0.0,\n        \"avg_write_size\": 0.0,\n        \"avg_io_size\": 0.0,\n        \"read_iops\": 0.0,\n        \"read_bandwidth\": 0.0,\n        \"total_iops\": 0.0,\n        \"total_bandwidth\": 0.0,\n        \"write_iops\": 0.0,\n        \"write_bandwidth\": 0.0,\n        \"repeat_count\": 1,\n        \"response_definition\": \"performance_metrics_by_cluster\",\n        \"entity\": 
\"performance_metrics_by_cluster\"\n    }\n]\nappliance_perf_info = [\n    {\n        \"appliance_id\": \"A1\",\n        \"timestamp\": \"2022-11-28T02:59:40Z\",\n        \"avg_read_latency\": 0.0,\n        \"avg_write_latency\": 0.0,\n        \"avg_latency\": 0.0,\n        \"avg_read_size\": 0.0,\n        \"avg_write_size\": 0.0,\n        \"avg_io_size\": 0.0,\n        \"read_iops\": 0.0,\n        \"write_iops\": 0.0,\n        \"total_iops\": 0.0,\n        \"read_bandwidth\": 0.0,\n        \"write_bandwidth\": 0.0,\n        \"total_bandwidth\": 0.0,\n        \"io_workload_cpu_utilization\": 9.419034756121814E-4,\n        \"repeat_count\": 1,\n        \"response_definition\": \"performance_metrics_by_appliance\",\n        \"entity\": \"performance_metrics_by_appliance\"\n    },\n    {\n        \"appliance_id\": \"A1\",\n        \"timestamp\": \"2022-11-28T03:00:00Z\",\n        \"avg_read_latency\": 0.0,\n        \"avg_write_latency\": 0.0,\n        \"avg_latency\": 0.0,\n        \"avg_read_size\": 0.0,\n        \"avg_write_size\": 0.0,\n        \"avg_io_size\": 0.0,\n        \"read_iops\": 0.0,\n        \"write_iops\": 0.0,\n        \"total_iops\": 0.0,\n        \"read_bandwidth\": 0.0,\n        \"write_bandwidth\": 0.0,\n        \"total_bandwidth\": 0.0,\n        \"io_workload_cpu_utilization\": 6.913627946170662E-4,\n        \"repeat_count\": 1,\n        \"response_definition\": \"performance_metrics_by_appliance\",\n        \"entity\": \"performance_metrics_by_appliance\"\n    }\n]\nperf_volume_info = [{\n    \"app_type\": \"Business_Applications_ERP_SAP\",\n    \"app_type_l10n\": \"ERP / SAP\",\n    \"appliance_id\": \"A1\",\n    \"description\": \"什么 都不是\",\n    \"state\": \"Ready\",\n    \"type\": \"Primary\",\n    \"wwn\": \"naa.68ccf0980048d7ab86ec9f7fdfa9945d\",\n    \"size\": 3221225472,\n    \"name\": \"wu-003\",\n    \"id\": \"022ece9c-4921-46ba-ba4f-91c167a90cbe\",\n    \"appliance\": {\n        \"id\": \"A1\"\n    }}\n]\nvolume_perf_info = [\n    {\n        \"volume_id\": \"022ece9c-4921-46ba-ba4f-91c167a90cbe\",\n        \"timestamp\": \"2022-11-28T02:59:40Z\",\n        \"avg_read_latency\": 0.0,\n        \"avg_read_size\": 0.0,\n        \"avg_latency\": 0.0,\n        \"avg_write_latency\": 0.0,\n        \"avg_write_size\": 0.0,\n        \"read_iops\": 0.0,\n        \"read_bandwidth\": 0.0,\n        \"total_iops\": 0.0,\n        \"total_bandwidth\": 0.0,\n        \"write_iops\": 0.0,\n        \"write_bandwidth\": 0.0,\n        \"avg_io_size\": 0.0,\n        \"appliance_id\": \"A1\",\n        \"repeat_count\": 195,\n        \"response_definition\": \"performance_metrics_by_volume\",\n        \"entity\": \"performance_metrics_by_volume\"\n    }\n]\nperf_node_info = [\n    {\n        \"appliance_id\": \"A1\",\n        \"id\": \"N1\",\n        \"slot\": 0,\n    }\n]\ncontrollers_perf_info = [\n    {\n        \"timestamp\": \"2022-11-28T03:00:00Z\",\n        \"node_id\": \"N1\",\n        \"appliance_id\": \"A1\",\n        \"avg_read_latency\": 0.0,\n        \"avg_latency\": 0.0,\n        \"avg_write_latency\": 0.0,\n        \"avg_read_size\": 0.0,\n        \"avg_write_size\": 0.0,\n        \"avg_io_size\": 0.0,\n        \"io_workload_cpu_utilization\": 6.839733981557592E-4,\n        \"read_iops\": 0.0,\n        \"read_bandwidth\": 0.0,\n        \"total_iops\": 0.0,\n        \"total_bandwidth\": 0.0,\n        \"write_iops\": 0.0,\n        \"write_bandwidth\": 0.0,\n        \"current_logins\": 0,\n        \"unaligned_write_bandwidth\": 0.0,\n        \"unaligned_read_bandwidth\": 
0.0,\n        \"unaligned_read_iops\": 0.0,\n        \"unaligned_write_iops\": 0.0,\n        \"unaligned_bandwidth\": 0.0,\n        \"unaligned_iops\": 0.0,\n        \"repeat_count\": 1,\n        \"response_definition\": \"performance_metrics_by_node\",\n        \"entity\": \"performance_metrics_by_node\"\n    },\n    {\n        \"timestamp\": \"2022-11-28T03:00:20Z\",\n        \"node_id\": \"N1\",\n        \"appliance_id\": \"A1\",\n        \"avg_read_latency\": 0.0,\n        \"avg_latency\": 0.0,\n        \"avg_write_latency\": 0.0,\n        \"avg_read_size\": 0.0,\n        \"avg_write_size\": 0.0,\n        \"avg_io_size\": 0.0,\n        \"io_workload_cpu_utilization\": 9.434143408848538E-4,\n        \"read_iops\": 0.0,\n        \"read_bandwidth\": 0.0,\n        \"total_iops\": 0.0,\n        \"total_bandwidth\": 0.0,\n        \"write_iops\": 0.0,\n        \"write_bandwidth\": 0.0,\n        \"current_logins\": 0,\n        \"unaligned_write_bandwidth\": 0.0,\n        \"unaligned_read_bandwidth\": 0.0,\n        \"unaligned_read_iops\": 0.0,\n        \"unaligned_write_iops\": 0.0,\n        \"unaligned_bandwidth\": 0.0,\n        \"unaligned_iops\": 0.0,\n        \"repeat_count\": 1,\n        \"response_definition\": \"performance_metrics_by_node\",\n        \"entity\": \"performance_metrics_by_node\"\n    }\n]\nfc_perf_info = [\n    {\n        \"node_id\": \"3f75d31ca1dc4d8fb95707c7f996a063\",\n        \"timestamp\": \"2022-11-28T03:00:00Z\",\n        \"appliance_id\": \"A1\",\n        \"avg_read_latency\": 0.0,\n        \"avg_read_size\": 0.0,\n        \"avg_latency\": 0.0,\n        \"avg_write_latency\": 0.0,\n        \"avg_write_size\": 0.0,\n        \"read_iops\": 0.0,\n        \"read_bandwidth\": 0.0,\n        \"total_iops\": 0.0,\n        \"total_bandwidth\": 0.0,\n        \"write_iops\": 0.0,\n        \"write_bandwidth\": 0.0,\n        \"current_logins\": 0,\n        \"unaligned_write_bandwidth\": 0.0,\n        \"unaligned_read_bandwidth\": 0.0,\n        \"unaligned_read_iops\": 0.0,\n        \"unaligned_write_iops\": 0.0,\n        \"unaligned_bandwidth\": 0.0,\n        \"unaligned_iops\": 0.0,\n        \"avg_io_size\": 0.0,\n        \"fe_port_id\": \"090d75723f4147808fd5624582708381\",\n        \"dumped_frames_ps\": 0.0,\n        \"loss_of_signal_count_ps\": 0.0,\n        \"invalid_crc_count_ps\": 0.0,\n        \"loss_of_sync_count_ps\": 0.0,\n        \"invalid_tx_word_count_ps\": 0.0,\n        \"prim_seq_prot_err_count_ps\": 0.0,\n        \"link_failure_count_ps\": 0.0,\n        \"repeat_count\": 1,\n        \"response_definition\": \"performance_metrics_by_fe_fc_port\",\n        \"entity\": \"performance_metrics_by_fe_fc_port\"\n    },\n    {\n        \"node_id\": \"3f75d31ca1dc4d8fb95707c7f996a063\",\n        \"timestamp\": \"2022-11-28T03:00:20Z\",\n        \"appliance_id\": \"A1\",\n        \"avg_read_latency\": 0.0,\n        \"avg_read_size\": 0.0,\n        \"avg_latency\": 0.0,\n        \"avg_write_latency\": 0.0,\n        \"avg_write_size\": 0.0,\n        \"read_iops\": 0.0,\n        \"read_bandwidth\": 0.0,\n        \"total_iops\": 0.0,\n        \"total_bandwidth\": 0.0,\n        \"write_iops\": 0.0,\n        \"write_bandwidth\": 0.0,\n        \"current_logins\": 0,\n        \"unaligned_write_bandwidth\": 0.0,\n        \"unaligned_read_bandwidth\": 0.0,\n        \"unaligned_read_iops\": 0.0,\n        \"unaligned_write_iops\": 0.0,\n        \"unaligned_bandwidth\": 0.0,\n        \"unaligned_iops\": 0.0,\n        \"avg_io_size\": 0.0,\n        \"fe_port_id\": 
\"090d75723f4147808fd5624582708381\",\n        \"dumped_frames_ps\": 0.0,\n        \"loss_of_signal_count_ps\": 0.0,\n        \"invalid_crc_count_ps\": 0.0,\n        \"loss_of_sync_count_ps\": 0.0,\n        \"invalid_tx_word_count_ps\": 0.0,\n        \"prim_seq_prot_err_count_ps\": 0.0,\n        \"link_failure_count_ps\": 0.0,\n        \"repeat_count\": 1,\n        \"response_definition\": \"performance_metrics_by_fe_fc_port\",\n        \"entity\": \"performance_metrics_by_fe_fc_port\"\n    }\n]\n\nLOG = logging.getLogger(__name__)\n\n\ndef create_driver():\n    RestHandler.login = mock.Mock(\n        return_value={None})\n    return PowerStoreDriver(**ACCESS_INFO)\n\n\nclass test_PowerStoreDriver(TestCase):\n    driver = create_driver()\n\n    def test_init(self):\n        RestClient.do_call = mock.Mock(return_value={None})\n        PowerStoreDriver(**ACCESS_INFO)\n\n    def test_get_storage(self):\n        RestHandler.get_storage_pools = mock.Mock(return_value=pools_data)\n        RestHandler.get_disks = mock.Mock(return_value=disk_data)\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[clusters, appliance, software_installed])\n        storages = self.driver.get_storage(context)\n        self.assertEqual(storages, storage_data)\n\n    def test_get_storage_error(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[None])\n        try:\n            storages = self.driver.get_storage(context)\n        except Exception as e:\n            LOG.error(six.text_type(e))\n            storages = {}\n        self.assertDictEqual(storages, {})\n\n    def test_get_storage_pools(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[appliance, appliance_capacity])\n        pools = self.driver.list_storage_pools(context)\n        self.assertListEqual(pools, pools_data)\n\n    def test_get_disks(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[hardware_info])\n        disks = self.driver.list_disks(context)\n        self.assertListEqual(disks, disk_data)\n\n    def test_list_volumes(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[volume_info, volume_generate])\n        volumes = self.driver.list_volumes(context)\n        self.assertListEqual(volumes, volume_data)\n\n    def test_list_alerts(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[alerts_info])\n        query_para = {'begin_time': 1667292765000, 'end_time': 1668502365000}\n        alerts = self.driver.list_alerts(context, query_para)\n        self.assertListEqual(alerts, alerts_data)\n\n    def test_parse_alerts(self):\n        alerts = self.driver.parse_alert(context, alert)\n        self.assertDictEqual(alerts, snmp_alert_data)\n\n    def test_list_controllers(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[node_info, ip_pool_address, hardware_info])\n        controllers = self.driver.list_controllers(context)\n        self.assertListEqual(controllers, controllers_data)\n\n    def test_get_alert_sources(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[node_info, ip_pool_address, hardware_info])\n        alert_sources = self.driver.get_alert_sources(context)\n        self.assertListEqual(alert_sources, alert_sources_data)\n\n    def test_list_ports(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[hardware_port_info, appliance, fc_info, eth_info,\n                         sas_info])\n        ports = 
self.driver.list_ports(context)\n        self.assertListEqual(ports, ports_data)\n\n    def test_reset_connection(self):\n        # Reuse the test class as a stand-in response object; the driver\n        # only reads its status_code and text attributes.\n        return_value = __class__\n        return_value.status_code = 400\n        return_value.text = 'error'\n        RestClient.do_call = mock.Mock(return_value=return_value)\n        res = None\n        try:\n            self.driver.reset_connection(context)\n        except Exception as e:\n            LOG.info(six.text_type(e))\n            res = {}\n        # reset_connection must raise on the mocked 400 response.\n        self.assertEqual(res, {})\n\n    def test_get_access_url(self):\n        # Smoke test: only verifies the call completes without raising.\n        url = self.driver.get_access_url()\n        self.assertEqual(url, url)\n\n    def test_collect_perf_metrics(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[clusters, cluster_perf_info,\n                         appliance, appliance_perf_info,\n                         perf_volume_info, volume_perf_info,\n                         hardware_info, perf_node_info, controllers_perf_info,\n                         perf_fc_info, fc_perf_info])\n        collect = self.driver.collect_perf_metrics(context, ACCESS_INFO.get(\n            'storage_id'), resource_metrics, 1669604280000, 1669604580000)\n        self.assertEqual(collect[0].values.get(1669604340000), 0)\n\n    def test_get_capabilities(self):\n        capabilities = self.driver.get_capabilities(context)\n        self.assertDictEqual(capabilities.get('resource_metrics'),\n                             resource_metrics)\n\n    def test_get_latest_perf_timestamp(self):\n        # Smoke test: only verifies the call completes without raising.\n        RestHandler.rest_call = mock.Mock(side_effect=[clusters])\n        perf_timestamp = self.driver.get_latest_perf_timestamp(context)\n        self.assertEqual(perf_timestamp, perf_timestamp)\n\n    def test_list_storage_host_initiators(self):\n        RestHandler.rest_call = mock.Mock(side_effect=[[], host_info])\n        initiators = self.driver.list_storage_host_initiators(context)\n        self.assertEqual(initiators, initiators_data)\n\n    def test_list_storage_host_initiators_upgrade(self):\n        RestHandler.rest_call = mock.Mock(side_effect=[initiators_info])\n        initiators = self.driver.list_storage_host_initiators(context)\n        self.assertEqual(initiators, initiators_upgrade_data)\n\n    def test_list_storage_hosts(self):\n        RestHandler.rest_call = mock.Mock(side_effect=[host_info])\n        hosts = self.driver.list_storage_hosts(context)\n        self.assertEqual(hosts, host_data)\n\n    def test_list_storage_host_groups(self):\n        RestHandler.rest_call = mock.Mock(side_effect=[host_group_info])\n        host_groups = self.driver.list_storage_host_groups(context)\n        self.assertEqual(host_groups, host_group_data)\n\n    def test_list_volume_groups(self):\n        RestHandler.rest_call = mock.Mock(side_effect=[volume_groups_info])\n        volume_groups = self.driver.list_volume_groups(context)\n        self.assertEqual(volume_groups, volume_group_data)\n\n    def test_list_masking_views(self):\n        RestHandler.rest_call = mock.Mock(side_effect=[masking_info])\n        masking_views = self.driver.list_masking_views(context)\n        self.assertEqual(masking_views, masking_data)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/scaleio/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/scaleio/test_constans.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nSYSTEM_INFO = [\n    {\n        \"systemVersionName\": \"DellEMC ScaleIO Version: R2_5.0.254\",\n        \"capacityAlertHighThresholdPercent\": 80,\n        \"capacityAlertCriticalThresholdPercent\": 90,\n        \"remoteReadOnlyLimitState\": False,\n        \"upgradeState\": \"NoUpgrade\",\n        \"mdmManagementPort\": 6611,\n        \"sdcMdmNetworkDisconnectionsCounterParameters\": {\n            \"shortWindow\": {\n                \"threshold\": 300,\n                \"windowSizeInSec\": 60\n            },\n            \"mediumWindow\": {\n                \"threshold\": 500,\n                \"windowSizeInSec\": 3600\n            },\n            \"longWindow\": {\n                \"threshold\": 700,\n                \"windowSizeInSec\": 86400\n            }\n        },\n        \"sdcSdsNetworkDisconnectionsCounterParameters\": {\n            \"shortWindow\": {\n                \"threshold\": 800,\n                \"windowSizeInSec\": 60\n            },\n            \"mediumWindow\": {\n                \"threshold\": 4000,\n                \"windowSizeInSec\": 3600\n            },\n            \"longWindow\": {\n                \"threshold\": 20000,\n                \"windowSizeInSec\": 86400\n            }\n        },\n        \"sdcMemoryAllocationFailuresCounterParameters\": {\n            \"shortWindow\": {\n                \"threshold\": 300,\n                \"windowSizeInSec\": 60\n            },\n            \"mediumWindow\": {\n                \"threshold\": 500,\n                \"windowSizeInSec\": 3600\n            },\n            \"longWindow\": {\n                \"threshold\": 700,\n                \"windowSizeInSec\": 86400\n            }\n        },\n        \"sdcSocketAllocationFailuresCounterParameters\": {\n            \"shortWindow\": {\n                \"threshold\": 300,\n                \"windowSizeInSec\": 60\n            },\n            \"mediumWindow\": {\n                \"threshold\": 500,\n                \"windowSizeInSec\": 3600\n            },\n            \"longWindow\": {\n                \"threshold\": 700,\n                \"windowSizeInSec\": 86400\n            }\n        },\n        \"sdcLongOperationsCounterParameters\": {\n            \"shortWindow\": {\n                \"threshold\": 10000,\n                \"windowSizeInSec\": 60\n            },\n            \"mediumWindow\": {\n                \"threshold\": 100000,\n                \"windowSizeInSec\": 3600\n            },\n            \"longWindow\": {\n                \"threshold\": 1000000,\n                \"windowSizeInSec\": 86400\n            }\n        },\n        \"cliPasswordAllowed\": True,\n        \"managementClientSecureCommunicationEnabled\": True,\n        \"tlsVersion\": \"TLSv1.2\",\n        \"showGuid\": True,\n        \"authenticationMethod\": \"Native\",\n        \"mdmToSdsPolicy\": \"Authentication\",\n        \"mdmCluster\": {\n            \"clusterState\": 
\"ClusteredNormal\",\n            \"clusterMode\": \"ThreeNodes\",\n            \"goodNodesNum\": 3,\n            \"goodReplicasNum\": 2,\n            \"id\": \"8049148500852184920\"\n        },\n        \"perfProfile\": \"Default\",\n        \"installId\": \"7b940dfb71191770\",\n        \"daysInstalled\": 6,\n        \"maxCapacityInGb\": \"Unlimited\",\n        \"capacityTimeLeftInDays\": \"Unlimited\",\n        \"isInitialLicense\": True,\n        \"defaultIsVolumeObfuscated\": False,\n        \"restrictedSdcModeEnabled\": False,\n        \"restrictedSdcMode\": \"None\",\n        \"enterpriseFeaturesEnabled\": True,\n        \"id\": \"6fb451ea51a99758\",\n        \"links\": [\n            {\n                \"rel\": \"/api/System/relationship/Statistics\",\n                \"href\": \"/api/instances/System::6fb451ea51a99758/\"\n                        \"relationships/Statistics\"\n            }\n        ]\n    }\n]\n\nSYSTEM_DETAIL = {\n    \"pendingMovingOutBckRebuildJobs\": 0,\n    \"rfcachePoolWritePending\": 0,\n    \"degradedHealthyCapacityInKb\": 0,\n    \"activeMovingOutFwdRebuildJobs\": 0,\n    \"rfcachePoolWritePendingG1Sec\": 0,\n    \"bckRebuildWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"primaryReadFromDevBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"BackgroundScannedInMB\": 0,\n    \"rfcacheReadsSkippedAlignedSizeTooLarge\": 0,\n    \"rfcachePoolSize\": 0,\n    \"pendingMovingInRebalanceJobs\": 0,\n    \"rfcacheWritesSkippedHeavyLoad\": 0,\n    \"rfcachePoolPagesInuse\": 0,\n    \"unusedCapacityInKb\": 14107527168,\n    \"rmcacheEntryEvictionCount\": 0,\n    \"rfcacheFdAvgWriteTime\": 0,\n    \"totalReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"totalWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rmPendingAllocatedInKb\": 0,\n    \"numOfVolumes\": 19,\n    \"rfcacheIosOutstanding\": 0,\n    \"rmcacheBigBlockEvictionSizeCountInKb\": 0,\n    \"capacityAvailableForVolumeAllocationInKb\": 7038042112,\n    \"numOfMappedToAllVolumes\": 0,\n    \"numOfScsiInitiators\": 0,\n    \"rebuildPerReceiveJobNetThrottlingInKbps\": 0,\n    \"rmcache32kbEntryCount\": 0,\n    \"rfcachePoolEvictions\": 0,\n    \"rfcachePoolNumCacheDevs\": 0,\n    \"activeMovingInNormRebuildJobs\": 0,\n    \"rfcacheFdWriteTimeGreater500Millis\": 0,\n    \"rmcacheSkipCountCacheAllBusy\": 0,\n    \"fixedReadErrorCount\": 0,\n    \"rfcachePoolNumSrcDevs\": 0,\n    \"numOfSdc\": 3,\n    \"rfcacheFdMonitorErrorStuckIo\": 0,\n    \"rfcacheReadsSkippedInternalError\": 0,\n    \"pendingMovingInBckRebuildJobs\": 0,\n    \"rfcachePoolWritePendingG500Micro\": 0,\n    \"activeBckRebuildCapacityInKb\": 0,\n    \"rebalanceCapacityInKb\": 0,\n    \"rfcachePoolInLowMemoryCondition\": 0,\n    \"rfcacheReadsSkippedLowResources\": 0,\n    \"thinCapacityInUseInKb\": 4096,\n    \"rfcachePoolLowResourcesInitiatedPassthroughMode\": 0,\n    \"rfcachePoolWritePendingG10Millis\": 0,\n    \"rfcacheWritesSkippedInternalError\": 0,\n    \"rfcachePoolWriteHit\": 0,\n    \"rmcache128kbEntryCount\": 0,\n    \"rfcacheWritesSkippedCacheMiss\": 0,\n    \"rfcacheFdReadTimeGreater5Sec\": 0,\n    \"numOfFaultSets\": 0,\n    \"degradedFailedCapacityInKb\": 0,\n    \"BackgroundScanCompareCount\": 0,\n    \"activeNormRebuildCapacityInKb\": 0,\n    \"snapCapacityInUseInKb\": 
20967424,\n    \"rfcacheWriteMiss\": 0,\n    \"rfcacheFdIoErrors\": 0,\n    \"primaryReadFromRmcacheBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"numOfVtrees\": 19,\n    \"rfacheReadHit\": 0,\n    \"rfcachePooIosOutstanding\": 0,\n    \"pendingMovingCapacityInKb\": 0,\n    \"numOfSnapshots\": 0,\n    \"sdcIds\": [\n        \"7bec302f00000000\",\n        \"7bec303000000001\",\n        \"7bec303100000002\"\n    ],\n    \"pendingFwdRebuildCapacityInKb\": 0,\n    \"rmcacheBigBlockEvictionCount\": 0,\n    \"rmcacheNoEvictionCount\": 0,\n    \"rmcacheCurrNumOf128kbEntries\": 0,\n    \"normRebuildCapacityInKb\": 0,\n    \"rfcachePoolReadPendingG1Millis\": 0,\n    \"primaryWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"numOfThickBaseVolumes\": 14,\n    \"rmcacheSizeInUseInKb\": 0,\n    \"rfcachePoolReadPendingG10Millis\": 0,\n    \"activeRebalanceCapacityInKb\": 0,\n    \"rfcacheReadsSkippedLockIos\": 0,\n    \"unreachableUnusedCapacityInKb\": 0,\n    \"rfcachePoolReadPendingG500Micro\": 0,\n    \"rmcache8kbEntryCount\": 0,\n    \"numOfVolumesInDeletion\": 0,\n    \"maxCapacityInKb\": 17932736512,\n    \"pendingMovingOutFwdRebuildJobs\": 0,\n    \"rmcacheSkipCountLargeIo\": 0,\n    \"protectedCapacityInKb\": 3825209343,\n    \"secondaryWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"normRebuildReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"thinCapacityAllocatedInKb\": 2097152000,\n    \"thinCapacityAllocatedInKm\": 2097152000,\n    \"rebalanceWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rmcacheCurrNumOf8kbEntries\": 0,\n    \"primaryVacInKb\": 2961178624,\n    \"secondaryVacInKb\": 2961178624,\n    \"numOfDevices\": 10,\n    \"rfcachePoolWriteMiss\": 0,\n    \"rfcachePoolReadPendingG1Sec\": 0,\n    \"failedCapacityInKb\": 0,\n    \"rebalanceWaitSendQLength\": 0,\n    \"rfcacheFdReadTimeGreater1Min\": 0,\n    \"rmcache4kbEntryCount\": 0,\n    \"rfcachePoolWritePendingG1Millis\": 0,\n    \"rebalancePerReceiveJobNetThrottlingInKbps\": 0,\n    \"rfcacheReadsFromCache\": 0,\n    \"activeMovingOutBckRebuildJobs\": 0,\n    \"rfcacheFdReadTimeGreater1Sec\": 0,\n    \"rmcache64kbEntryCount\": 0,\n    \"pendingMovingInNormRebuildJobs\": 0,\n    \"primaryReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"failedVacInKb\": 0,\n    \"pendingRebalanceCapacityInKb\": 0,\n    \"rfcacheAvgReadTime\": 0,\n    \"semiProtectedCapacityInKb\": 0,\n    \"rfcachePoolSourceIdMismatch\": 0,\n    \"rfcacheFdAvgReadTime\": 0,\n    \"fwdRebuildReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheWritesReceived\": 0,\n    \"rfcachePoolSuspendedIos\": 0,\n    \"protectedVacInKb\": 5922357248,\n    \"activeMovingRebalanceJobs\": 0,\n    \"bckRebuildCapacityInKb\": 0,\n    \"activeMovingInFwdRebuildJobs\": 0,\n    \"pendingMovingRebalanceJobs\": 0,\n    \"degradedHealthyVacInKb\": 0,\n    \"rfcachePoolLockTimeGreater1Sec\": 0,\n    \"semiProtectedVacInKb\": 0,\n    \"userDataReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"pendingBckRebuildCapacityInKb\": 0,\n    
\"rmcacheCurrNumOf4kbEntries\": 0,\n    \"capacityLimitInKb\": 17932736512,\n    \"numOfProtectionDomains\": 1,\n    \"activeMovingCapacityInKb\": 0,\n    \"rfcacheIosSkipped\": 0,\n    \"scsiInitiatorIds\": [],\n    \"rfcacheFdWriteTimeGreater5Sec\": 0,\n    \"userDataWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"inMaintenanceVacInKb\": 0,\n    \"rfcacheReadsSkipped\": 0,\n    \"rfcachePoolReadHit\": 0,\n    \"rebuildWaitSendQLength\": 0,\n    \"numOfUnmappedVolumes\": 17,\n    \"rmcacheCurrNumOf64kbEntries\": 0,\n    \"rfcacheWritesSkippedMaxIoSize\": 0,\n    \"rfacheWriteHit\": 0,\n    \"atRestCapacityInKb\": 3825209344,\n    \"bckRebuildReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheSourceDeviceWrites\": 0,\n    \"spareCapacityInKb\": 0,\n    \"rfcacheFdInlightReads\": 0,\n    \"normRebuildWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"numOfSds\": 3,\n    \"rfcacheIoErrors\": 0,\n    \"capacityInUseInKb\": 3825209344,\n    \"rebalanceReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rmcacheSkipCountUnaligned4kbIo\": 0,\n    \"rfcacheReadsSkippedMaxIoSize\": 0,\n    \"secondaryReadFromDevBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcachePoolSuspendedPequestsRedundantSearchs\": 0,\n    \"secondaryReadFromRmcacheBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheWritesSkippedStuckIo\": 0,\n    \"secondaryReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"numOfStoragePools\": 2,\n    \"rfcachePoolCachePages\": 0,\n    \"inMaintenanceCapacityInKb\": 0,\n    \"protectionDomainIds\": [\n        \"4389836100000000\"\n    ],\n    \"inUseVacInKb\": 5922357248,\n    \"fwdRebuildCapacityInKb\": 0,\n    \"thickCapacityInUseInKb\": 3825205248,\n    \"activeMovingInRebalanceJobs\": 0,\n    \"rmcacheCurrNumOf32kbEntries\": 0,\n    \"rfcacheWritesSkippedLowResources\": 0,\n    \"rfcacheFdCacheOverloaded\": 0,\n    \"rmcache16kbEntryCount\": 0,\n    \"rmcacheEntryEvictionSizeCountInKb\": 0,\n    \"rfcacheSkippedUnlinedWrite\": 0,\n    \"rfcacheAvgWriteTime\": 0,\n    \"pendingNormRebuildCapacityInKb\": 0,\n    \"rfcacheFdReadTimeGreater500Millis\": 0,\n    \"pendingMovingOutNormrebuildJobs\": 0,\n    \"rfcacheSourceDeviceReads\": 0,\n    \"rmcacheCurrNumOf16kbEntries\": 0,\n    \"rfcacheReadsPending\": 0,\n    \"fwdRebuildWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheReadsSkippedHeavyLoad\": 0,\n    \"rfcacheFdInlightWrites\": 0,\n    \"rfcacheReadMiss\": 0,\n    \"rfcacheFdReadsReceived\": 0,\n    \"activeMovingInBckRebuildJobs\": 0,\n    \"movingCapacityInKb\": 0,\n    \"pendingMovingInFwdRebuildJobs\": 0,\n    \"rfcacheReadsReceived\": 0,\n    \"rfcachePoolReadsPending\": 0,\n    \"snapCapacityInUseOccupiedInKb\": 0,\n    \"activeFwdRebuildCapacityInKb\": 0,\n    \"rfcacheReadsSkippedStuckIo\": 0,\n    \"activeMovingOutNormRebuildJobs\": 0,\n    \"rfcacheFdWritesReceived\": 0,\n    \"rmcacheSizeInKb\": 393216,\n    \"rfcacheFdWriteTimeGreater1Min\": 0,\n    \"rfcacheWritePending\": 0,\n    \"rfcacheFdWriteTimeGreater1Sec\": 0,\n    
\"numOfThinBaseVolumes\": 5,\n    \"numOfRfcacheDevices\": 0,\n    \"degradedFailedVacInKb\": 0,\n    \"rfcachePoolIoTimeGreater1Min\": 0,\n    \"rfcachePoolReadMiss\": 0\n}\nSYSTEM_STORAGE_POOL_INFO = [\n    {\n        \"protectionDomainId\": \"4389836100000000\",\n        \"sparePercentage\": 0,\n        \"rmcacheWriteHandlingMode\": \"Cached\",\n        \"checksumEnabled\": False,\n        \"useRfcache\": False,\n        \"rebuildEnabled\": True,\n        \"rebalanceEnabled\": True,\n        \"numOfParallelRebuildRebalanceJobsPerDevice\": 2,\n        \"capacityAlertHighThreshold\": 80,\n        \"capacityAlertCriticalThreshold\": 90,\n        \"rebalanceIoPriorityPolicy\": \"favorAppIos\",\n        \"rebuildIoPriorityNumOfConcurrentIosPerDevice\": 1,\n        \"rebuildIoPriorityPolicy\": \"limitNumOfConcurrentIos\",\n        \"rebalanceIoPriorityNumOfConcurrentIosPerDevice\": 1,\n        \"rebuildIoPriorityBwLimitPerDeviceInKbps\": 10240,\n        \"rebalanceIoPriorityBwLimitPerDeviceInKbps\": 10240,\n        \"rebuildIoPriorityAppIopsPerDeviceThreshold\": None,\n        \"rebalanceIoPriorityAppIopsPerDeviceThreshold\": None,\n        \"rebuildIoPriorityAppBwPerDeviceThresholdInKbps\": None,\n        \"rebalanceIoPriorityAppBwPerDeviceThresholdInKbps\": None,\n        \"rebuildIoPriorityQuietPeriodInMsec\": None,\n        \"rebalanceIoPriorityQuietPeriodInMsec\": None,\n        \"zeroPaddingEnabled\": False,\n        \"useRmcache\": False,\n        \"backgroundScannerMode\": \"Disabled\",\n        \"backgroundScannerBWLimitKBps\": 0,\n        \"name\": \"StoragePool\",\n        \"id\": \"b1566d0f00000000\",\n        \"links\": [\n            {\n                \"rel\": \"/api/StoragePool/relationship/Statistics\",\n                \"href\": \"/api/instances/StoragePool::b1566d0f00000000/\"\n                        \"relationships/Statistics\"\n            }\n        ]\n    }\n]\n\nSYSTEM_POOL_DETAIL = {\n    \"pendingMovingOutBckRebuildJobs\": 0,\n    \"deviceIds\": [\n        \"6afe148700000000\",\n        \"6afe3b9c00000001\",\n        \"6afe3b9d00000002\",\n        \"6afe148500010000\",\n        \"6afe3b9900010001\",\n        \"6afe3b9a00010002\",\n        \"6afe3b9b00010003\",\n        \"bbfe3b9600020000\",\n        \"bbfe3b9e00020001\",\n        \"bbfe3b9f00020002\"\n    ],\n    \"secondaryVacInKb\": 2961178624,\n    \"numOfDevices\": 10,\n    \"degradedHealthyCapacityInKb\": 0,\n    \"activeMovingOutFwdRebuildJobs\": 0,\n    \"bckRebuildWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"failedCapacityInKb\": 0,\n    \"primaryReadFromDevBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"BackgroundScannedInMB\": 0,\n    \"volumeIds\": [\n        \"851005a700000000\",\n        \"851005a800000001\",\n        \"851005a900000002\",\n        \"851005aa00000003\",\n        \"851005ab00000004\",\n        \"851005ac00000005\",\n        \"851005ad00000006\",\n        \"851005ae00000007\",\n        \"851005af00000008\",\n        \"851005b000000009\",\n        \"851005b10000000a\",\n        \"851005b20000000b\",\n        \"851005b30000000c\",\n        \"851005b40000000d\",\n        \"851005b50000000e\",\n        \"851005b60000000f\",\n        \"85102cb300000010\",\n        \"85102cb400000011\",\n        \"85102cb500000012\"\n    ],\n    \"activeMovingOutBckRebuildJobs\": 0,\n    \"rfcacheReadsFromCache\": 0,\n    \"pendingMovingInNormRebuildJobs\": 0,\n    
\"rfcacheReadsSkippedAlignedSizeTooLarge\": 0,\n    \"primaryReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"failedVacInKb\": 0,\n    \"pendingMovingInRebalanceJobs\": 0,\n    \"pendingRebalanceCapacityInKb\": 0,\n    \"rfcacheWritesSkippedHeavyLoad\": 0,\n    \"unusedCapacityInKb\": 14107527168,\n    \"rfcacheAvgReadTime\": 0,\n    \"semiProtectedCapacityInKb\": 0,\n    \"totalReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"fwdRebuildReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"totalWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheWritesReceived\": 0,\n    \"rmPendingAllocatedInKb\": 0,\n    \"numOfVolumes\": 19,\n    \"rfcacheIosOutstanding\": 0,\n    \"protectedVacInKb\": 5922357248,\n    \"capacityAvailableForVolumeAllocationInKb\": 7038042112,\n    \"numOfMappedToAllVolumes\": 0,\n    \"bckRebuildCapacityInKb\": 0,\n    \"activeMovingInFwdRebuildJobs\": 0,\n    \"activeMovingRebalanceJobs\": 0,\n    \"pendingMovingRebalanceJobs\": 0,\n    \"degradedHealthyVacInKb\": 0,\n    \"semiProtectedVacInKb\": 0,\n    \"userDataReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"pendingBckRebuildCapacityInKb\": 0,\n    \"capacityLimitInKb\": 17932736512,\n    \"vtreeIds\": [\n        \"d39b454d00000000\",\n        \"d39b454e00000001\",\n        \"d39b454f00000002\",\n        \"d39b455000000003\",\n        \"d39b455100000004\",\n        \"d39b455200000005\",\n        \"d39b455300000006\",\n        \"d39b455400000007\",\n        \"d39b455500000008\",\n        \"d39b455600000009\",\n        \"d39b45570000000a\",\n        \"d39b45580000000b\",\n        \"d39b45590000000c\",\n        \"d39b455a0000000d\",\n        \"d39b455b0000000e\",\n        \"d39b455c0000000f\",\n        \"d39b6c5700000010\",\n        \"d39b6c5800000011\",\n        \"d39b6c5900000012\"\n    ],\n    \"activeMovingInNormRebuildJobs\": 0,\n    \"activeMovingCapacityInKb\": 0,\n    \"rfcacheIosSkipped\": 0,\n    \"userDataWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"inMaintenanceVacInKb\": 0,\n    \"rfcacheReadsSkipped\": 0,\n    \"numOfUnmappedVolumes\": 17,\n    \"rfcacheWritesSkippedMaxIoSize\": 0,\n    \"fixedReadErrorCount\": 0,\n    \"rfacheWriteHit\": 0,\n    \"atRestCapacityInKb\": 3825209344,\n    \"bckRebuildReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"pendingMovingInBckRebuildJobs\": 0,\n    \"rfcacheReadsSkippedInternalError\": 0,\n    \"activeBckRebuildCapacityInKb\": 0,\n    \"rfcacheSourceDeviceWrites\": 0,\n    \"spareCapacityInKb\": 0,\n    \"rebalanceCapacityInKb\": 0,\n    \"normRebuildWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheIoErrors\": 0,\n    \"capacityInUseInKb\": 3825209344,\n    \"rfcacheReadsSkippedLowResources\": 0,\n    \"rebalanceReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"thinCapacityInUseInKb\": 4096,\n    \"rfcacheReadsSkippedMaxIoSize\": 0,\n    \"secondaryReadFromDevBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    
},\n    \"secondaryReadFromRmcacheBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"secondaryReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheWritesSkippedStuckIo\": 0,\n    \"rfcacheWritesSkippedInternalError\": 0,\n    \"inMaintenanceCapacityInKb\": 0,\n    \"inUseVacInKb\": 5922357248,\n    \"fwdRebuildCapacityInKb\": 0,\n    \"rfcacheWritesSkippedCacheMiss\": 0,\n    \"thickCapacityInUseInKb\": 3825205248,\n    \"activeMovingInRebalanceJobs\": 0,\n    \"degradedFailedCapacityInKb\": 0,\n    \"BackgroundScanCompareCount\": 0,\n    \"activeNormRebuildCapacityInKb\": 0,\n    \"snapCapacityInUseInKb\": 26210304,\n    \"rfcacheWriteMiss\": 0,\n    \"rfcacheWritesSkippedLowResources\": 0,\n    \"primaryReadFromRmcacheBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"numOfVtrees\": 19,\n    \"rfacheReadHit\": 0,\n    \"rfcacheSkippedUnlinedWrite\": 0,\n    \"rfcacheAvgWriteTime\": 0,\n    \"pendingMovingCapacityInKb\": 0,\n    \"numOfSnapshots\": 0,\n    \"pendingNormRebuildCapacityInKb\": 0,\n    \"pendingFwdRebuildCapacityInKb\": 0,\n    \"pendingMovingOutNormrebuildJobs\": 0,\n    \"normRebuildCapacityInKb\": 0,\n    \"rfcacheSourceDeviceReads\": 0,\n    \"primaryWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"numOfThickBaseVolumes\": 14,\n    \"rfcacheReadsPending\": 0,\n    \"rfcacheReadsSkippedHeavyLoad\": 0,\n    \"fwdRebuildWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheReadMiss\": 0,\n    \"activeRebalanceCapacityInKb\": 0,\n    \"activeMovingInBckRebuildJobs\": 0,\n    \"movingCapacityInKb\": 0,\n    \"rfcacheReadsSkippedLockIos\": 0,\n    \"unreachableUnusedCapacityInKb\": 0,\n    \"pendingMovingInFwdRebuildJobs\": 0,\n    \"rfcacheReadsReceived\": 0,\n    \"numOfVolumesInDeletion\": 0,\n    \"maxCapacityInKb\": 17932736512,\n    \"snapCapacityInUseOccupiedInKb\": 0,\n    \"pendingMovingOutFwdRebuildJobs\": 0,\n    \"activeFwdRebuildCapacityInKb\": 0,\n    \"rfcacheReadsSkippedStuckIo\": 0,\n    \"activeMovingOutNormRebuildJobs\": 0,\n    \"protectedCapacityInKb\": 3825209343,\n    \"secondaryWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheWritePending\": 0,\n    \"normRebuildReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"numOfThinBaseVolumes\": 5,\n    \"thinCapacityAllocatedInKb\": 2097152000,\n    \"degradedFailedVacInKb\": 0,\n    \"thinCapacityAllocatedInKm\": 2097152000,\n    \"rebalanceWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"primaryVacInKb\": 2961178624\n}\n\nSYSTEM_STORAGE_VOLUME_INFO = [\n    {\n        \"mappedSdcInfo\": [\n            {\n                \"sdcId\": \"7bec302f00000000\",\n                \"sdcIp\": \"192.168.3.240\",\n                \"limitIops\": 0,\n                \"limitBwInMbps\": 0\n            },\n            {\n                \"sdcId\": \"7bec303100000002\",\n                \"sdcIp\": \"192.168.3.239\",\n                \"limitIops\": 0,\n                \"limitBwInMbps\": 0\n            },\n            {\n                \"sdcId\": \"7bec303000000001\",\n                \"sdcIp\": 
\"192.168.3.241\",\n                \"limitIops\": 0,\n                \"limitBwInMbps\": 0\n            }\n        ],\n        \"mappingToAllSdcsEnabled\": False,\n        \"isVvol\": False,\n        \"sizeInKb\": 209715200,\n        \"vtreeId\": \"d39b455100000004\",\n        \"isObfuscated\": False,\n        \"volumeType\": \"ThinProvisioned\",\n        \"consistencyGroupId\": None,\n        \"ancestorVolumeId\": None,\n        \"useRmcache\": False,\n        \"storagePoolId\": \"b1566d0f00000000\",\n        \"creationTime\": 1653359703,\n        \"name\": \"volume023\",\n        \"id\": \"851005ab00000004\",\n        \"links\": [\n            {\n                \"rel\": \"/api/Volume/relationship/Statistics\",\n                \"href\": \"/api/instances/Volume::851005ab00000004/\"\n                        \"relationships/Statistics\"\n            }\n        ]\n    }\n]\nSYSTEM_VOLUME_DETAIL = {\n    \"descendantVolumeIds\": [],\n    \"numOfMappedScsiInitiators\": 0,\n    \"numOfChildVolumes\": 0,\n    \"numOfMappedSdcs\": 3,\n    \"userDataReadBwc\": {\n        \"numSeconds\": 1,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"userDataWriteBwc\": {\n        \"numSeconds\": 1,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"numOfDescendantVolumes\": 0,\n    \"childVolumeIds\": [],\n    \"mappedSdcIds\": [\n        \"7bec302f00000000\",\n        \"7bec303100000002\",\n        \"7bec303000000001\"\n    ]\n}\n\nSYSTEM_STORAGE_DISK_INFO = [\n    {\n        \"sdsId\": \"29ab6a0a00000000\",\n        \"deviceState\": \"Normal\",\n        \"capacityLimitInKb\": 942668800,\n        \"maxCapacityInKb\": 942668800,\n        \"ledSetting\": \"Off\",\n        \"storagePoolId\": \"b1566d0f00000000\",\n        \"errorState\": \"None\",\n        \"name\": \"sd09\",\n        \"id\": \"6afe3b9d00000002\"\n    }\n]\n\nSYSTEM_DISK_DETAIL = {\n    \"rfcacheReadsSkippedInternalError\": 0,\n    \"pendingMovingInBckRebuildJobs\": 0,\n    \"avgReadLatencyInMicrosec\": 618,\n    \"rfcacheSourceDeviceWrites\": 0,\n    \"pendingMovingOutBckRebuildJobs\": 0,\n    \"secondaryVacInKb\": 150192128,\n    \"normRebuildWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheIoErrors\": 0,\n    \"capacityInUseInKb\": 222265344,\n    \"rfcacheReadsSkippedLowResources\": 0,\n    \"rebalanceReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"thinCapacityInUseInKb\": 0,\n    \"activeMovingOutFwdRebuildJobs\": 0,\n    \"rfcacheReadsSkippedMaxIoSize\": 0,\n    \"secondaryReadFromDevBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"bckRebuildWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"avgWriteLatencyInMicrosec\": 0,\n    \"secondaryReadFromRmcacheBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"secondaryReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheWritesSkippedInternalError\": 0,\n    \"rfcacheWritesSkippedStuckIo\": 0,\n    \"primaryReadFromDevBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"BackgroundScannedInMB\": 0,\n    \"rfcacheReadsFromCache\": 0,\n    \"activeMovingOutBckRebuildJobs\": 0,\n    
\"pendingMovingInNormRebuildJobs\": 0,\n    \"inUseVacInKb\": 310329344,\n    \"rfcacheWritesSkippedCacheMiss\": 0,\n    \"rfcacheReadsSkippedAlignedSizeTooLarge\": 0,\n    \"primaryReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"failedVacInKb\": 0,\n    \"pendingMovingInRebalanceJobs\": 0,\n    \"rfcacheWritesSkippedHeavyLoad\": 0,\n    \"thickCapacityInUseInKb\": 222265344,\n    \"unusedCapacityInKb\": 719354880,\n    \"rfcacheAvgReadTime\": 0,\n    \"activeMovingInRebalanceJobs\": 0,\n    \"BackgroundScanCompareCount\": 0,\n    \"totalReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"snapCapacityInUseInKb\": 1048576,\n    \"rfcacheWriteMiss\": 0,\n    \"rfcacheWritesSkippedLowResources\": 0,\n    \"primaryReadFromRmcacheBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"fwdRebuildReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"totalWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheWritesReceived\": 0,\n    \"rmPendingAllocatedInKb\": 0,\n    \"rfacheReadHit\": 0,\n    \"rfcacheIosOutstanding\": 0,\n    \"rfcacheSkippedUnlinedWrite\": 0,\n    \"protectedVacInKb\": 310329344,\n    \"activeMovingInFwdRebuildJobs\": 0,\n    \"activeMovingRebalanceJobs\": 0,\n    \"rfcacheAvgWriteTime\": 0,\n    \"pendingMovingRebalanceJobs\": 0,\n    \"pendingMovingOutNormrebuildJobs\": 0,\n    \"degradedHealthyVacInKb\": 0,\n    \"rfcacheSourceDeviceReads\": 0,\n    \"avgWriteSizeInBytes\": 0,\n    \"primaryWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheReadsPending\": 0,\n    \"rfcacheReadsSkippedHeavyLoad\": 0,\n    \"semiProtectedVacInKb\": 0,\n    \"fwdRebuildWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheReadMiss\": 0,\n    \"avgReadSizeInBytes\": 1024,\n    \"activeMovingInBckRebuildJobs\": 0,\n    \"capacityLimitInKb\": 942668800,\n    \"rfcacheReadsSkippedLockIos\": 0,\n    \"unreachableUnusedCapacityInKb\": 0,\n    \"pendingMovingInFwdRebuildJobs\": 0,\n    \"rfcacheReadsReceived\": 0,\n    \"activeMovingInNormRebuildJobs\": 0,\n    \"rfcacheIosSkipped\": 0,\n    \"inMaintenanceVacInKb\": 0,\n    \"rfcacheReadsSkipped\": 0,\n    \"snapCapacityInUseOccupiedInKb\": 0,\n    \"maxCapacityInKb\": 942668800,\n    \"pendingMovingOutFwdRebuildJobs\": 0,\n    \"rfcacheReadsSkippedStuckIo\": 0,\n    \"activeMovingOutNormRebuildJobs\": 0,\n    \"secondaryWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"normRebuildReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"rfcacheWritePending\": 0,\n    \"rfcacheWritesSkippedMaxIoSize\": 0,\n    \"fixedReadErrorCount\": 0,\n    \"thinCapacityAllocatedInKb\": 88064000,\n    \"degradedFailedVacInKb\": 0,\n    \"thinCapacityAllocatedInKm\": 88064000,\n    \"rfacheWriteHit\": 0,\n    \"rebalanceWriteBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"bckRebuildReadBwc\": {\n        \"numSeconds\": 0,\n        \"totalWeightInKb\": 0,\n        \"numOccured\": 0\n    },\n    \"primaryVacInKb\": 
160137216\n}\n\nSYSTEM_HOST_INFO = [\n    {\n        \"sdcApproved\": True,\n        \"osType\": \"Linux\",\n        \"mdmConnectionState\": \"Connected\",\n        \"memoryAllocationFailure\": None,\n        \"socketAllocationFailure\": None,\n        \"softwareVersionInfo\": \"R2_5.0.0\",\n        \"sdcGuid\": \"ADEF3BC8-693F-4FCF-A423-6890508731C8\",\n        \"installedSoftwareVersionInfo\": \"R2_5.0.0\",\n        \"kernelVersion\": \"3.10.0\",\n        \"kernelBuildNumber\": None,\n        \"sdcIp\": \"192.168.3.240\",\n        \"sdcApprovedIps\": None,\n        \"versionInfo\": \"R2_5.0.0\",\n        \"perfProfile\": \"Default\",\n        \"systemId\": \"6fb451ea51a99758\",\n        \"name\": None,\n        \"id\": \"7bec302f00000000\",\n    },\n    {\n        \"sdcApproved\": True,\n        \"osType\": \"Linux\",\n        \"mdmConnectionState\": \"Connected\",\n        \"memoryAllocationFailure\": None,\n        \"socketAllocationFailure\": None,\n        \"softwareVersionInfo\": \"R2_5.0.0\",\n        \"sdcGuid\": \"FBAD6944-6F2D-442C-9AA1-9FF0403B7235\",\n        \"installedSoftwareVersionInfo\": \"R2_5.0.0\",\n        \"kernelVersion\": \"3.10.0\",\n        \"kernelBuildNumber\": None,\n        \"sdcIp\": \"192.168.3.241\",\n        \"sdcApprovedIps\": None,\n        \"versionInfo\": \"R2_5.0.0\",\n        \"perfProfile\": \"Default\",\n        \"systemId\": \"6fb451ea51a99758\",\n        \"name\": None,\n        \"id\": \"7bec303000000001\",\n    },\n    {\n        \"sdcApproved\": True,\n        \"osType\": \"Linux\",\n        \"mdmConnectionState\": \"Connected\",\n        \"memoryAllocationFailure\": None,\n        \"socketAllocationFailure\": None,\n        \"softwareVersionInfo\": \"R2_5.0.0\",\n        \"sdcGuid\": \"FFA0F6C3-E2CD-45F5-AF7E-0C1DDF570303\",\n        \"installedSoftwareVersionInfo\": \"R2_5.0.0\",\n        \"kernelVersion\": \"3.10.0\",\n        \"kernelBuildNumber\": None,\n        \"sdcIp\": \"192.168.3.239\",\n        \"sdcApprovedIps\": None,\n        \"versionInfo\": \"R2_5.0.0\",\n        \"perfProfile\": \"Default\",\n        \"systemId\": \"6fb451ea51a99758\",\n        \"name\": None,\n        \"id\": \"7bec303100000002\",\n    }\n]\n\nSYSTEM_INITIATORS_INFO = [\n    {\n        \"protectionDomainId\": \"4389836100000000\",\n        \"faultSetId\": None,\n        \"sdsState\": \"Normal\",\n        \"membershipState\": \"Joined\",\n        \"mdmConnectionState\": \"Connected\",\n        \"drlMode\": \"Volatile\",\n        \"rmcacheEnabled\": True,\n        \"rmcacheSizeInKb\": 131072,\n        \"rmcacheFrozen\": False,\n        \"rmcacheMemoryAllocationState\": \"AllocationPending\",\n        \"rfcacheEnabled\": True,\n        \"maintenanceState\": \"NoMaintenance\",\n        \"sdsDecoupled\": None,\n        \"sdsConfigurationFailure\": None,\n        \"sdsReceiveBufferAllocationFailures\": None,\n        \"rfcacheErrorLowResources\": False,\n        \"rfcacheErrorApiVersionMismatch\": False,\n        \"rfcacheErrorInconsistentCacheConfiguration\": False,\n        \"rfcacheErrorInconsistentSourceConfiguration\": False,\n        \"rfcacheErrorInvalidDriverPath\": False,\n        \"authenticationError\": \"None\",\n        \"softwareVersionInfo\": \"R2_5.0.0\",\n        \"rfcacheErrorDeviceDoesNotExist\": False,\n        \"numOfIoBuffers\": None,\n        \"perfProfile\": \"Default\",\n        \"ipList\": [\n            {\n                \"ip\": \"192.168.3.241\",\n                \"role\": \"all\"\n            }\n        ],\n        \"onVmWare\": 
True,\n        \"name\": \"SDS_192.168.3.241\",\n        \"port\": 7072,\n        \"id\": \"29ab911800000002\",\n    },\n    {\n        \"protectionDomainId\": \"4389836100000000\",\n        \"faultSetId\": None,\n        \"sdsState\": \"Normal\",\n        \"membershipState\": \"Joined\",\n        \"mdmConnectionState\": \"Connected\",\n        \"drlMode\": \"Volatile\",\n        \"rmcacheEnabled\": True,\n        \"rmcacheSizeInKb\": 131072,\n        \"rmcacheFrozen\": False,\n        \"rmcacheMemoryAllocationState\": \"AllocationPending\",\n        \"rfcacheEnabled\": True,\n        \"maintenanceState\": \"NoMaintenance\",\n        \"sdsDecoupled\": None,\n        \"sdsConfigurationFailure\": None,\n        \"sdsReceiveBufferAllocationFailures\": None,\n        \"rfcacheErrorLowResources\": False,\n        \"rfcacheErrorApiVersionMismatch\": False,\n        \"rfcacheErrorInconsistentCacheConfiguration\": False,\n        \"rfcacheErrorInconsistentSourceConfiguration\": False,\n        \"rfcacheErrorInvalidDriverPath\": False,\n        \"authenticationError\": \"None\",\n        \"softwareVersionInfo\": \"R2_5.0.0\",\n        \"rfcacheErrorDeviceDoesNotExist\": False,\n        \"numOfIoBuffers\": None,\n        \"perfProfile\": \"Default\",\n        \"ipList\": [\n            {\n                \"ip\": \"192.168.3.239\",\n                \"role\": \"all\"\n            }\n        ],\n        \"onVmWare\": True,\n        \"name\": \"SDS_192.168.3.239\",\n        \"port\": 7072,\n        \"id\": \"29ab6a0a00000000\",\n    },\n    {\n        \"protectionDomainId\": \"4389836100000000\",\n        \"faultSetId\": None,\n        \"sdsState\": \"Normal\",\n        \"membershipState\": \"Joined\",\n        \"mdmConnectionState\": \"Connected\",\n        \"drlMode\": \"Volatile\",\n        \"rmcacheEnabled\": True,\n        \"rmcacheSizeInKb\": 131072,\n        \"rmcacheFrozen\": False,\n        \"rmcacheMemoryAllocationState\": \"AllocationPending\",\n        \"rfcacheEnabled\": True,\n        \"maintenanceState\": \"NoMaintenance\",\n        \"sdsDecoupled\": None,\n        \"sdsConfigurationFailure\": None,\n        \"sdsReceiveBufferAllocationFailures\": None,\n        \"rfcacheErrorLowResources\": False,\n        \"rfcacheErrorApiVersionMismatch\": False,\n        \"rfcacheErrorInconsistentCacheConfiguration\": False,\n        \"rfcacheErrorInconsistentSourceConfiguration\": False,\n        \"rfcacheErrorInvalidDriverPath\": False,\n        \"authenticationError\": \"None\",\n        \"softwareVersionInfo\": \"R2_5.0.0\",\n        \"rfcacheErrorDeviceDoesNotExist\": False,\n        \"numOfIoBuffers\": None,\n        \"perfProfile\": \"Default\",\n        \"ipList\": [\n            {\n                \"ip\": \"192.168.3.240\",\n                \"role\": \"all\"\n            }\n        ],\n        \"onVmWare\": True,\n        \"name\": \"SDS_192.168.3.240\",\n        \"port\": 7072,\n        \"id\": \"29ab6a0800000001\",\n    }\n]\nSYSTEM_STORAGE = {\n    \"name\": \"ScaleIO\",\n    \"vendor\": \"DELL EMC\",\n    \"model\": \"DellEMC ScaleIO\",\n    \"status\": \"normal\",\n    \"serial_number\": \"6fb451ea51a99758\",\n    \"firmware_version\": \"R2_5.0.254\",\n    \"raw_capacity\": 965292851200,\n    \"total_capacity\": 18363122188288,\n    \"used_capacity\": 3917014368256,\n    \"free_capacity\": 14446107820032\n}\n\nSYSTEM_ALERT_INFO = [\n    {\n        \"alertType\": \"TRIAL_LICENSE_USED\",\n        \"severity\": \"ALERT_LOW\",\n        \"affectedObject\": {\n            \"type\": 
\"com.emc.ecs.api.model.gen.System\",\n            \"id\": \"6fb451ea51a99758\",\n            \"objectId\": \"6fb451ea51a99758\"\n        },\n        \"alertValues\": {},\n        \"lastObserved\": \"2022-05-27T03:10:52.552Z\",\n        \"uuid\": \"31d682d5-e696-466e-990a-57d0f9616b21\",\n        \"startTime\": \"2022-05-26T18:00:13.336Z\",\n        \"name\": \"31d682d5-e696-466e-990a-57d0f9616b21\",\n        \"id\": \"31d682d5-e696-466e-990a-57d0f9616b21\",\n    }\n]\n\nSYSTEM_TRAP_ALERT = 'system.sysUpTime.0=6132004 S:1.1.4.1.0=E:1139.101.1 ' \\\n                    'E:1139.101.1.1=5 ' \\\n                    'E:1139.101.1.2=\"MDM.MDM_Cluster.MDM_CONNECTION_LOST\" ' \\\n                    'E:1139.101.1.3=\"hjfadsfa42524533\" ' \\\n                    'E:1139.101.1.4=\"SIO02.01.0000008\"'\n\nSYSTEM_STORAGE_POOL = [\n    {\n        \"name\": \"StoragePool\",\n        \"storage_id\": \"12345\",\n        \"native_storage_pool_id\": \"b1566d0f00000000\",\n        \"status\": \"normal\",\n        \"storage_type\": \"block\",\n        \"total_capacity\": 18363122188288,\n        \"used_capacity\": 3917014368256,\n        \"free_capacity\": 14446107820032\n    }\n]\n\nSYSTEM_STORAGE_VOLUME = [\n    {\n        \"name\": \"volume023\",\n        \"storage_id\": \"12345\",\n        \"description\": \"volume023\",\n        \"status\": \"normal\",\n        \"native_volume_id\": \"851005ab00000004\",\n        \"native_storage_pool_id\": \"b1566d0f00000000\",\n        \"wwn\": \"851005ab00000004\",\n        \"type\": \"thin\",\n        \"total_capacity\": 214748364800,\n        \"free_capacit\": 0,\n        \"used_capacity\": 0,\n        \"compressed\": True,\n        \"deduplicated\": True\n    }\n]\n\nSYSTEM_STORAGE_DISK = [\n    {\n        'native_disk_id': '6afe3b9d00000002',\n        'name': 'sd09',\n        'status': 'normal',\n        'storage_id': '12345',\n        'native_disk_group_id': '29ab6a0a00000000',\n        'serial_number': '6afe3b9d00000002',\n        'capacity': 965292851200,\n        'health_score': 'normal'\n    }\n]\n\n\nSYSTEM_ALERT = [\n    {\n        'alert_id': '31d682d5-e696-466e-990a-57d0f9616b21',\n        'alert_name': 'TRIAL_LICENSE_USED31d682d5-e696-466e-990a-57d0f9616b21',\n        'severity': 'Minor',\n        'category': 'Fault',\n        'type': 'TRIAL_LICENSE_USED',\n        'sequence_number': '31d682d5-e696-466e-990a-57d0f9616b21',\n        'description': 'trial license used',\n        'occur_time': 1653588013336,\n        'match_key': '10648e5e11b1d6daf4f5cf989349967d'\n    }\n]\n\nSYSTEM_HOST = [\n    {\n        \"name\": \"ADEF3BC8-693F-4FCF-A423-6890508731C8\",\n        \"description\": \"192.168.3.240R2_5.0.0\",\n        \"storage_id\": \"12345\",\n        \"native_storage_host_id\": \"7bec302f00000000\",\n        \"os_type\": \"Linux\",\n        \"status\": \"normal\",\n        \"ip_address\": \"192.168.3.240\"\n    }, {\n        \"name\": \"FBAD6944-6F2D-442C-9AA1-9FF0403B7235\",\n        \"description\": \"192.168.3.241R2_5.0.0\",\n        \"storage_id\": \"12345\",\n        \"native_storage_host_id\": \"7bec303000000001\",\n        \"os_type\": \"Linux\",\n        \"status\": \"normal\",\n        \"ip_address\": \"192.168.3.241\"\n    }, {\n        \"name\": \"FFA0F6C3-E2CD-45F5-AF7E-0C1DDF570303\",\n        \"description\": \"192.168.3.239R2_5.0.0\",\n        \"storage_id\": \"12345\",\n        \"native_storage_host_id\": \"7bec303100000002\",\n        \"os_type\": \"Linux\",\n        \"status\": \"normal\",\n        \"ip_address\": \"192.168.3.239\"\n   
 }\n]\n\nSYSTEM_VIEW_MAPPING = [\n    {'name': 'volume0237bec302f00000000851005ab00000004',\n     'description': 'volume023',\n     'storage_id': '12345',\n     'native_masking_view_id': 'volume0237bec302f00000000851005ab00000004',\n     'native_volume_id': '851005ab00000004',\n     'native_storage_host_id': '7bec302f00000000'\n     }, {\n        'name': 'volume0237bec303100000002851005ab00000004',\n        'description': 'volume023',\n        'storage_id': '12345',\n        'native_masking_view_id': 'volume0237bec303100000002851005ab00000004',\n        'native_volume_id': '851005ab00000004',\n        'native_storage_host_id': '7bec303100000002'\n    }, {\n        'name': 'volume0237bec303000000001851005ab00000004',\n        'description': 'volume023',\n        'storage_id': '12345',\n        'native_masking_view_id': 'volume0237bec303000000001851005ab00000004',\n        'native_volume_id': '851005ab00000004',\n        'native_storage_host_id': '7bec303000000001'\n    }\n]\n\n\nSYSTEM_INITIATORS = [\n    {\"name\": \"SDS_192.168.3.241\",\n     \"storage_id\": \"12345\",\n     \"native_storage_host_initiator_id\": \"29ab911800000002\",\n     \"wwn\": \"29ab911800000002\",\n     \"type\": \"unknown\",\n     \"status\": \"online\",\n     \"native_storage_host_id\": \"7bec303000000001\"\n     }, {\n        \"name\": \"SDS_192.168.3.239\",\n        \"storage_id\": \"12345\",\n        \"native_storage_host_initiator_id\": \"29ab6a0a00000000\",\n        \"wwn\": \"29ab6a0a00000000\",\n        \"type\": \"unknown\",\n        \"status\": \"online\",\n        \"native_storage_host_id\": \"7bec303100000002\"\n    }, {\n        \"name\": \"SDS_192.168.3.240\",\n        \"storage_id\": \"12345\",\n        \"native_storage_host_initiator_id\": \"29ab6a0800000001\",\n        \"wwn\": \"29ab6a0800000001\",\n        \"type\": \"unknown\",\n        \"status\": \"online\",\n        \"native_storage_host_id\": \"7bec302f00000000\"\n    }\n]\n\nSYSTEM_TRAP = {\n    'category': 'Fault',\n    'type': 'EquipmentAlarm',\n    'occur_time': 1655171867749,\n    'severity': 'Critical',\n    'description': 'mdm connection lost\"',\n    'location': 'mdm connection lost\"',\n    'alert_id': 'hjfadsfa42524533',\n    'alert_name': 'SIO02.01.0000008'\n}\n"
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/scaleio/test_scaleio_stor.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nfrom delfin import context\nfrom unittest import TestCase, mock\n\n\nfrom delfin.drivers.dell_emc.scaleio.scaleio_stor import ScaleioStorageDriver\nfrom delfin.drivers.dell_emc.scaleio.rest_handler import RestHandler\nfrom delfin.tests.unit.drivers.dell_emc.scaleio import test_constans\n\n\nsys.modules['delfin.cryptor'] = mock.Mock()\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"dell_emc\",\n    \"model\": \"scaleio\",\n    \"rest\": {\n        \"host\": \"8.44.162.250\",\n        \"port\": 443,\n        \"username\": \"admin\",\n        \"password\": \"Pbu4@123\"\n    }\n}\n\n\nclass TestScaleIOStorDriver(TestCase):\n    RestHandler.login = mock.Mock(return_value=None)\n\n    def test_get_storage(self):\n        RestHandler.get_rest_info = mock.Mock(side_effect=[\n            test_constans.SYSTEM_INFO, test_constans.SYSTEM_DETAIL])\n        RestHandler.list_disks = mock.Mock(side_effect=[test_constans.\n                                           SYSTEM_STORAGE_DISK])\n        system_storage = ScaleioStorageDriver(**ACCESS_INFO).\\\n            get_storage(context)\n        self.assertEqual(system_storage, test_constans.SYSTEM_STORAGE)\n\n    def test_list_storage_pool(self):\n        RestHandler.get_rest_info = mock.Mock(side_effect=[\n            test_constans.SYSTEM_STORAGE_POOL_INFO,\n            test_constans.SYSTEM_POOL_DETAIL])\n        storage_pool = ScaleioStorageDriver(**ACCESS_INFO)\\\n            .list_storage_pools(context)\n        self.assertEqual(storage_pool, test_constans.SYSTEM_STORAGE_POOL)\n\n    def test_list_volume(self):\n        RestHandler.get_rest_info = mock.Mock(side_effect=[\n            test_constans.SYSTEM_STORAGE_VOLUME_INFO,\n            test_constans.SYSTEM_VOLUME_DETAIL])\n        storage_volumes = ScaleioStorageDriver(**ACCESS_INFO)\\\n            .list_volumes(context)\n        self.assertEqual(storage_volumes, test_constans.SYSTEM_STORAGE_VOLUME)\n\n    def test_list_alert(self):\n        RestHandler.get_rest_info = mock.Mock(\n            side_effect=[test_constans.SYSTEM_ALERT_INFO])\n        storage_alert = ScaleioStorageDriver(**ACCESS_INFO).\\\n            list_alerts(context)\n        alert_result = test_constans.SYSTEM_ALERT\n        alert_result[0]['occur_time'] = storage_alert[0]['occur_time']\n        self.assertEqual(storage_alert, alert_result)\n\n    def test_list_storage_host_initiators(self):\n        RestHandler.get_rest_info = mock.Mock(\n            side_effect=[test_constans.SYSTEM_INITIATORS_INFO])\n        RestHandler.list_storage_hosts = mock.Mock(\n            side_effect=[test_constans.SYSTEM_HOST])\n        storage_initiators = ScaleioStorageDriver(**ACCESS_INFO). 
\\\n            list_storage_host_initiators(context)\n        self.assertEqual(storage_initiators, test_constans.SYSTEM_INITIATORS)\n\n    def test_list_masking_views(self):\n        RestHandler.get_rest_info = mock.Mock(\n            side_effect=[test_constans.SYSTEM_STORAGE_VOLUME_INFO])\n        storage_mapping = ScaleioStorageDriver(**ACCESS_INFO). \\\n            list_masking_views(context)\n        self.assertEqual(storage_mapping, test_constans.SYSTEM_VIEW_MAPPING)\n\n    def test_list_hosts(self):\n        RestHandler.get_rest_info = mock.Mock(\n            side_effect=[test_constans.SYSTEM_HOST_INFO])\n        storage_host = ScaleioStorageDriver(**ACCESS_INFO)\\\n            .list_storage_hosts(context)\n        self.assertEqual(storage_host, test_constans.SYSTEM_HOST)\n\n    def test_parse_alert(self):\n        trap_alert = test_constans.SYSTEM_TRAP_ALERT\n        storage_trap_alert = ScaleioStorageDriver(**ACCESS_INFO).\\\n            parse_alert(context, trap_alert)\n        trap_alert_result = test_constans.SYSTEM_TRAP\n        trap_alert_result['occur_time'] = storage_trap_alert['occur_time']\n        self.assertEqual(storage_trap_alert, trap_alert_result)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/unity/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/unity/test_emc_unity.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nfrom unittest import TestCase, mock\n\nsys.modules['delfin.cryptor'] = mock.Mock()\nfrom delfin import context\nfrom delfin.drivers.dell_emc.unity.rest_handler import RestHandler\nfrom delfin.drivers.dell_emc.unity.unity import UnityStorDriver\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"rest\": {\n        \"host\": \"110.143.132.231\",\n        \"port\": \"8443\",\n        \"username\": \"username\",\n        \"password\": \"cGFzc3dvcmQ=\"\n    }\n}\nGET_STORAGE_NORMAL = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"0\",\n                \"health\": {\n                    \"value\": 5,\n                },\n                \"name\": \"CETV3182000026\",\n                \"model\": \"Unity 350F\",\n                \"serialNumber\": \"CETV3182000026\"\n            }\n        }\n    ]\n}\nGET_STORAGE_ABNORMAL = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"0\",\n                \"health\": {\n                    \"value\": 20,\n                },\n                \"name\": \"CETV3182000026\",\n                \"model\": \"Unity 350F\",\n                \"serialNumber\": \"CETV3182000026\"\n            }\n        }\n    ]\n}\nGET_CAPACITY = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"0\",\n                \"sizeFree\": 2311766147072,\n                \"sizeTotal\": 8838774259712,\n                \"sizeUsed\": 6527008112640,\n                \"sizeSubscribed\": 307567976775680,\n                \"totalLogicalSize\": 307542206971904\n            }\n        }\n    ]\n}\nGET_SOFT_VERSION = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"4.7.1\"\n            }\n        }\n    ]\n}\nstorage_normal_result = {\n    'free_capacity': 2311766147072,\n    'serial_number': 'CETV3182000026',\n    'subscribed_capacity': 307567976775680,\n    'used_capacity': 6527008112640,\n    'vendor': 'DELL EMC',\n    'location': '',\n    'total_capacity': 8838774259712,\n    'status': 'normal',\n    'name': 'CETV3182000026',\n    'model': 'Unity 350F',\n    'raw_capacity': 12121212,\n    'firmware_version': '4.7.1'\n}\nstorage_abnormal_result = {\n    'name': 'CETV3182000026',\n    'vendor': 'DELL EMC',\n    'model': 'Unity 350F',\n    'status': 'normal',\n    'serial_number': 'CETV3182000026',\n    'firmware_version': '4.7.1',\n    'location': '',\n    'subscribed_capacity': 307567976775680,\n    'total_capacity': 8838774259712,\n    'raw_capacity': 12121212,\n    'used_capacity': 6527008112640,\n    'free_capacity': 2311766147072\n}\nGET_ALL_POOLS = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"pool_1\",\n                \"health\": {\n                    \"value\": 7\n                },\n                \"name\": \"pool1\",\n                \"sizeFree\": 2311766147072,\n                
\"sizeTotal\": 8838774259712,\n                \"sizeUsed\": 6527008112640,\n                \"sizeSubscribed\": 310896039559168\n            }\n        }\n    ]\n}\nGET_ALL_ABNORMAL_POOLS = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"pool_1\",\n                \"health\": {\n                    \"value\": 20\n                },\n                \"name\": \"pool1\",\n                \"sizeFree\": 2311766147072,\n                \"sizeTotal\": 8838774259712,\n                \"sizeUsed\": 6527008112640,\n                \"sizeSubscribed\": 310896039559168\n            }\n        }\n    ]\n}\npool_result = [\n    {\n        'native_storage_pool_id': 'pool_1',\n        'status': 'normal',\n        'free_capacity': 2311766147072,\n        'name': 'pool1',\n        'storage_type': 'unified',\n        'total_capacity': 8838774259712,\n        'description': None,\n        'subscribed_capacity': 310896039559168,\n        'used_capacity': 6527008112640,\n        'storage_id': '12345'\n    }\n]\npool_abnormal_result = [\n    {\n        'native_storage_pool_id': 'pool_1',\n        'status': 'abnormal',\n        'free_capacity': 2311766147072,\n        'name': 'pool1',\n        'storage_type': 'unified',\n        'total_capacity': 8838774259712,\n        'description': None,\n        'subscribed_capacity': 310896039559168,\n        'used_capacity': 6527008112640,\n        'storage_id': '12345'\n    }\n]\nGET_ALL_LUNS = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"sv_1\",\n                \"type\": 2,\n                \"health\": {\n                    \"value\": 5\n                },\n                \"name\": \"LUN-00\",\n                \"sizeTotal\": 107374182400,\n                \"sizeAllocated\": 0,\n                \"wwn\": \"60:06:01:60:0B:00:49:00:BE:CE:6C:5C:56:C1:9D:D2\",\n                \"pool\": {\n                    \"id\": \"pool_1\"\n                }\n            }\n        }\n    ]\n}\nGET_ALL_LUNS_NULL = {\n    \"entries\": []\n}\nvolume_result = [\n    {\n        'used_capacity': 0,\n        'free_capacity': 107374182400,\n        'native_storage_pool_id': 'pool_1',\n        'description': None,\n        'native_volume_id': 'sv_1',\n        'total_capacity': 107374182400,\n        'storage_id': '12345',\n        'wwn': '60:06:01:60:0B:00:49:00:BE:CE:6C:5C:56:C1:9D:D2',\n        'type': 'thick',\n        'name': 'LUN-00',\n        'status': 'normal'\n    }\n]\nTRAP_INFO = {\n    \"1.3.6.1.2.1.1.3.0\": \"0\",\n    '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.1139.103.1.18.2.0',\n    '1.3.6.1.4.1.1139.103.1.18.1.1': 'eeeeeeeee',\n    '1.3.6.1.4.1.1139.103.1.18.1.3': '14:60bba',\n    '1.3.6.1.4.1.1139.103.1.18.1.4': 'this is test',\n    '1.3.6.1.4.1.1139.103.1.18.1.5': '2020/11/20 14:10:10',\n    '1.3.6.1.4.1.1139.103.1.18.1.2': 'test'\n}\nTRAP_NOT_IN_MAPPPING_INFO = {\n    \"1.3.6.1.2.1.1.3.0\": \"0\",\n    '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.1139.103.1.18.2.0',\n    '1.3.6.1.4.1.1139.103.1.18.1.1': 'eeeeeeeee',\n    '1.3.6.1.4.1.1139.103.1.18.1.3': '14:60bba1',\n    '1.3.6.1.4.1.1139.103.1.18.1.4': 'this is test',\n    '1.3.6.1.4.1.1139.103.1.18.1.5': '2020/11/20 14:10:10',\n    '1.3.6.1.4.1.1139.103.1.18.1.2': 'test'\n}\ntrap_result = {\n    'alert_id': '14:60bba',\n    'alert_name': 'this is test',\n    'severity': 'Critical',\n    'category': 'Fault',\n    'type': 'EquipmentAlarm',\n    'occur_time': 1605852610000,\n    'description': 'Storage resource allocation from one of the pools has '\n      
             'exceed the 85% threshold. Allocate more storage space '\n                   'from the pool to the storage resource.',\n    'resource_type': 'Storage',\n    'location': 'eeeeeeeee',\n    'match_key': '8c6d115258631625b625486f81b09532'\n}\ntrap_not_in_mapping_result = {\n    'alert_id': '14:60bba1',\n    'alert_name': 'this is test',\n    'severity': 'Critical',\n    'category': 'Fault',\n    'type': 'EquipmentAlarm',\n    'occur_time': 1605852610000,\n    'description': 'this is test',\n    'resource_type': 'Storage',\n    'location': 'eeeeeeeee',\n    'match_key': '8c6d115258631625b625486f81b09532'\n}\nGET_ALL_ALERTS = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"alert_31523\",\n                \"severity\": 4,\n                \"timestamp\": \"2020-10-12T09:09:52.609Z\",\n                \"component\": {\n                    \"id\": \"Host_87\",\n                    \"resource\": \"host\"\n                },\n                \"messageId\": \"14:608fe\",\n                \"message\": \"Host hpux11iv2 does not have any initiators\",\n                \"description\": \"The host does not have any initiators.\"\n            }\n        }\n    ]\n}\nGET_ALL_ALERTS_NULL = {\n    \"entries\": []\n}\nalert_result = [\n    {\n        'severity': 'Warning',\n        'location': 'Host_87',\n        'occur_time': 1602464992000,\n        'type': 'EquipmentAlarm',\n        'alert_name': 'Host hpux11iv2 does not have any initiators',\n        'resource_type': 'Storage',\n        'alert_id': '14:608fe',\n        'description': 'The host does not have any initiators.',\n        'category': 'Fault',\n        'sequence_number': 'alert_31523',\n        'match_key': 'de23e7c25b5a46f029cb2f84f15a4a3a'\n    }\n]\nGET_ALL_CONTROLLERS = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"spa\",\n                \"parent\": {\n                    \"id\": \"dpe\",\n                    \"resource\": \"dpe\"\n                },\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n                },\n                \"needsReplacement\": \"false\",\n                \"isRescueMode\": \"false\",\n                \"model\": \"\",\n                \"slotNumber\": 0,\n                \"name\": \"SP A\",\n                \"emcPartNumber\": \"\",\n                \"emcSerialNumber\": \"VIRT2102W6CHH8\",\n                \"manufacturer\": \"\",\n                \"vendorPartNumber\": \"\",\n                \"vendorSerialNumber\": \"\",\n                \"sasExpanderVersion\": \"\",\n                \"memorySize\": 12288,\n                \"parentDpe\": {\n                    \"id\": \"dpe\"\n                }\n            }\n        }\n    ]\n}\ncontroller_result = [\n    {\n        'name': 'SP A',\n        'storage_id': '12345',\n        'native_controller_id': 'spa',\n        'status': 'normal',\n        'location': 0,\n        'memory_size': 12884901888\n    }\n]\nGET_ALL_DISKS = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"disk1\",\n                \"parent\": {\n                    \"id\": \"dpe\",\n                    \"resource\": \"dpe\"\n                },\n                \"health\": {\n                    \"value\": 5,\n            
        \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n                },\n                \"model\": \"ddd\",\n                \"slotNumber\": 12,\n                \"name\": \"disk1\",\n                \"version\": \"dddd\",\n                \"emcSerialNumber\": \"VIRT2102W6CHH8\",\n                \"manufacturer\": \"ibm\",\n                \"vendorPartNumber\": \"\",\n                \"vendorSerialNumber\": \"\",\n                \"sasExpanderVersion\": \"\",\n                \"rpm\": 12288,\n                \"size\": 12121212,\n                \"diskTechnology\": 1,\n                \"diskGroup\": {\n                    \"id\": \"dp1\"\n                }\n            }\n        }\n    ]\n}\ndisk_result = [\n    {\n        'name': 'disk1',\n        'storage_id': '12345',\n        'native_disk_id': 'disk1',\n        'serial_number': 'VIRT2102W6CHH8',\n        'manufacturer': 'ibm',\n        'model': 'ddd',\n        'firmware': 'dddd',\n        'speed': 12288,\n        'capacity': 12121212,\n        'status': 'normal',\n        'physical_type': 'sas',\n        'logical_type': '',\n        'native_disk_group_id': 'dp1',\n        'location': 'disk1'\n    }\n]\nGET_ALL_ETHPORTS = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"spa_eth0\",\n                \"speed\": 10000,\n                \"connectorType\": 1,\n                \"requestedSpeed\": 0,\n                \"supportedSpeeds\": [\n                    0\n                ],\n                \"sfpSupportedSpeeds\": [],\n                \"sfpSupportedProtocols\": [],\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_PORT_LINK_UP\"\n                    ],\n                    \"descriptions\": [\n                        \"The port is operating normally.\"\n                    ]\n                },\n                \"name\": \"SP A Ethernet Port 0\",\n                \"portNumber\": 0,\n                \"mtu\": 1500,\n                \"minMtu\": 46,\n                \"maxMtu\": 9000,\n                \"bond\": False,\n                \"isLinkUp\": True,\n                \"macAddress\": \"00:50:56:81:E1:50\",\n                \"isRSSCapable\": False,\n                \"isRDMACapable\": False,\n                \"requestedMtu\": 1500,\n                \"parent\": {\n                    \"id\": \"spa\",\n                    \"resource\": \"storageProcessor\"\n                },\n                \"storageProcessor\": {\n                    \"id\": \"spa\"\n                },\n                \"parentStorageProcessor\": {\n                    \"id\": \"spa\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"spa_eth1\",\n                \"speed\": 10000,\n                \"connectorType\": 1,\n                \"requestedSpeed\": 0,\n                \"supportedSpeeds\": [\n                    0\n                ],\n                \"sfpSupportedSpeeds\": [],\n                \"sfpSupportedProtocols\": [],\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_PORT_LINK_UP\"\n                    ],\n                    \"descriptions\": [\n                        \"The port is operating 
normally.\"\n                    ]\n                },\n                \"name\": \"SP A Ethernet Port 1\",\n                \"portNumber\": 1,\n                \"mtu\": 1500,\n                \"minMtu\": 46,\n                \"maxMtu\": 9000,\n                \"bond\": False,\n                \"isLinkUp\": True,\n                \"macAddress\": \"00:50:56:81:E8:4B\",\n                \"isRSSCapable\": False,\n                \"isRDMACapable\": False,\n                \"requestedMtu\": 1500,\n                \"parent\": {\n                    \"id\": \"spa\",\n                    \"resource\": \"storageProcessor\"\n                },\n                \"storageProcessor\": {\n                    \"id\": \"spa\"\n                },\n                \"parentStorageProcessor\": {\n                    \"id\": \"spa\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"spa_eth2\",\n                \"speed\": 10000,\n                \"connectorType\": 1,\n                \"requestedSpeed\": 0,\n                \"supportedSpeeds\": [\n                    0\n                ],\n                \"sfpSupportedSpeeds\": [],\n                \"sfpSupportedProtocols\": [],\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_PORT_LINK_UP\"\n                    ],\n                    \"descriptions\": [\n                        \"The port is operating normally.\"\n                    ]\n                },\n                \"name\": \"SP A Ethernet Port 2\",\n                \"portNumber\": 2,\n                \"mtu\": 1500,\n                \"minMtu\": 46,\n                \"maxMtu\": 9000,\n                \"bond\": False,\n                \"isLinkUp\": True,\n                \"macAddress\": \"00:50:56:81:11:EF\",\n                \"isRSSCapable\": False,\n                \"isRDMACapable\": False,\n                \"requestedMtu\": 1500,\n                \"parent\": {\n                    \"id\": \"spa\",\n                    \"resource\": \"storageProcessor\"\n                },\n                \"storageProcessor\": {\n                    \"id\": \"spa\"\n                },\n                \"parentStorageProcessor\": {\n                    \"id\": \"spa\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"spa_eth3\",\n                \"speed\": 10000,\n                \"connectorType\": 1,\n                \"requestedSpeed\": 0,\n                \"supportedSpeeds\": [\n                    0\n                ],\n                \"sfpSupportedSpeeds\": [],\n                \"sfpSupportedProtocols\": [],\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_PORT_LINK_UP\"\n                    ],\n                    \"descriptions\": [\n                        \"The port is operating normally.\"\n                    ]\n                },\n                \"name\": \"SP A Ethernet Port 3\",\n                \"portNumber\": 3,\n                \"mtu\": 1500,\n                \"minMtu\": 46,\n                \"maxMtu\": 9000,\n                \"isLinkUp\": True,\n                \"macAddress\": \"00:50:56:81:DB:5D\",\n                \"requestedMtu\": 1500,\n                \"parent\": {\n                    \"id\": \"spa\",\n                    \"resource\": \"storageProcessor\"\n                },\n               
 \"storageProcessor\": {\n                    \"id\": \"spa\"\n                },\n                \"parentStorageProcessor\": {\n                    \"id\": \"spa\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"spa_mgmt\",\n                \"speed\": 10000,\n                \"connectorType\": 1,\n                \"requestedSpeed\": 0,\n                \"supportedSpeeds\": [\n                    0\n                ],\n                \"sfpSupportedSpeeds\": [],\n                \"sfpSupportedProtocols\": [],\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_PORT_LINK_UP\"\n                    ],\n                    \"descriptions\": [\n                        \"The port is operating normally.\"\n                    ]\n                },\n                \"name\": \"SP A Management Port\",\n                \"portNumber\": 0,\n                \"mtu\": 1500,\n                \"minMtu\": 0,\n                \"maxMtu\": 0,\n                \"bond\": False,\n                \"isLinkUp\": True,\n                \"macAddress\": \"00:50:56:81:E5:05\",\n                \"isRSSCapable\": False,\n                \"isRDMACapable\": False,\n                \"requestedMtu\": 0,\n                \"parent\": {\n                    \"id\": \"spa\",\n                    \"resource\": \"storageProcessor\"\n                },\n                \"storageProcessor\": {\n                    \"id\": \"spa\"\n                },\n                \"parentStorageProcessor\": {\n                    \"id\": \"spa\"\n                }\n            }\n        }\n    ]\n}\nGET_ALL_IP = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"1\",\n                \"netmask\": \"255.255.255.0\",\n                \"ipAddress\": \"192.168.3.111\",\n                \"ipProtocolVersion\": 4,\n                \"ipPort\": {\n                    \"id\": \"spa_eth1\"\n                }\n            }\n        }\n    ]\n}\nGET_ALL_FCPORTS = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"spa_fc0\",\n                \"currentSpeed\": 10,\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_PORT_LINK_UP\"\n                    ],\n                    \"descriptions\": [\n                        \"The port is operating normally.\"\n                    ]\n                },\n                \"name\": \"SP A FC Port 0\",\n                \"portNumber\": 0,\n                \"isLinkUp\": True,\n                \"macAddress\": \"00:50:56:81:E1:50\",\n                \"wwn\": \"fffffffffff\",\n                \"isRDMACapable\": False,\n                \"requestedMtu\": 1500,\n                \"parent\": {\n                    \"id\": \"spa\",\n                    \"resource\": \"storageProcessor\"\n                },\n                \"storageProcessor\": {\n                    \"id\": \"spa\"\n                },\n                \"parentStorageProcessor\": {\n                    \"id\": \"spa\"\n                }\n            }\n        }\n    ]\n}\nport_result = [\n    {\n        'name': 'SP A Ethernet Port 0',\n        'storage_id': '12345',\n        'native_port_id': 'spa_eth0',\n        'location': 'SP A Ethernet Port 0',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'eth',\n   
     'logical_type': '',\n        'speed': 10000000000,\n        'max_speed': 10000000000,\n        'native_parent_id': 'spa',\n        'wwn': '',\n        'mac_address': '00:50:56:81:E1:50',\n        'ipv4': None,\n        'ipv4_mask': None,\n        'ipv6': None,\n        'ipv6_mask': None\n    }, {\n        'name': 'SP A Ethernet Port 1',\n        'storage_id': '12345',\n        'native_port_id': 'spa_eth1',\n        'location': 'SP A Ethernet Port 1',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'eth',\n        'logical_type': '',\n        'speed': 10000000000,\n        'max_speed': 10000000000,\n        'native_parent_id': 'spa',\n        'wwn': '',\n        'mac_address': '00:50:56:81:E8:4B',\n        'ipv4': '192.168.3.111',\n        'ipv4_mask': '255.255.255.0',\n        'ipv6': None,\n        'ipv6_mask': None\n    }, {\n        'name': 'SP A Ethernet Port 2',\n        'storage_id': '12345',\n        'native_port_id': 'spa_eth2',\n        'location': 'SP A Ethernet Port 2',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'eth',\n        'logical_type': '',\n        'speed': 10000000000,\n        'max_speed': 10000000000,\n        'native_parent_id': 'spa',\n        'wwn': '',\n        'mac_address': '00:50:56:81:11:EF',\n        'ipv4': None,\n        'ipv4_mask': None,\n        'ipv6': None,\n        'ipv6_mask': None\n    }, {\n        'name': 'SP A Ethernet Port 3',\n        'storage_id': '12345',\n        'native_port_id': 'spa_eth3',\n        'location': 'SP A Ethernet Port 3',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'eth',\n        'logical_type': '',\n        'speed': 10000000000,\n        'max_speed': 10000000000,\n        'native_parent_id': 'spa',\n        'wwn': '',\n        'mac_address': '00:50:56:81:DB:5D',\n        'ipv4': None,\n        'ipv4_mask': None,\n        'ipv6': None,\n        'ipv6_mask': None\n    }, {\n        'name': 'SP A Management Port',\n        'storage_id': '12345',\n        'native_port_id': 'spa_mgmt',\n        'location': 'SP A Management Port',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'eth',\n        'logical_type': '',\n        'speed': 10000000000,\n        'max_speed': 10000000000,\n        'native_parent_id': 'spa',\n        'wwn': '',\n        'mac_address': '00:50:56:81:E5:05',\n        'ipv4': None,\n        'ipv4_mask': None,\n        'ipv6': None,\n        'ipv6_mask': None\n    }, {\n        'name': 'SP A FC Port 0',\n        'storage_id': '12345',\n        'native_port_id': 'spa_fc0',\n        'location': 'SP A FC Port 0',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'fc',\n        'logical_type': '',\n        'speed': 10000000000,\n        'max_speed': 10000000000,\n        'native_parent_id': 'spa',\n        'wwn': 'fffffffffff'\n    }\n]\nGET_ALL_FILESYSTEMS = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"fs_1\",\n                \"type\": 1,\n                \"flrVersion\": 1,\n                \"supportedProtocols\": 2,\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n     
           },\n                \"name\": \"fs1\",\n                \"sizeTotal\": 5368709120,\n                \"sizeUsed\": 1622450176,\n                \"sizeAllocated\": 283148288,\n                \"isThinEnabled\": True,\n                \"storageResource\": {\n                    \"id\": \"res_1\"\n                },\n                \"pool\": {\n                    \"id\": \"pool_1\"\n                },\n                \"nasServer\": {\n                    \"id\": \"nas_1\"\n                },\n                \"cifsShare\": [\n                    {\n                        \"id\": \"SMBShare_2\"\n                    }\n                ],\n                \"nfsShare\": [\n                    {\n                        \"id\": \"NFSShare_2\"\n                    }\n                ]\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"fs_3\",\n                \"type\": 1,\n                \"flrVersion\": 2,\n                \"supportedProtocols\": 2,\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n                },\n                \"name\": \"ddd\",\n                \"sizeTotal\": 107374182400,\n                \"sizeUsed\": 1620303872,\n                \"sizeAllocated\": 283140096,\n                \"isThinEnabled\": True,\n                \"storageResource\": {\n                    \"id\": \"res_3\"\n                },\n                \"pool\": {\n                    \"id\": \"pool_1\"\n                },\n                \"nasServer\": {\n                    \"id\": \"nas_1\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"fs_5\",\n                \"type\": 1,\n                \"flrVersion\": 0,\n                \"supportedProtocols\": 2,\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n                },\n                \"name\": \"fs_home\",\n                \"sizeTotal\": 10737418240,\n                \"sizeUsed\": 1622458368,\n                \"sizeAllocated\": 283156480,\n                \"isThinEnabled\": True,\n                \"storageResource\": {\n                    \"id\": \"res_5\"\n                },\n                \"pool\": {\n                    \"id\": \"pool_1\"\n                },\n                \"nasServer\": {\n                    \"id\": \"nas_1\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"fs_16\",\n                \"type\": 1,\n                \"flrVersion\": 0,\n                \"supportedProtocols\": 2,\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n                },\n                \"name\": \"fs_boga\",\n                \"sizeTotal\": 5368709120,\n                \"sizeUsed\": 1622450176,\n           
     \"sizeAllocated\": 283148288,\n                \"isThinEnabled\": True,\n                \"storageResource\": {\n                    \"id\": \"res_16\"\n                },\n                \"pool\": {\n                    \"id\": \"pool_1\"\n                },\n                \"nasServer\": {\n                    \"id\": \"nas_1\"\n                },\n                \"cifsShare\": [\n                    {\n                        \"id\": \"SMBShare_14\"\n                    }\n                ],\n                \"nfsShare\": [\n                    {\n                        \"id\": \"NFSShare_14\"\n                    }\n                ]\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"fs_20\",\n                \"type\": 1,\n                \"flrVersion\": 0,\n                \"supportedProtocols\": 2,\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally\"\n                    ]\n                },\n                \"name\": \"fs2\",\n                \"sizeTotal\": 5368709120,\n                \"sizeUsed\": 1622450176,\n                \"sizeAllocated\": 283148288,\n                \"isThinEnabled\": True,\n                \"storageResource\": {\n                    \"id\": \"res_20\"\n                },\n                \"pool\": {\n                    \"id\": \"pool_1\"\n                },\n                \"nasServer\": {\n                    \"id\": \"nas_1\"\n                },\n                \"cifsShare\": [\n                    {\n                        \"id\": \"SMBShare_18\"\n                    }\n                ],\n                \"nfsShare\": [\n                    {\n                        \"id\": \"NFSShare_18\"\n                    }\n                ]\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"fs_22\",\n                \"type\": 1,\n                \"flrVersion\": 0,\n                \"supportedProtocols\": 2,\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n                },\n                \"name\": \"FS_MULTI1\",\n                \"sizeTotal\": 107374182400,\n                \"sizeUsed\": 1620303872,\n                \"sizeAllocated\": 283140096,\n                \"isThinEnabled\": True,\n                \"storageResource\": {\n                    \"id\": \"res_22\"\n                },\n                \"pool\": {\n                    \"id\": \"pool_1\"\n                },\n                \"nasServer\": {\n                    \"id\": \"nas_1\"\n                },\n                \"nfsShare\": [\n                    {\n                        \"id\": \"NFSShare_19\"\n                    }\n                ]\n            }\n        }\n    ]\n}\nfilesystem_result = [\n    {\n        'name': 'fs1',\n        'storage_id': '12345',\n        'native_filesystem_id': 'fs_1',\n        'native_pool_id': 'pool_1',\n        'status': 'normal',\n        'type': 'thin',\n        'total_capacity': 5368709120,\n        'used_capacity': 1622450176,\n        'free_capacity': 3746258944,\n        
'worm': 'enterprise',\n        'security_mode': 'native'\n    }, {\n        'name': 'ddd',\n        'storage_id': '12345',\n        'native_filesystem_id': 'fs_3',\n        'native_pool_id': 'pool_1',\n        'status': 'normal',\n        'type': 'thin',\n        'total_capacity': 107374182400,\n        'used_capacity': 1620303872,\n        'free_capacity': 105753878528,\n        'worm': 'compliance',\n        'security_mode': 'native'\n    }, {\n        'name': 'fs_home',\n        'storage_id': '12345',\n        'native_filesystem_id': 'fs_5',\n        'native_pool_id': 'pool_1',\n        'status': 'normal',\n        'type': 'thin',\n        'total_capacity': 10737418240,\n        'used_capacity': 1622458368,\n        'free_capacity': 9114959872,\n        'worm': 'non_worm',\n        'security_mode': 'native'\n    }, {\n        'name': 'fs_boga',\n        'storage_id': '12345',\n        'native_filesystem_id': 'fs_16',\n        'native_pool_id': 'pool_1',\n        'status': 'normal',\n        'type': 'thin',\n        'total_capacity': 5368709120,\n        'used_capacity': 1622450176,\n        'free_capacity': 3746258944,\n        'worm': 'non_worm',\n        'security_mode': 'native'\n    }, {\n        'name': 'fs2',\n        'storage_id': '12345',\n        'native_filesystem_id': 'fs_20',\n        'native_pool_id': 'pool_1',\n        'status': 'normal',\n        'type': 'thin',\n        'total_capacity': 5368709120,\n        'used_capacity': 1622450176,\n        'free_capacity': 3746258944,\n        'worm': 'non_worm',\n        'security_mode': 'native'\n    }, {\n        'name': 'FS_MULTI1',\n        'storage_id': '12345',\n        'native_filesystem_id': 'fs_22',\n        'native_pool_id': 'pool_1',\n        'status': 'normal',\n        'type': 'thin',\n        'total_capacity': 107374182400,\n        'used_capacity': 1620303872,\n        'free_capacity': 105753878528,\n        'worm': 'non_worm',\n        'security_mode': 'native'\n    }\n]\nGET_ALL_QTREE = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"qtree_1\",\n                \"hardLimit\": 1000,\n                \"softLimit\": 1110,\n                \"sizeUsed\": 20000000,\n                \"path\": \"/\",\n                \"filesystem\": {\n                    \"id\": \"filesystem_1\"\n                },\n                \"quotaConfig\": {\n                    \"id\": \"quotaConfig_1\"\n                }\n            }\n        }\n    ]\n}\nqtree_result = [\n    {\n        'name': '/',\n        'storage_id': '12345',\n        'native_qtree_id': 'qtree_1',\n        'native_filesystem_id': 'filesystem_1',\n        'path': '/'\n    }\n]\nGET_ALL_CIFSSHARE = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"SMBShare_2\",\n                \"type\": 1,\n                \"name\": \"fs1\",\n                \"path\": \"/\",\n                \"filesystem\": {\n                    \"id\": \"fs_1\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"SMBShare_14\",\n                \"type\": 1,\n                \"name\": \"boga\",\n                \"path\": \"/\",\n                \"filesystem\": {\n                    \"id\": \"fs_16\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"SMBShare_18\",\n                \"type\": 1,\n                \"name\": \"fs2\",\n                \"path\": \"/\",\n                \"filesystem\": {\n          
          \"id\": \"fs_20\"\n                }\n            }\n        }\n    ]\n}\nGET_ALL_NFSSHARE = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"NFSShare_2\",\n                \"type\": 1,\n                \"role\": 0,\n                \"name\": \"fs1\",\n                \"path\": \"/\",\n                \"filesystem\": {\n                    \"id\": \"fs_1\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"NFSShare_14\",\n                \"type\": 1,\n                \"role\": 0,\n                \"name\": \"boga\",\n                \"path\": \"/\",\n                \"filesystem\": {\n                    \"id\": \"fs_16\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"NFSShare_18\",\n                \"type\": 1,\n                \"role\": 0,\n                \"name\": \"fs2\",\n                \"path\": \"/\",\n                \"filesystem\": {\n                    \"id\": \"fs_20\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"NFSShare_19\",\n                \"type\": 1,\n                \"role\": 0,\n                \"name\": \"FS_MULTI1\",\n                \"path\": \"/\",\n                \"filesystem\": {\n                    \"id\": \"fs_22\"\n                }\n            }\n        }\n    ]\n}\nshare_result = [\n    {\n        'name': 'fs1',\n        'storage_id': '12345',\n        'native_share_id': 'SMBShare_2',\n        'native_qtree_id': 'qtree_1',\n        'native_filesystem_id': 'fs_1',\n        'path': '/fs1/',\n        'protocol': 'cifs'\n    }, {\n        'name': 'boga',\n        'storage_id': '12345',\n        'native_share_id': 'SMBShare_14',\n        'native_qtree_id': 'qtree_1',\n        'native_filesystem_id': 'fs_16',\n        'path': '/fs_boga/',\n        'protocol': 'cifs'\n    }, {\n        'name': 'fs2',\n        'storage_id': '12345',\n        'native_share_id': 'SMBShare_18',\n        'native_qtree_id': 'qtree_1',\n        'native_filesystem_id': 'fs_20',\n        'path': '/fs2/',\n        'protocol': 'cifs'\n    }, {\n        'name': 'fs1',\n        'storage_id': '12345',\n        'native_share_id': 'NFSShare_2',\n        'native_qtree_id': 'qtree_1',\n        'native_filesystem_id': 'fs_1',\n        'path': '/fs1/',\n        'protocol': 'nfs'\n    }, {\n        'name': 'boga',\n        'storage_id': '12345',\n        'native_share_id': 'NFSShare_14',\n        'native_qtree_id': 'qtree_1',\n        'native_filesystem_id': 'fs_16',\n        'path': '/fs_boga/',\n        'protocol': 'nfs'\n    }, {\n        'name': 'fs2',\n        'storage_id': '12345',\n        'native_share_id': 'NFSShare_18',\n        'native_qtree_id': 'qtree_1',\n        'native_filesystem_id': 'fs_20',\n        'path': '/fs2/',\n        'protocol': 'nfs'\n    }, {\n        'name': 'FS_MULTI1',\n        'storage_id': '12345',\n        'native_share_id': 'NFSShare_19',\n        'native_qtree_id': 'qtree_1',\n        'native_filesystem_id': 'fs_22',\n        'path': '/FS_MULTI1/',\n        'protocol': 'nfs'\n    }\n]\nGET_ALL_QUOTACONFIG = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"quotaConfig_1\",\n                \"isAccessDenyEnabled\": True,\n                \"quotaPolicy\": 0,\n                \"isUserQuotaEnabled\": True,\n                \"filesystem\": {\n                    \"id\": \"filesystem_1\"\n   
             },\n                \"treeQuota\": {\n                    \"id\": \"qtree_1\"\n                }\n            }\n        }\n    ]\n}\nGET_ALL_USERQUOTA = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"user_1\",\n                \"hardLimit\": 1000,\n                \"softLimit\": 1110,\n                \"sizeUsed\": 20000000,\n                \"path\": \"/\",\n                \"uid\": 1111,\n                \"filesystem\": {\n                    \"id\": \"filesystem_1\"\n                },\n                \"treeQuota\": {\n                    \"id\": \"qtree_1\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"user_2\",\n                \"hardLimit\": 1000,\n                \"softLimit\": 1110,\n                \"sizeUsed\": 20000000,\n                \"path\": \"/\",\n                \"uid\": 22222,\n                \"filesystem\": {\n                    \"id\": \"filesystem_1\"\n                }\n            }\n        }\n    ]\n}\nquota_result = [\n    {\n        'native_quota_id': 'qtree_1',\n        'type': 'tree',\n        'storage_id': '12345',\n        'native_filesystem_id': 'filesystem_1',\n        'native_qtree_id': 'qtree_1',\n        'capacity_hard_limit': 1000,\n        'capacity_soft_limit': 1110,\n        'used_capacity': 20000000\n    }, {\n        'native_quota_id': 'user_1',\n        'type': 'user',\n        'storage_id': '12345',\n        'native_filesystem_id': 'filesystem_1',\n        'native_qtree_id': 'qtree_1',\n        'capacity_hard_limit': 1000,\n        'capacity_soft_limit': 1110,\n        'used_capacity': 20000000,\n        'user_group_name': '1111'\n    }, {\n        'native_quota_id': 'user_2',\n        'type': 'user',\n        'storage_id': '12345',\n        'native_filesystem_id': 'filesystem_1',\n        'native_qtree_id': None,\n        'capacity_hard_limit': 1000,\n        'capacity_soft_limit': 1110,\n        'used_capacity': 20000000,\n        'user_group_name': '22222'\n    }\n]\nGET_ETH_PORT_READ_THR_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"spa_eth0\": \"10000\",\n                        \"spa_eth1\": \"20000\",\n                        \"spa_eth2\": \"30000\",\n                        \"spa_eth3\": \"40000\"\n                    },\n                    \"spb\": {\n                        \"spa_eth0\": \"10000\",\n                        \"spa_eth1\": \"20000\",\n                        \"spa_eth2\": \"30000\",\n                        \"spa_eth3\": \"40000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"spa_eth0\": \"40000\",\n                        \"spa_eth1\": \"30000\",\n                        \"spa_eth2\": \"20000\",\n                        \"spa_eth3\": \"10000\"\n                    },\n                    \"spb\": {\n                        \"spa_eth0\": \"40000\",\n                        \"spa_eth1\": \"30000\",\n                        \"spa_eth2\": 
\"20000\",\n                        \"spa_eth3\": \"10000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_ETH_PORT_READ_THR_PERF_NULL = {\n    \"entries\": []\n}\nGET_ETH_PORT_WRITE_THR_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"spa_eth0\": \"90000\",\n                        \"spa_eth1\": \"80000\",\n                        \"spa_eth2\": \"70000\",\n                        \"spa_eth3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"spa_eth0\": \"60000\",\n                        \"spa_eth1\": \"70000\",\n                        \"spa_eth2\": \"80000\",\n                        \"spa_eth3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_ETH_PORT_WRITE_THR_PERF_NULL = {\n    \"entries\": []\n}\nGET_FC_PORT_READ_THR_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"spa_fc0\": \"10000\",\n                        \"spa_fc1\": \"20000\",\n                        \"spa_fc2\": \"30000\",\n                        \"spa_fc3\": \"40000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"spa_fc0\": \"40000\",\n                        \"spa_fc1\": \"30000\",\n                        \"spa_fc2\": \"20000\",\n                        \"spa_fc3\": \"10000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_FC_PORT_READ_THR_PERF_NULL = {\n    \"entries\": []\n}\nGET_FC_PORT_WRITE_THR_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"spa_fc0\": \"90000\",\n                        \"spa_fc1\": \"80000\",\n                        \"spa_fc2\": \"70000\",\n                        \"spa_fc3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"spa_fc0\": \"60000\",\n                        \"spa_fc1\": \"70000\",\n                        \"spa_fc2\": \"80000\",\n                        \"spa_fc3\": 
\"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_FC_PORT_WRITE_THR_PERF_NULL = {\n    \"entries\": []\n}\nGET_FC_PORT_READ_IOPS_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"spa_fc0\": \"10000\",\n                        \"spa_fc1\": \"20000\",\n                        \"spa_fc2\": \"30000\",\n                        \"spa_fc3\": \"40000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"spa_fc0\": \"40000\",\n                        \"spa_fc1\": \"30000\",\n                        \"spa_fc2\": \"20000\",\n                        \"spa_fc3\": \"10000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_FC_PORT_READ_IOPS_PERF_NULL = {\n    \"entries\": []\n}\nGET_FC_PORT_WRITE_IOPS_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"spa_fc0\": \"90000\",\n                        \"spa_fc1\": \"80000\",\n                        \"spa_fc2\": \"70000\",\n                        \"spa_fc3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"spa_fc0\": \"60000\",\n                        \"spa_fc1\": \"70000\",\n                        \"spa_fc2\": \"80000\",\n                        \"spa_fc3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_FC_PORT_WRITE_IOPS_PERF_NULL = {\n    \"entries\": []\n}\nGET_VOLUME_READ_THR_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"volume0\": \"10000\",\n                        \"volume1\": \"20000\",\n                        \"volume2\": \"30000\",\n                        \"volume3\": \"40000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"volume0\": \"40000\",\n                        \"volume1\": \"30000\",\n                        \"volume2\": \"20000\",\n                        \"volume3\": \"10000\"\n                    }\n                }\n          
  }\n        }\n    ]\n}\nGET_VOLUME_READ_THR_PERF_NULL = {\n    \"entries\": []\n}\nGET_VOLUME_WRITE_THR_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"volume0\": \"90000\",\n                        \"volume1\": \"80000\",\n                        \"volume2\": \"70000\",\n                        \"volume3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"volume0\": \"60000\",\n                        \"volume1\": \"70000\",\n                        \"volume2\": \"80000\",\n                        \"volume3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_VOLUME_WRITE_THR_PERF_NULL = {\n    \"entries\": []\n}\nGET_VOLUME_READ_IOPS_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"volume0\": \"10000\",\n                        \"volume1\": \"20000\",\n                        \"volume2\": \"30000\",\n                        \"volume3\": \"40000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"volume0\": \"40000\",\n                        \"volume1\": \"30000\",\n                        \"volume2\": \"20000\",\n                        \"volume3\": \"10000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_VOLUME_READ_IOPS_PERF_NULL = {\n    \"entries\": []\n}\nGET_VOLUME_WRITE_IOPS_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"volume0\": \"90000\",\n                        \"volume1\": \"80000\",\n                        \"volume2\": \"70000\",\n                        \"volume3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"volume0\": \"60000\",\n                        \"volume1\": \"70000\",\n                        \"volume2\": \"80000\",\n                        \"volume3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_VOLUME_WRITE_IOPS_PERF_NULL = {\n    
\"entries\": []\n}\nGET_VOLUME_READ_IO_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"volume0\": \"10000\",\n                        \"volume1\": \"20000\",\n                        \"volume2\": \"30000\",\n                        \"volume3\": \"40000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"volume0\": \"40000\",\n                        \"volume1\": \"30000\",\n                        \"volume2\": \"20000\",\n                        \"volume3\": \"10000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_VOLUME_READ_IO_PERF_NULL = {\n    \"entries\": []\n}\nGET_VOLUME_WRITE_IO_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"volume0\": \"90000\",\n                        \"volume1\": \"80000\",\n                        \"volume2\": \"70000\",\n                        \"volume3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"volume0\": \"60000\",\n                        \"volume1\": \"70000\",\n                        \"volume2\": \"80000\",\n                        \"volume3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_VOLUME_WRITE_IO_PERF_NULL = {\n    \"entries\": []\n}\nGET_VOLUME_RESPONSE_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"volume0\": \"90000\",\n                        \"volume1\": \"80000\",\n                        \"volume2\": \"70000\",\n                        \"volume3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"volume0\": \"60000\",\n                        \"volume1\": \"70000\",\n                        \"volume2\": \"80000\",\n                        \"volume3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_VOLUME_RESPONSE_PERF_NULL = {\n    \"entries\": []\n}\nGET_DISK_READ_THR_PERF = {\n    \"entries\": [\n        
{\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"DISK0\": \"10000\",\n                        \"DISK1\": \"20000\",\n                        \"DISK2\": \"30000\",\n                        \"DISK3\": \"40000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"DISK0\": \"40000\",\n                        \"DISK1\": \"30000\",\n                        \"DISK2\": \"20000\",\n                        \"DISK3\": \"10000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_DISK_READ_THR_PERF_NULL = {\n    \"entries\": []\n}\nGET_DISK_WRITE_THR_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"DISK0\": \"90000\",\n                        \"DISK1\": \"80000\",\n                        \"DISK2\": \"70000\",\n                        \"DISK3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"DISK0\": \"60000\",\n                        \"DISK1\": \"70000\",\n                        \"DISK2\": \"80000\",\n                        \"DISK3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_DISK_WRITE_THR_PERF_NULL = {\n    \"entries\": []\n}\nGET_DISK_READ_IOPS_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"DISK0\": \"10000\",\n                        \"DISK1\": \"20000\",\n                        \"DISK2\": \"30000\",\n                        \"DISK3\": \"40000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"DISK0\": \"40000\",\n                        \"DISK1\": \"30000\",\n                        \"DISK2\": \"20000\",\n                        \"DISK3\": \"10000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_DISK_READ_IOPS_PERF_NULL = {\n    \"entries\": []\n}\nGET_DISK_WRITE_IOPS_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n     
           \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"DISK0\": \"90000\",\n                        \"DISK1\": \"80000\",\n                        \"DISK2\": \"70000\",\n                        \"DISK3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"DISK0\": \"60000\",\n                        \"DISK1\": \"70000\",\n                        \"DISK2\": \"80000\",\n                        \"DISK3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_DISK_WRITE_IOPS_PERF_NULL = {\n    \"entries\": []\n}\nGET_DISK_RESPONSE_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"DISK0\": \"90000\",\n                        \"DISK1\": \"80000\",\n                        \"DISK2\": \"70000\",\n                        \"DISK3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"DISK0\": \"60000\",\n                        \"DISK1\": \"70000\",\n                        \"DISK2\": \"80000\",\n                        \"DISK3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_DISK_RESPONSE_PERF_NULL = {\n    \"entries\": []\n}\nGET_FILE_READ_THR_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"FILE0\": \"10000\",\n                        \"FILE1\": \"20000\",\n                        \"FILE2\": \"30000\",\n                        \"FILE3\": \"40000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"FILE0\": \"40000\",\n                        \"FILE1\": \"30000\",\n                        \"FILE2\": \"20000\",\n                        \"FILE3\": \"10000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_FILE_READ_THR_PERF_NULL = {\n    \"entries\": []\n}\nGET_FILE_WRITE_THR_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n            
            \"FILE0\": \"90000\",\n                        \"FILE1\": \"80000\",\n                        \"FILE2\": \"70000\",\n                        \"FILE3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"FILE0\": \"60000\",\n                        \"FILE1\": \"70000\",\n                        \"FILE2\": \"80000\",\n                        \"FILE3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_FILE_WRITE_THR_PERF_NULL = {\n    \"entries\": []\n}\nGET_FILE_READ_IOPS_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"FILE0\": \"10000\",\n                        \"FILE1\": \"20000\",\n                        \"FILE2\": \"30000\",\n                        \"FILE3\": \"40000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"FILE0\": \"40000\",\n                        \"FILE1\": \"30000\",\n                        \"FILE2\": \"20000\",\n                        \"FILE3\": \"10000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_FILE_READ_IOPS_PERF_NULL = {\n    \"entries\": []\n}\nGET_FILE_WRITE_IOPS_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"FILE0\": \"90000\",\n                        \"FILE1\": \"80000\",\n                        \"FILE2\": \"70000\",\n                        \"FILE3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"FILE0\": \"60000\",\n                        \"FILE1\": \"70000\",\n                        \"FILE2\": \"80000\",\n                        \"FILE3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_FILE_WRITE_IOPS_PERF_NULL = {\n    \"entries\": []\n}\nGET_FILE_READ_IO_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"FILE0\": \"10000\",\n                        \"FILE1\": \"20000\",\n                        \"FILE2\": 
\"30000\",\n                        \"FILE3\": \"40000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"FILE0\": \"40000\",\n                        \"FILE1\": \"30000\",\n                        \"FILE2\": \"20000\",\n                        \"FILE3\": \"10000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_FILE_READ_IO_PERF_NULL = {\n    \"entries\": []\n}\nGET_FILE_WRITE_IO_PERF = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:47:10.000Z\",\n                \"values\": {\n                    \"spa\": {\n                        \"FILE0\": \"90000\",\n                        \"FILE1\": \"80000\",\n                        \"FILE2\": \"70000\",\n                        \"FILE3\": \"60000\"\n                    }\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"queryId\": 46,\n                \"path\": \"sp.*.net.device.*.bytesOut\",\n                \"timestamp\": \"2021-07-08T06:46:10.000Z\",\n                \"values\": {\n                    \"spb\": {\n                        \"FILE0\": \"60000\",\n                        \"FILE1\": \"70000\",\n                        \"FILE2\": \"80000\",\n                        \"FILE3\": \"90000\"\n                    }\n                }\n            }\n        }\n    ]\n}\nGET_FILE_WRITE_IO_PERF_NULL = {\n    \"entries\": []\n}\nresource_metrics = {\n    'volume': [\n        'iops', 'readIops', 'writeIops',\n        'throughput', 'readThroughput', 'writeThroughput',\n        'responseTime',\n        'ioSize', 'readIoSize', 'writeIoSize',\n    ],\n    'port': [\n        'iops', 'readIops', 'writeIops',\n        'throughput', 'readThroughput', 'writeThroughput'\n    ],\n    'disk': [\n        'iops', 'readIops', 'writeIops',\n        'throughput', 'readThroughput', 'writeThroughput',\n        'responseTime'\n    ],\n    'filesystem': [\n        'iops', 'readIops', 'writeIops',\n        'throughput', 'readThroughput', 'writeThroughput',\n        'readIoSize', 'writeIoSize'\n    ]\n}\nGET_ALL_INIT = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"init1\",\n                \"type\": 1,\n                \"initiatorId\": \"fs1\",\n                \"path\": \"/\",\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n                },\n                \"parentHost\": {\n                    \"id\": \"fs_1\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"init14\",\n                \"type\": 1,\n                \"initiatorId\": \"boga\",\n                \"path\": \"/\",\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n            
        \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n                },\n                \"parentHost\": {\n                    \"id\": \"fs_16\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"init11\",\n                \"type\": 2,\n                \"initiatorId\": \"fs2\",\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n                },\n                \"parentHost\": {\n                    \"id\": \"host_20\"\n                }\n            }\n        }\n    ]\n}\nGET_ALL_INIT_NULL = {\n    \"entries\": [\n    ]\n}\nINIT_RESULT = [\n    {\n        'name': 'fs1',\n        'storage_id': '12345',\n        'native_storage_host_initiator_id': 'init1',\n        'wwn': 'fs1',\n        'status': 'online',\n        'type': 'fc',\n        'native_storage_host_id': 'fs_1'\n    }, {\n        'name': 'boga',\n        'storage_id': '12345',\n        'native_storage_host_initiator_id': 'init14',\n        'wwn': 'boga',\n        'status': 'online',\n        'type': 'fc',\n        'native_storage_host_id': 'fs_16'\n    }, {\n        'name': 'fs2',\n        'storage_id': '12345',\n        'native_storage_host_initiator_id': 'init11',\n        'wwn': 'fs2',\n        'status': 'online',\n        'type': 'iscsi',\n        'native_storage_host_id': 'host_20'\n    }\n]\nGET_ALL_HOST = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"host1\",\n                \"type\": 1,\n                \"name\": \"fs1\",\n                \"description\": \"test\",\n                \"osType\": \"AIX\",\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n                },\n                \"parentHost\": {\n                    \"id\": \"fs_1\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"host2\",\n                \"type\": 1,\n                \"name\": \"boga\",\n                \"description\": \"test\",\n                \"osType\": \"Citrix XenServer\",\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The component is operating normally.\"\n                    ]\n                },\n                \"parentHost\": {\n                    \"id\": \"fs_16\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"host3\",\n                \"type\": 2,\n                \"name\": \"fs2\",\n                \"description\": \"test\",\n                \"osType\": \"VMware ESXi 6.5\",\n                \"health\": {\n                    \"value\": 5,\n                    \"descriptionIds\": [\n                        \"ALRT_COMPONENT_OK\"\n                    ],\n                    \"descriptions\": [\n                        \"The 
component is operating normally.\"\n                    ]\n                },\n                \"parentHost\": {\n                    \"id\": \"host_20\"\n                }\n            }\n        }\n    ]\n}\nGET_ALL_HOST_NULL = {\n    \"entries\": [\n    ]\n}\nGET_HOST_IP = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"ip1\",\n                \"address\": \"1.1.1.1\",\n                \"host\": {\n                    \"id\": \"host1\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"ip1\",\n                \"address\": \"1.1.1.2\",\n                \"host\": {\n                    \"id\": \"host2\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"ip1\",\n                \"address\": \"1.1.1.1\",\n                \"host\": {\n                    \"id\": \"host3\"\n                }\n            }\n        }\n    ]\n}\nHOST_RESULT = [\n    {\n        'name': 'fs1',\n        'description': 'test',\n        'storage_id': '12345',\n        'native_storage_host_id': 'host1',\n        'os_type': 'AIX',\n        'status': 'normal',\n        'ip_address': '1.1.1.1'\n    }, {\n        'name': 'boga',\n        'description': 'test',\n        'storage_id': '12345',\n        'native_storage_host_id': 'host2',\n        'os_type': 'XenServer',\n        'status': 'normal',\n        'ip_address': '1.1.1.2'\n    }, {\n        'name': 'fs2',\n        'description': 'test',\n        'storage_id': '12345',\n        'native_storage_host_id': 'host3',\n        'os_type': 'VMware ESX',\n        'status': 'normal',\n        'ip_address': '1.1.1.1'\n    }\n]\nGET_HOST_LUN = {\n    \"entries\": [\n        {\n            \"content\": {\n                \"id\": \"1\",\n                \"lun\": {\n                    \"id\": \"lun1\"\n                },\n                \"host\": {\n                    \"id\": \"host1\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"2\",\n                \"lun\": {\n                    \"id\": \"lun2\"\n                },\n                \"host\": {\n                    \"id\": \"host2\"\n                }\n            }\n        },\n        {\n            \"content\": {\n                \"id\": \"3\",\n                \"lun\": {\n                    \"id\": \"lun3\"\n                },\n                \"host\": {\n                    \"id\": \"host3\"\n                }\n            }\n        }\n    ]\n}\nGET_HOST_LUN_NULL = {\n    \"entries\": [\n    ]\n}\nVIEW_RESULT = [\n    {\n        'name': '1',\n        'native_storage_host_id': 'host1',\n        'storage_id': '12345',\n        'native_volume_id': 'lun1',\n        'native_masking_view_id': '1'\n    }, {\n        'name': '2',\n        'native_storage_host_id': 'host2',\n        'storage_id': '12345',\n        'native_volume_id': 'lun2',\n        'native_masking_view_id': '2'\n    }, {\n        'name': '3',\n        'native_storage_host_id': 'host3',\n        'storage_id': '12345',\n        'native_volume_id': 'lun3',\n        'native_masking_view_id': '3'\n    }\n]\n\n\nclass TestUNITYStorDriver(TestCase):\n\n    @mock.patch.object(RestHandler, 'get_all_pools')\n    def test_list_storage_pools(self, mock_pool):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_pool.return_value = GET_ALL_POOLS\n        pool = UnityStorDriver(**ACCESS_INFO).list_storage_pools(context)\n      
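  # First pass checks the healthy pool fixture; the second checks the abnormal one.\n      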
  self.assertDictEqual(pool[0], pool_result[0])\n        mock_pool.return_value = GET_ALL_ABNORMAL_POOLS\n        pool = UnityStorDriver(**ACCESS_INFO).list_storage_pools(context)\n        self.assertDictEqual(pool[0], pool_abnormal_result[0])\n\n    @mock.patch.object(RestHandler, 'get_all_disks')\n    @mock.patch.object(RestHandler, 'get_storage')\n    @mock.patch.object(RestHandler, 'get_capacity')\n    @mock.patch.object(RestHandler, 'get_soft_version')\n    def test_get_storage(self, mock_version, mock_capa, mock_base, mock_disk):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_version.return_value = GET_SOFT_VERSION\n        mock_capa.return_value = GET_CAPACITY\n        mock_base.return_value = GET_STORAGE_ABNORMAL\n        mock_disk.return_value = GET_ALL_DISKS\n        storage = UnityStorDriver(**ACCESS_INFO).get_storage(context)\n        self.assertDictEqual(storage, storage_abnormal_result)\n        mock_base.return_value = GET_STORAGE_NORMAL\n        storage = UnityStorDriver(**ACCESS_INFO).get_storage(context)\n        self.assertDictEqual(storage, storage_normal_result)\n\n    @mock.patch.object(RestHandler, 'get_all_luns')\n    def test_list_volumes(self, mock_lun):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_lun.side_effect = [GET_ALL_LUNS, GET_ALL_LUNS_NULL]\n        volume = UnityStorDriver(**ACCESS_INFO).list_volumes(context)\n        self.assertDictEqual(volume[0], volume_result[0])\n\n    def test_parse_alert(self):\n        RestHandler.login = mock.Mock(return_value=None)\n        trap = UnityStorDriver(**ACCESS_INFO).parse_alert(context, TRAP_INFO)\n        trap['occur_time'] = int(1605852610000)\n        self.assertEqual(trap, trap_result)\n        trap = UnityStorDriver(**ACCESS_INFO).parse_alert(\n            context, TRAP_NOT_IN_MAPPPING_INFO)\n        trap['occur_time'] = int(1605852610000)\n        self.assertEqual(trap, trap_not_in_mapping_result)\n\n    @mock.patch.object(RestHandler, 'remove_alert')\n    def test_clear_alert(self, mock_remove):\n        RestHandler.login = mock.Mock(return_value=None)\n        alert_id = 101\n        UnityStorDriver(**ACCESS_INFO).clear_alert(context, alert_id)\n        self.assertEqual(mock_remove.call_count, 1)\n\n    @mock.patch.object(RestHandler, 'get_all_alerts')\n    def test_list_alerts(self, mock_alert):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_alert.side_effect = [GET_ALL_ALERTS, GET_ALL_ALERTS_NULL]\n        alert = UnityStorDriver(**ACCESS_INFO).list_alerts(context)\n        alert_result[0]['occur_time'] = alert[0]['occur_time']\n        self.assertEqual(alert[0], alert_result[0])\n\n    @mock.patch.object(RestHandler, 'call_with_token')\n    def test_call_and_login(self, mock_token):\n        with self.assertRaises(Exception) as exc:\n            mock_token.return_value = mock.MagicMock(status_code=401,\n                                                     text='Unauthorized')\n            UnityStorDriver(**ACCESS_INFO).rest_handler.login()\n        self.assertEqual('Invalid username or password.', str(exc.exception))\n        with self.assertRaises(Exception) as exc:\n            mock_token.return_value = mock.MagicMock(status_code=401,\n                                                     text='Forbidden')\n            UnityStorDriver(**ACCESS_INFO).rest_handler.login()\n        self.assertEqual('Invalid ip or port.', str(exc.exception))\n        with self.assertRaises(Exception) as exc:\n            mock_token.return_value = 
mock.MagicMock(status_code=503)\n            UnityStorDriver(**ACCESS_INFO).rest_handler.call('')\n        self.assertIn('Exception from Storage Backend', str(exc.exception))\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_token.return_value = mock.MagicMock(status_code=401)\n        UnityStorDriver(**ACCESS_INFO).rest_handler.call('')\n\n    @mock.patch.object(RestHandler, 'call')\n    def test_get_rest_info(self, mock_rest):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_rest.return_value = mock.MagicMock(status_code=200)\n        UnityStorDriver(**ACCESS_INFO).rest_handler.get_rest_info('')\n        self.assertEqual(mock_rest.call_count, 1)\n\n    @mock.patch.object(RestHandler, 'get_all_controllers')\n    def test_list_controllers(self, mock_controller):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_controller.return_value = GET_ALL_CONTROLLERS\n        controller = UnityStorDriver(**ACCESS_INFO).list_controllers(context)\n        self.assertEqual(controller, controller_result)\n\n    @mock.patch.object(RestHandler, 'get_all_disks')\n    def test_list_disks(self, mock_disk):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_disk.return_value = GET_ALL_DISKS\n        disk = UnityStorDriver(**ACCESS_INFO).list_disks(context)\n        self.assertEqual(disk, disk_result)\n\n    @mock.patch.object(RestHandler, 'get_all_ethports')\n    @mock.patch.object(RestHandler, 'get_port_interface')\n    @mock.patch.object(RestHandler, 'get_all_fcports')\n    def test_list_ports(self, mock_fc, mock_ip, mock_eth):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_eth.return_value = GET_ALL_ETHPORTS\n        mock_ip.return_value = GET_ALL_IP\n        mock_fc.return_value = GET_ALL_FCPORTS\n        port = UnityStorDriver(**ACCESS_INFO).list_ports(context)\n        self.assertEqual(port, port_result)\n\n    @mock.patch.object(RestHandler, 'get_all_filesystems')\n    def test_list_filesystems(self, mock_filesystem):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_filesystem.return_value = GET_ALL_FILESYSTEMS\n        file = UnityStorDriver(**ACCESS_INFO).list_filesystems(context)\n        self.assertEqual(file, filesystem_result)\n\n    @mock.patch.object(RestHandler, 'get_all_qtrees')\n    def test_list_qtrees(self, mock_qtree):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_qtree.return_value = GET_ALL_QTREE\n        qtree = UnityStorDriver(**ACCESS_INFO).list_qtrees(context)\n        self.assertEqual(qtree, qtree_result)\n\n    @mock.patch.object(RestHandler, 'get_all_nfsshares')\n    @mock.patch.object(RestHandler, 'get_all_cifsshares')\n    @mock.patch.object(RestHandler, 'get_all_qtrees')\n    @mock.patch.object(RestHandler, 'get_all_filesystems')\n    def test_list_shares(self, mock_file, mock_qtree, mock_cifs, mock_nfs):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_cifs.return_value = GET_ALL_CIFSSHARE\n        mock_qtree.return_value = GET_ALL_QTREE\n        mock_nfs.return_value = GET_ALL_NFSSHARE\n        mock_file.return_value = GET_ALL_FILESYSTEMS\n        share = UnityStorDriver(**ACCESS_INFO).list_shares(context)\n        self.assertEqual(share, share_result)\n\n    @mock.patch.object(RestHandler, 'get_all_qtrees')\n    @mock.patch.object(RestHandler, 'get_all_userquotas')\n    def test_list_quotas(self, mock_user, mock_qtree):\n        RestHandler.login = mock.Mock(return_value=None)\n        
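# Expect one tree quota from the qtree fixture plus two user quotas.\n        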
mock_user.return_value = GET_ALL_USERQUOTA\n        mock_qtree.return_value = GET_ALL_QTREE\n        quota = UnityStorDriver(**ACCESS_INFO).list_quotas(context)\n        self.assertEqual(quota, quota_result)\n\n    @mock.patch.object(RestHandler, 'get_history_metrics')\n    def test_collect_perf_metrics(self, mock_history):\n        RestHandler.login = mock.Mock(return_value=None)\n        start_time = 1625726770000\n        end_time = 1625726830000\n        storage_id = '12345'\n        mock_history.side_effect = [GET_VOLUME_READ_THR_PERF,\n                                    GET_VOLUME_READ_THR_PERF_NULL,\n                                    GET_VOLUME_WRITE_THR_PERF,\n                                    GET_VOLUME_WRITE_THR_PERF_NULL,\n                                    GET_VOLUME_READ_IOPS_PERF,\n                                    GET_VOLUME_READ_IOPS_PERF_NULL,\n                                    GET_VOLUME_WRITE_IOPS_PERF,\n                                    GET_VOLUME_WRITE_IOPS_PERF_NULL,\n                                    GET_VOLUME_READ_IO_PERF,\n                                    GET_VOLUME_READ_IO_PERF_NULL,\n                                    GET_VOLUME_WRITE_IO_PERF,\n                                    GET_VOLUME_WRITE_IO_PERF_NULL,\n                                    GET_VOLUME_RESPONSE_PERF,\n                                    GET_VOLUME_RESPONSE_PERF_NULL,\n                                    GET_DISK_READ_THR_PERF,\n                                    GET_DISK_READ_THR_PERF_NULL,\n                                    GET_DISK_WRITE_THR_PERF,\n                                    GET_DISK_WRITE_THR_PERF_NULL,\n                                    GET_DISK_READ_IOPS_PERF,\n                                    GET_DISK_READ_IOPS_PERF_NULL,\n                                    GET_DISK_WRITE_IOPS_PERF,\n                                    GET_DISK_WRITE_IOPS_PERF_NULL,\n                                    GET_DISK_RESPONSE_PERF,\n                                    GET_DISK_RESPONSE_PERF_NULL,\n                                    GET_ETH_PORT_READ_THR_PERF,\n                                    GET_ETH_PORT_READ_THR_PERF_NULL,\n                                    GET_ETH_PORT_WRITE_THR_PERF,\n                                    GET_ETH_PORT_WRITE_THR_PERF_NULL,\n                                    GET_FC_PORT_READ_THR_PERF,\n                                    GET_FC_PORT_READ_THR_PERF_NULL,\n                                    GET_FC_PORT_WRITE_THR_PERF,\n                                    GET_FC_PORT_WRITE_THR_PERF_NULL,\n                                    GET_FC_PORT_READ_IOPS_PERF,\n                                    GET_FC_PORT_READ_IOPS_PERF_NULL,\n                                    GET_FC_PORT_WRITE_IOPS_PERF,\n                                    GET_FC_PORT_WRITE_IOPS_PERF_NULL,\n                                    GET_FILE_READ_THR_PERF,\n                                    GET_FILE_READ_THR_PERF_NULL,\n                                    GET_FILE_WRITE_THR_PERF,\n                                    GET_FILE_WRITE_THR_PERF_NULL,\n                                    GET_FILE_READ_IOPS_PERF,\n                                    
GET_FILE_READ_IOPS_PERF_NULL,\n                                    GET_FILE_WRITE_IOPS_PERF,\n                                    GET_FILE_WRITE_IOPS_PERF_NULL,\n                                    GET_FILE_READ_IO_PERF,\n                                    GET_FILE_READ_IO_PERF_NULL,\n                                    GET_FILE_WRITE_IO_PERF,\n                                    GET_FILE_WRITE_IO_PERF_NULL]\n        metrics = UnityStorDriver(**ACCESS_INFO).collect_perf_metrics(\n            context, storage_id, resource_metrics, start_time, end_time)\n        self.assertEqual(metrics[0][1]['resource_id'], 'volume0')\n\n    @mock.patch.object(RestHandler, 'get_history_metrics')\n    def test_latest_perf_timestamp(self, mock_history):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_history.return_value = GET_VOLUME_READ_THR_PERF\n        last_time = UnityStorDriver(**ACCESS_INFO).get_latest_perf_timestamp(\n            context)\n        self.assertEqual(last_time, 1625726830000)\n\n    @mock.patch.object(RestHandler, 'get_host_initiators')\n    def test_host_initiators(self, mock_init):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_init.side_effect = [GET_ALL_INIT, GET_ALL_INIT_NULL]\n        initiators = UnityStorDriver(\n            **ACCESS_INFO).list_storage_host_initiators(context)\n        self.assertEqual(initiators, INIT_RESULT)\n\n    @mock.patch.object(RestHandler, 'get_all_hosts')\n    @mock.patch.object(RestHandler, 'get_host_ip')\n    def test_hosts(self, mock_ip, mock_host):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_host.side_effect = [GET_ALL_HOST, GET_ALL_HOST_NULL]\n        mock_ip.return_value = GET_HOST_IP\n        hosts = UnityStorDriver(**ACCESS_INFO).list_storage_hosts(context)\n        self.assertEqual(hosts, HOST_RESULT)\n\n    @mock.patch.object(RestHandler, 'get_host_lun')\n    def test_masking_views(self, mock_view):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_view.side_effect = [GET_HOST_LUN, GET_HOST_LUN_NULL]\n        views = UnityStorDriver(**ACCESS_INFO).list_masking_views(context)\n        self.assertEqual(views, VIEW_RESULT)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/vmax/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/vmax/test_alert_handler.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom oslo_utils import importutils\n\nfrom delfin import exception\nfrom delfin.common import constants\n\n\nclass AlertHandlerTestCase(unittest.TestCase):\n    ALERT_HANDLER_CLASS = 'delfin.drivers.dell_emc.vmax.alert_handler' \\\n                          '.snmp_alerts.AlertHandler'\n\n    def _get_alert_handler(self):\n        alert_handler_class = importutils.import_class(\n            self.ALERT_HANDLER_CLASS)\n        alert_handler = alert_handler_class()\n        return alert_handler\n\n    def _get_fake_alert_info(self):\n        alert_info = {\n            '1.3.6.1.3.94.1.11.1.3.0': 79,\n            '1.3.6.1.3.94.1.6.1.20.0': '000192601409',\n            '1.3.6.1.3.94.1.11.1.7.0': 'topology',\n            '1.3.6.1.3.94.1.11.1.9.0': 'Symmetrix 000192601409 FastSRP '\n                                       'SRP_1 : Remote (SRDF) diagnostic '\n                                       'event trace triggered.',\n            '1.3.6.1.3.94.1.11.1.6.0': '6',\n            '1.3.6.1.3.94.1.6.1.3.0': 'storage-subsystem',\n            '1.3.6.1.4.1.1139.3.8888.1.0.0': 'symmetrix',\n            '1.3.6.1.4.1.1139.3.8888.2.0.0': '1050',\n            '1.3.6.1.4.1.1139.3.8888.3.0.0': '1051',\n            '1.3.6.1.4.1.1139.3.8888.4.0.0': 'SRP_1'}\n\n        return alert_info\n\n    def test_parse_alert_with_all_necessary_info(self):\n        \"\"\" Success flow with all necessary parameters\"\"\"\n        alert_handler_inst = self._get_alert_handler()\n        alert = self._get_fake_alert_info()\n\n        expected_alert_model = {\n            'alert_id': alert['1.3.6.1.4.1.1139.3.8888.2.0.0'],\n            'alert_name': 'SYMAPI_AEVENT2_UID_MOD_DIAG_TRACE_TRIG',\n            'severity': constants.Severity.WARNING,\n            'category': constants.Category.NOT_SPECIFIED,\n            'type': constants.EventType.EQUIPMENT_ALARM,\n            'sequence_number': alert['1.3.6.1.3.94.1.11.1.3.0'],\n            'serial_number': '000192601409',\n            'description': alert['1.3.6.1.3.94.1.11.1.9.0'],\n            'recovery_advice': 'None',\n            'resource_type': alert['1.3.6.1.3.94.1.6.1.3.0'],\n            'location': 'Array id=000192601409,'\n                        'Component type=Symmetrix Disk '\n                        'Group,'\n                        'Component name=SRP_1,'\n                        'Event source=symmetrix',\n        }\n        context = {}\n        alert_model = alert_handler_inst.parse_alert(context, alert)\n\n        # occur_time depends on current time\n        # Verify that all other fields are matching\n        expected_alert_model['occur_time'] = alert_model['occur_time']\n        self.assertDictEqual(expected_alert_model, alert_model)\n\n    def test_parse_alert_without_mandatory_info(self):\n        \"\"\" Error flow with some mandatory parameters missing\"\"\"\n        alert_handler_inst = self._get_alert_handler()\n        context = {}\n      
  alert = self._get_fake_alert_info()\n        alert['1.3.6.1.3.94.1.11.1.6.0'] = ''\n        self.assertRaisesRegex(exception.InvalidInput, \"Mandatory information \"\n                                                       \"connUnitEventSeverity\"\n                                                       \" missing\",\n                               alert_handler_inst.parse_alert, context, alert)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom unittest import TestCase, mock\n\nfrom requests.sessions import Session\n\nfrom delfin import context as ctxt\nfrom delfin import exception\nfrom delfin.common import constants, config  # noqa\nfrom delfin.drivers.dell_emc.vmax.rest import VMaxRest\nfrom delfin.drivers.dell_emc.vmax.vmax import VMAXStorageDriver\n\n\nclass Request:\n    def __init__(self):\n        self.environ = {'delfin.context': ctxt.RequestContext()}\n        pass\n\n\nVMAX_STORAGE_CONF = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"dell_emc\",\n    \"model\": \"vmax\",\n    \"rest\": {\n        \"host\": \"10.0.0.1\",\n        \"port\": 8443,\n        \"username\": \"user\",\n        \"password\": \"cGFzc3dvcmQ=\"\n    },\n    \"extra_attributes\": {\n        \"array_id\": \"00112233\"\n    }\n}\n\n\nclass TestVMAXStorageDriver(TestCase):\n\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_init(self, mock_unisphere_version,\n                  mock_version, mock_array):\n        kwargs = VMAX_STORAGE_CONF\n\n        mock_version.return_value = ['V9.0.2.7', '90']\n        mock_unisphere_version.return_value = ['V9.0.2.7', '90']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.client.uni_version, '90')\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        with self.assertRaises(Exception) as exc:\n            mock_version.side_effect = exception.InvalidIpOrPort\n            VMAXStorageDriver(**kwargs)\n        self.assertIn('Invalid ip or port', str(exc.exception))\n\n        with self.assertRaises(Exception) as exc:\n            mock_version.side_effect = exception.InvalidUsernameOrPassword\n            VMAXStorageDriver(**kwargs)\n        self.assertIn('Invalid username or password.', str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_system_capacity')\n    @mock.patch.object(VMaxRest, 'get_vmax_array_details')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_get_storage(self, mock_unisphere_version,\n                         mock_version, mock_array,\n                         mock_array_details, mock_capacity):\n        expected = {\n            'name': 'VMAX250F-00112233',\n            'vendor': 'Dell EMC',\n            'description': '',\n            'model': 'VMAX250F',\n            'firmware_version': '5978.221.221',\n            'status': 'normal',\n            'serial_number': '00112233',\n            'location': '',\n            'total_capacity': 109951162777600,\n            'used_capacity': 82463372083200,\n            'free_capacity': 
27487790694400,\n            'raw_capacity': 1610612736000,\n            'subscribed_capacity': 219902325555200\n        }\n        system_capacity = {\n            'system_capacity': {\n                'usable_total_tb': 100,\n                'usable_used_tb': 75,\n                'subscribed_total_tb': 200\n            },\n            'physicalCapacity': {\n                'total_capacity_gb': 1500\n\n            }\n        }\n        system_capacity_84 = {\n            'total_usable_cap_gb': 100 * 1024,\n            'total_allocated_cap_gb': 75 * 1024,\n            'total_subscribed_cap_gb': 200 * 1024,\n            'physicalCapacity': {\n                'total_capacity_gb': 1500\n            }\n        }\n        kwargs = VMAX_STORAGE_CONF\n\n        mock_version.return_value = ['V9.0.2.7', '90']\n        mock_unisphere_version.return_value = ['V9.0.2.7', '90']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_array_details.return_value = {\n            'model': 'VMAX250F',\n            'ucode': '5978.221.221',\n            'display_name': 'VMAX250F-00112233'}\n        mock_capacity.return_value = system_capacity\n\n        driver = VMAXStorageDriver(**kwargs)\n\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret = driver.get_storage(context)\n        self.assertDictEqual(ret, expected)\n\n        driver.client.uni_version = '84'\n        mock_capacity.return_value = system_capacity_84\n        ret = driver.get_storage(context)\n        self.assertDictEqual(ret, expected)\n\n        mock_array_details.side_effect = exception.StorageBackendException\n        with self.assertRaises(Exception) as exc:\n            driver.get_storage(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_array_details.side_effect = [{\n            'model': 'VMAX250F',\n            'ucode': '5978.221.221',\n            'display_name': 'VMAX250F-00112233'}]\n\n        mock_capacity.side_effect = exception.StorageBackendException\n        with self.assertRaises(Exception) as exc:\n            driver.get_storage(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_srp_by_name')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_list_storage_pools(self, mock_unisphere_version,\n                                mock_version,\n                                mock_array, mock_srp):\n        expected = [{\n            'name': 'SRP_1',\n            'storage_id': '12345',\n            'native_storage_pool_id': 'SRP_ID',\n            'description': 'Dell EMC VMAX Pool',\n            'status': 'normal',\n            'storage_type': 'block',\n            'total_capacity': 109951162777600,\n            'used_capacity': 82463372083200,\n            'free_capacity': 27487790694400,\n            'subscribed_capacity': 219902325555200\n        }]\n        pool_info = {\n            'srp_capacity': {\n                'usable_total_tb': 100,\n                'usable_used_tb': 75,\n                'subscribed_total_tb': 200\n            },\n            'srpId': 'SRP_ID'\n        }\n        kwargs = VMAX_STORAGE_CONF\n        
mock_version.return_value = ['V9.0.2.7', '90']\n        mock_unisphere_version.return_value = ['V9.0.2.7', '90']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_srp.side_effect = [{'srpId': ['SRP_1']}, pool_info]\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret = driver.list_storage_pools(context)\n        self.assertDictEqual(ret[0], expected[0])\n\n        mock_srp.side_effect = [{'srpId': ['SRP_1']},\n                                exception.StorageBackendException]\n        with self.assertRaises(Exception) as exc:\n            driver.list_storage_pools(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_srp.side_effect = [exception.StorageBackendException, pool_info]\n        with self.assertRaises(Exception) as exc:\n            driver.list_storage_pools(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_system_capacity')\n    @mock.patch.object(VMaxRest, 'get_storage_group')\n    @mock.patch.object(VMaxRest, 'get_volume')\n    @mock.patch.object(VMaxRest, 'get_volume_list')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_list_volumes(self, mock_unisphere_version,\n                          mock_version, mock_array,\n                          mock_vols, mock_vol, mock_sg, mock_capacity):\n        expected = \\\n            [\n                {\n                    'name': 'volume_1',\n                    'storage_id': '12345',\n                    'description': \"Dell EMC VMAX 'thin device' volume\",\n                    'type': 'thin',\n                    'status': 'available',\n                    'native_volume_id': '00001',\n                    'wwn': 'wwn123',\n                    'total_capacity': 104857600,\n                    'used_capacity': 10485760,\n                    'free_capacity': 94371840,\n                    'native_storage_pool_id': 'SRP_1',\n                    'compressed': True\n                },\n                {\n                    'name': 'volume_2:id',\n                    'storage_id': '12345',\n                    'description': \"Dell EMC VMAX 'thin device' volume\",\n                    'type': 'thin',\n                    'status': 'available',\n                    'native_volume_id': '00002',\n                    'wwn': 'wwn1234',\n                    'total_capacity': 104857600,\n                    'used_capacity': 10485760,\n                    'free_capacity': 94371840,\n                    'native_storage_pool_id': 'SRP_1'\n                }\n            ]\n        volumes = {\n            'volumeId': '00001',\n            'cap_mb': 100,\n            'allocated_percent': 10,\n            'status': 'Ready',\n            'type': 'TDEV',\n            'wwn': 'wwn123',\n            'num_of_storage_groups': 1,\n            'storageGroupId': ['SG_001'],\n            'emulation': 'FBA'\n        }\n        volumes1 = {\n            'volumeId': '00002',\n            'volume_identifier': 'id',\n            'cap_mb': 100,\n            'allocated_percent': 10,\n            'status': 'Ready',\n  
          'type': 'TDEV',\n            'wwn': 'wwn1234',\n            'num_of_storage_groups': 0,\n            'storageGroupId': [],\n            'emulation': 'FBA'\n        }\n        volumes2 = {\n            'volumeId': '00003',\n            'cap_mb': 100,\n            'allocated_percent': 10,\n            'status': 'Ready',\n            'type': 'TDEV',\n            'wwn': 'wwn1234',\n            'num_of_storage_groups': 0,\n            'storageGroupId': [],\n            'emulation': 'CKD'\n        }\n        storage_group_info = {\n            'srp': 'SRP_1',\n            'compression': True\n        }\n        default_srps = {\n            'default_fba_srp': 'SRP_1',\n            'default_ckd_srp': 'SRP_2'\n        }\n        kwargs = VMAX_STORAGE_CONF\n        mock_version.return_value = ['V9.0.2.7', '90']\n        mock_unisphere_version.return_value = ['V9.0.2.7', '90']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_vols.side_effect = [['volume_1', 'volume_2', 'volume_3']]\n        mock_vol.side_effect = [volumes, volumes1, volumes2]\n        mock_sg.side_effect = [storage_group_info]\n        mock_capacity.return_value = default_srps\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret = driver.list_volumes(context)\n        self.assertDictEqual(ret[0], expected[0])\n        self.assertDictEqual(ret[1], expected[1])\n\n        mock_vols.side_effect = [['volume_1']]\n        mock_vol.side_effect = [volumes]\n        mock_sg.side_effect = [exception.StorageBackendException]\n        with self.assertRaises(Exception) as exc:\n            driver.list_volumes(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_vols.side_effect = [['volume_1']]\n        mock_vol.side_effect = [exception.StorageBackendException]\n        mock_sg.side_effect = [storage_group_info]\n        with self.assertRaises(Exception) as exc:\n            driver.list_volumes(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_vols.side_effect = [exception.StorageBackendException]\n        mock_vol.side_effect = [volumes]\n        mock_sg.side_effect = [storage_group_info]\n        with self.assertRaises(Exception) as exc:\n            driver.list_volumes(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_resource')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_list_controllers(self, mock_unisphere_version,\n                              mock_version,\n                              mock_array, mock_res):\n        expected = [\n            {\n                'name': 'DF-1C',\n                'storage_id': '12345',\n                'native_controller_id': 'DF-1C',\n                'status': 'normal',\n                'location': 'slot_10',\n                'soft_version': None,\n                'cpu_info': 'Cores-64',\n                'memory_size': None\n            }\n        ]\n        kwargs = VMAX_STORAGE_CONF\n        mock_version.return_value = ['V9.0.2.7', '90']\n        
mock_unisphere_version.return_value = ['V9.0.2.7', '90']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_res.side_effect = [\n            {'directorId': ['DF-1C', 'DF-2C']},\n            {\n                'availability': 'ON',\n                'directorId': 'DF-1C',\n                'director_number': 1,\n                'director_slot_number': 10,\n                'num_of_cores': 64,\n                'num_of_ports': 2,\n                'srdf_groups': [\n                    {\n                        'label': 'label_1',\n                        'rdf_group_number': 1\n                    }\n                ]\n            },\n            {\n                'availability': 'ON',\n                'directorId': 'DF-2C',\n                'director_number': 2,\n                'director_slot_number': 10,\n                'num_of_cores': 64,\n                'num_of_ports': 2,\n                'srdf_groups': [\n                    {\n                        'label': 'label_1',\n                        'rdf_group_number': 1\n                    }\n                ]\n            },\n            {'directorId': ['DF-1C', 'DF-2C']},\n            exception.StorageBackendException,\n            exception.StorageBackendException\n        ]\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n\n        ret = driver.list_controllers(context)\n        self.assertDictEqual(ret[0], expected[0])\n\n        with self.assertRaises(Exception) as exc:\n            driver.list_controllers(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        with self.assertRaises(Exception) as exc:\n            driver.list_controllers(context)\n\n        self.assertIn('Exception from Storage Backend:',\n                      str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_resource_kwargs')\n    @mock.patch.object(VMaxRest, 'get_director_list')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_list_ports(self, mock_unisphere_version,\n                        mock_version,\n                        mock_array, mock_dirs, mock_res):\n        expected = [{\n            'name': 'DF-1D:30',\n            'storage_id': '12345',\n            'native_port_id': '30',\n            'location': 'director_DF-1D',\n            'connection_status': 'connected',\n            'health_status': 'normal',\n            'type': 'other',\n            'logical_type': 'backend',\n            'speed': 0,\n            'max_speed': 10737418240,\n            'native_parent_id': 'DF-1D',\n            'wwn': None,\n            'mac_address': None,\n            'ipv4': None,\n            'ipv4_mask': None,\n            'ipv6': None,\n            'ipv6_mask': None\n        }]\n        kwargs = VMAX_STORAGE_CONF\n        mock_version.return_value = ['V9.0.2.7', '90']\n        mock_unisphere_version.return_value = ['V9.0.2.7', '90']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_dirs.return_value = ['DF-1C']\n        mock_res.side_effect = [\n            {\n                'symmetrixPortKey': [\n                    {\n                        'directorId': 'DF-1D',\n                        
'portId': '30'\n                    },\n                    {\n                        'directorId': 'DF-2C',\n                        'portId': '0'\n                    }\n                ]\n            },\n            {\n                'symmetrixPort': {\n                    'aclx': False,\n                    'avoid_reset_broadcast': False,\n                    'common_serial_number': True,\n                    'director_status': 'Offline',\n                    'disable_q_reset_on_ua': False,\n                    'enable_auto_negotiate': False,\n                    'environ_set': False,\n                    'hp_3000_mode': False,\n                    'ip_addresses': [\n                        '192.168.0.51'\n                    ],\n                    'iscsi_target': False,\n                    'max_speed': '10',\n                    'negotiate_reset': False,\n                    'num_of_cores': 6,\n                    'num_of_mapped_vols': 0,\n                    'num_of_masking_views': 0,\n                    'num_of_port_groups': 0,\n                    'port_status': 'PendOn',\n                    'scsi_3': False,\n                    'scsi_support1': False,\n                    'siemens': False,\n                    'soft_reset': False,\n                    'spc2_protocol_version': False,\n                    'sunapee': False,\n                    'symmetrixPortKey': {\n                        'directorId': 'DF-1C',\n                        'portId': '30'\n                    },\n                    'type': 'GigE',\n                    'vnx_attached': False,\n                    'volume_set_addressing': False\n                }\n            },\n            {\n                'symmetrixPort': {\n                    'aclx': False,\n                    'avoid_reset_broadcast': False,\n                    'common_serial_number': True,\n                    'director_status': 'Offline',\n                    'disable_q_reset_on_ua': False,\n                    'enable_auto_negotiate': False,\n                    'environ_set': False,\n                    'hp_3000_mode': False,\n                    'ip_addresses': [\n                        '192.168.0.51'\n                    ],\n                    'iscsi_target': False,\n                    'max_speed': '10',\n                    'negotiate_reset': False,\n                    'num_of_cores': 6,\n                    'num_of_mapped_vols': 0,\n                    'num_of_masking_views': 0,\n                    'num_of_port_groups': 0,\n                    'port_status': 'PendOn',\n                    'scsi_3': False,\n                    'scsi_support1': False,\n                    'siemens': False,\n                    'soft_reset': False,\n                    'spc2_protocol_version': False,\n                    'sunapee': False,\n                    'symmetrixPortKey': {\n                        'directorId': 'DF-2C',\n                        'portId': '0'\n                    },\n                    'type': 'GigE',\n                    'vnx_attached': False,\n                    'volume_set_addressing': False\n                }\n            },\n            {\n                'symmetrixPortKey': [\n                    {\n                        'directorId': 'DF-1C',\n                        'portId': '30'\n                    },\n                    {\n                        'directorId': 'DF-2C',\n                        'portId': '0'\n                    }\n                ]\n            },\n            
exception.StorageBackendException,\n            exception.StorageBackendException\n        ]\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n\n        ret = driver.list_ports(context)\n        self.assertDictEqual(ret[0], expected[0])\n\n        mock_dirs.side_effect = exception.StorageBackendException\n        with self.assertRaises(Exception) as exc:\n            driver.list_ports(context)\n\n        self.assertIn('Exception from Storage Backend:',\n                      str(exc.exception))\n\n        with self.assertRaises(Exception) as exc:\n            driver.list_ports(context)\n\n        self.assertIn('Exception from Storage Backend:',\n                      str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_disk')\n    @mock.patch.object(VMaxRest, 'get_disk_list')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_list_disks(self, mock_unisphere_version,\n                        mock_version, mock_array,\n                        mock_disks, mock_disk):\n        expected = \\\n            [\n                {\n                    'name': '1',\n                    'storage_id': '12345',\n                    'native_disk_id': '1',\n                    'manufacturer': 'HGST',\n                    'capacity': 1073741824000\n                },\n                {\n                    'name': '2',\n                    'storage_id': '12345',\n                    'native_disk_id': '2',\n                    'manufacturer': 'WD',\n                    'capacity': 2147483648000\n                }\n            ]\n        disks = {\n            'spindle_id': '1000',\n            'type': 'HGOMAHA_1',\n            'vendor': 'HGST',\n            'capacity': 1000.0\n        }\n        disk1 = {\n            'spindle_id': '1001',\n            'type': 'HGOMAHA_2',\n            'vendor': 'WD',\n            'capacity': 2000.0\n        }\n        disk2 = {\n            'spindle_id': '1002',\n            'type': 'HGOMAHA_3',\n            'vendor': 'SUN',\n            'capacity': 3000.0\n        }\n\n        kwargs = VMAX_STORAGE_CONF\n        mock_version.return_value = ['V9.2.2.7', '92']\n        mock_unisphere_version.return_value = ['V9.2.2.7', '92']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_disks.side_effect = [['1', '2', '3']]\n        mock_disk.side_effect = [disks, disk1, disk2]\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret = driver.list_disks(context)\n        self.assertDictEqual(ret[0], expected[0])\n        self.assertDictEqual(ret[1], expected[1])\n\n        mock_disks.side_effect = [['disk_1']]\n        mock_disk.side_effect = [exception.StorageBackendException]\n        with self.assertRaises(Exception) as exc:\n            driver.list_disks(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_disks.side_effect = [exception.StorageBackendException]\n        mock_disk.side_effect = [disks]\n        with 
self.assertRaises(Exception) as exc:\n            driver.list_disks(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_initiator')\n    @mock.patch.object(VMaxRest, 'get_initiator_list')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_list_storage_host_initiators(self, mock_unisphere_version,\n                                          mock_version, mock_array,\n                                          mock_initiators, mock_initiator):\n        expected = \\\n            [\n                {\n                    'name': '1001',\n                    'storage_id': '12345',\n                    'native_storage_host_initiator_id': '1001',\n                    'alias': 'I1',\n                    'wwn': '1001',\n                    'type': 'fc',\n                    'status': 'online',\n                    'native_storage_host_id': 'host1',\n                },\n                {\n                    'name': '1002',\n                    'storage_id': '12345',\n                    'native_storage_host_initiator_id': '1002',\n                    'alias': 'I2',\n                    'wwn': '1002',\n                    'type': 'iscsi',\n                    'status': 'offline',\n                    'native_storage_host_id': 'host2',\n                },\n                {\n                    'name': '1003',\n                    'storage_id': '12345',\n                    'native_storage_host_initiator_id': '1003',\n                    'alias': 'I3',\n                    'wwn': '1003',\n                    'type': 'fc',\n                    'status': 'offline',\n                    'native_storage_host_id': 'host3',\n                }\n            ]\n        init_1 = {\n            'initiatorId': '1001',\n            'wwn': '1001',\n            'alias': 'I1',\n            'host': 'host1',\n            'on_fabric': True,\n            'type': 'FIBRE'\n        }\n        init_2 = {\n            'initiatorId': '1002',\n            'wwn': '1002',\n            'alias': 'I2',\n            'host': 'host2',\n            'type': 'ISCSI'\n        }\n        init_3 = {\n            'initiatorId': '1003',\n            'wwn': '1003',\n            'alias': 'I3',\n            'host': 'host3',\n            'type': 'FIBRE'\n        }\n\n        kwargs = VMAX_STORAGE_CONF\n        mock_version.return_value = ['V9.2.2.7', '92']\n        mock_unisphere_version.return_value = ['V9.2.2.7', '92']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_initiators.side_effect = [['1001', '1002', '1003']]\n        mock_initiator.side_effect = [init_1, init_2, init_3]\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret = driver.list_storage_host_initiators(context)\n        self.assertDictEqual(ret[0], expected[0])\n        self.assertDictEqual(ret[1], expected[1])\n        self.assertDictEqual(ret[2], expected[2])\n\n        mock_initiators.side_effect = [['1001']]\n        mock_initiator.side_effect = [exception.StorageBackendException]\n        with self.assertRaises(Exception) as exc:\n            driver.list_storage_host_initiators(context)\n\n        
self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_initiators.side_effect = [exception.StorageBackendException]\n        mock_initiator.side_effect = [init_1]\n        with self.assertRaises(Exception) as exc:\n            driver.list_storage_host_initiators(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_host')\n    @mock.patch.object(VMaxRest, 'get_host_list')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_list_storage_hosts(self, mock_unisphere_version,\n                                mock_version, mock_array,\n                                mock_hosts, mock_host):\n        expected = \\\n            [\n                {\n                    'storage_id': '12345',\n                    'name': 'h1',\n                    'native_storage_host_id': 'h1',\n                    'os_type': 'Unknown',\n                    'status': 'normal',\n                },\n                {\n                    'storage_id': '12345',\n                    'name': 'h2',\n                    'native_storage_host_id': 'h2',\n                    'os_type': 'Unknown',\n                    'status': 'normal',\n                },\n                {\n                    'storage_id': '12345',\n                    'name': 'h3',\n                    'native_storage_host_id': 'h3',\n                    'os_type': 'Unknown',\n                    'status': 'normal',\n                }\n            ]\n        host_1 = {\n            'hostId': 'h1',\n        }\n        host_2 = {\n            'hostId': 'h2',\n        }\n        host_3 = {\n            'hostId': 'h3',\n        }\n\n        kwargs = VMAX_STORAGE_CONF\n        mock_version.return_value = ['V9.2.2.7', '92']\n        mock_unisphere_version.return_value = ['V9.2.2.7', '92']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_hosts.side_effect = [['h1', 'h2', 'h3']]\n        mock_host.side_effect = [host_1, host_2, host_3]\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret = driver.list_storage_hosts(context)\n        self.assertDictEqual(ret[0], expected[0])\n        self.assertDictEqual(ret[1], expected[1])\n        self.assertDictEqual(ret[2], expected[2])\n\n        mock_hosts.side_effect = [['h1']]\n        mock_host.side_effect = [exception.StorageBackendException]\n        with self.assertRaises(Exception) as exc:\n            driver.list_storage_hosts(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_hosts.side_effect = [exception.StorageBackendException]\n        mock_host.side_effect = [host_1]\n        with self.assertRaises(Exception) as exc:\n            driver.list_storage_hosts(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_host_group')\n    @mock.patch.object(VMaxRest, 'get_host_group_list')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 
'get_unisphere_version')\n    def test_list_storage_host_groups(self, mock_unisphere_version,\n                                      mock_version, mock_array,\n                                      mock_host_groups, mock_host_group):\n        expected = \\\n            [\n                {\n                    'name': 'hg1',\n                    'storage_id': '12345',\n                    'native_storage_host_group_id': 'hg1',\n                },\n                {\n                    'name': 'hg2',\n                    'storage_id': '12345',\n                    'native_storage_host_group_id': 'hg2',\n                },\n                {\n                    'name': 'hg3',\n                    'storage_id': '12345',\n                    'native_storage_host_group_id': 'hg3',\n                }\n            ]\n        expected_rel = [\n            {\n                'storage_id': '12345',\n                'native_storage_host_group_id': 'hg1',\n                'native_storage_host_id': 'h1',\n            },\n            {\n                'storage_id': '12345',\n                'native_storage_host_group_id': 'hg1',\n                'native_storage_host_id': 'h2',\n            },\n            {\n                'storage_id': '12345',\n                'native_storage_host_group_id': 'hg2',\n                'native_storage_host_id': 'h2',\n            },\n            {\n                'storage_id': '12345',\n                'native_storage_host_group_id': 'hg3',\n                'native_storage_host_id': 'h1',\n            },\n        ]\n        hg_1 = {\n            'hostGroupId': 'hg1',\n            'host': [{'hostId': 'h1'}, {'hostId': 'h2'}],\n        }\n        hg_2 = {\n            'hostGroupId': 'hg2',\n            'host': [{'hostId': 'h2'}],\n        }\n        hg_3 = {\n            'hostGroupId': 'hg3',\n            'host': [{'hostId': 'h1'}],\n        }\n\n        kwargs = VMAX_STORAGE_CONF\n        mock_version.return_value = ['V9.2.2.7', '92']\n        mock_unisphere_version.return_value = ['V9.2.2.7', '92']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_host_groups.side_effect = [['hg1', 'hg2', 'hg3']]\n        mock_host_group.side_effect = [hg_1, hg_2, hg_3]\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret = driver.list_storage_host_groups(context)\n        ret_hgs = ret['storage_host_groups']\n        ret_hg_rels = ret['storage_host_grp_host_rels']\n        self.assertDictEqual(ret_hgs[0], expected[0])\n        self.assertDictEqual(ret_hgs[1], expected[1])\n        self.assertDictEqual(ret_hgs[2], expected[2])\n        self.assertDictEqual(ret_hg_rels[0], expected_rel[0])\n        self.assertDictEqual(ret_hg_rels[1], expected_rel[1])\n        self.assertDictEqual(ret_hg_rels[2], expected_rel[2])\n        self.assertDictEqual(ret_hg_rels[3], expected_rel[3])\n\n        mock_host_groups.side_effect = [['hg1']]\n        mock_host_group.side_effect = [exception.StorageBackendException]\n        with self.assertRaises(Exception) as exc:\n            driver.list_storage_host_groups(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_host_groups.side_effect = [exception.StorageBackendException]\n        mock_host_group.side_effect = [hg_1]\n        
with self.assertRaises(Exception) as exc:\n            driver.list_storage_host_groups(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_port_group')\n    @mock.patch.object(VMaxRest, 'get_port_group_list')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_list_port_groups(self, mock_unisphere_version,\n                              mock_version, mock_array,\n                              mock_port_groups, mock_port_group):\n        expected = \\\n            [\n                {\n                    'name': 'pg1',\n                    'storage_id': '12345',\n                    'native_port_group_id': 'pg1',\n                },\n                {\n                    'name': 'pg2',\n                    'storage_id': '12345',\n                    'native_port_group_id': 'pg2',\n                },\n                {\n                    'name': 'pg3',\n                    'storage_id': '12345',\n                    'native_port_group_id': 'pg3',\n                }\n            ]\n        expected_rel = [\n            {\n                'storage_id': '12345',\n                'native_port_group_id': 'pg1',\n                'native_port_id': 'FA-1D:1',\n            },\n            {\n                'storage_id': '12345',\n                'native_port_group_id': 'pg1',\n                'native_port_id': 'FA-1D:2',\n            },\n            {\n                'storage_id': '12345',\n                'native_port_group_id': 'pg2',\n                'native_port_id': 'FA-2D:2',\n            },\n            {\n                'storage_id': '12345',\n                'native_port_group_id': 'pg3',\n                'native_port_id': 'FA-3D:1',\n            },\n        ]\n        pg_1 = {\n            'hostGroupId': 'hg1',\n            'symmetrixPortKey': [\n                {\n                    \"directorId\": \"FA-1D\",\n                    \"portId\": \"1\"\n                },\n                {\n                    \"directorId\": \"FA-1D\",\n                    \"portId\": \"2\"\n                }\n            ],\n        }\n        pg_2 = {\n            'hostGroupId': 'hg2',\n            'symmetrixPortKey': [\n                {\n                    \"directorId\": \"FA-2D\",\n                    \"portId\": \"2\"\n                }\n            ],\n        }\n        pg_3 = {\n            'hostGroupId': 'hg3',\n            'symmetrixPortKey': [\n                {\n                    \"directorId\": \"FA-3D\",\n                    \"portId\": \"1\"\n                },\n            ],\n        }\n\n        kwargs = VMAX_STORAGE_CONF\n        mock_version.return_value = ['V9.2.2.7', '92']\n        mock_unisphere_version.return_value = ['V9.2.2.7', '92']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_port_groups.side_effect = [['pg1', 'pg2', 'pg3']]\n        mock_port_group.side_effect = [pg_1, pg_2, pg_3]\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret = driver.list_port_groups(context)\n        ret_pgs = ret['port_groups']\n        ret_pg_rels = ret['port_grp_port_rels']\n        
self.assertDictEqual(ret_pgs[0], expected[0])\n        self.assertDictEqual(ret_pgs[1], expected[1])\n        self.assertDictEqual(ret_pgs[2], expected[2])\n        self.assertDictEqual(ret_pg_rels[0], expected_rel[0])\n        self.assertDictEqual(ret_pg_rels[1], expected_rel[1])\n        self.assertDictEqual(ret_pg_rels[2], expected_rel[2])\n        self.assertDictEqual(ret_pg_rels[3], expected_rel[3])\n\n        mock_port_groups.side_effect = [['pg1']]\n        mock_port_group.side_effect = [exception.StorageBackendException]\n        with self.assertRaises(Exception) as exc:\n            driver.list_port_groups(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_port_groups.side_effect = [exception.StorageBackendException]\n        mock_port_group.side_effect = [pg_1]\n        with self.assertRaises(Exception) as exc:\n            driver.list_port_groups(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_volume_list')\n    @mock.patch.object(VMaxRest, 'get_volume_group_list')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_list_volume_groups(self, mock_unisphere_version,\n                                mock_version, mock_array,\n                                mock_volume_groups, mock_volumes):\n        expected = \\\n            [\n                {\n                    'name': 'vg1',\n                    'storage_id': '12345',\n                    'native_volume_group_id': 'vg1',\n                },\n                {\n                    'name': 'vg2',\n                    'storage_id': '12345',\n                    'native_volume_group_id': 'vg2',\n                },\n                {\n                    'name': 'vg3',\n                    'storage_id': '12345',\n                    'native_volume_group_id': 'vg3',\n                }\n            ]\n        expected_rel = [\n            {\n                'storage_id': '12345',\n                'native_volume_group_id': 'vg1',\n                'native_volume_id': 'volume1',\n            },\n            {\n                'storage_id': '12345',\n                'native_volume_group_id': 'vg1',\n                'native_volume_id': 'volume2',\n            },\n            {\n                'storage_id': '12345',\n                'native_volume_group_id': 'vg2',\n                'native_volume_id': 'volume2',\n            },\n            {\n                'storage_id': '12345',\n                'native_volume_group_id': 'vg3',\n                'native_volume_id': 'volume1',\n            },\n        ]\n        v_1 = ['volume1', 'volume2']\n        v_2 = ['volume2']\n        v_3 = ['volume1']\n\n        kwargs = VMAX_STORAGE_CONF\n        mock_version.return_value = ['V9.2.2.7', '92']\n        mock_unisphere_version.return_value = ['V9.2.2.7', '92']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_volume_groups.side_effect = [['vg1', 'vg2', 'vg3']]\n        mock_volumes.side_effect = [v_1, v_2, v_3]\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret = 
driver.list_volume_groups(context)\n        ret_vgs = ret['volume_groups']\n        ret_vg_rels = ret['vol_grp_vol_rels']\n        self.assertDictEqual(ret_vgs[0], expected[0])\n        self.assertDictEqual(ret_vgs[1], expected[1])\n        self.assertDictEqual(ret_vgs[2], expected[2])\n        self.assertDictEqual(ret_vg_rels[0], expected_rel[0])\n        self.assertDictEqual(ret_vg_rels[1], expected_rel[1])\n        self.assertDictEqual(ret_vg_rels[2], expected_rel[2])\n        self.assertDictEqual(ret_vg_rels[3], expected_rel[3])\n\n        mock_volume_groups.side_effect = [['vg1']]\n        mock_volumes.side_effect = [exception.StorageBackendException]\n        with self.assertRaises(Exception) as exc:\n            driver.list_volume_groups(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_volume_groups.side_effect = [exception.StorageBackendException]\n        mock_volumes.side_effect = [v_1]\n        with self.assertRaises(Exception) as exc:\n            driver.list_volume_groups(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n    @mock.patch.object(VMaxRest, 'get_masking_view')\n    @mock.patch.object(VMaxRest, 'get_masking_view_list')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_list_masking_views(self, mock_unisphere_version,\n                                mock_version, mock_array,\n                                mock_masking_views, mock_masking_view):\n        expected = \\\n            [\n                {\n                    'storage_id': '12345',\n                    'native_storage_host_id': 'host1',\n                    'native_storage_host_group_id': 'hg1',\n                    'native_volume_group_id': 'sg1',\n                    'native_port_group_id': 'pg1',\n                    'native_masking_view_id': 'mv1',\n                    'name': 'mv1',\n                },\n                {\n                    'storage_id': '12345',\n                    'native_storage_host_id': 'host2',\n                    'native_storage_host_group_id': 'hg2',\n                    'native_volume_group_id': 'sg2',\n                    'native_port_group_id': 'pg2',\n                    'native_masking_view_id': 'mv2',\n                    'name': 'mv2',\n                },\n                {\n                    'storage_id': '12345',\n                    'native_storage_host_id': 'host3',\n                    'native_storage_host_group_id': 'hg3',\n                    'native_volume_group_id': 'sg3',\n                    'native_port_group_id': 'pg3',\n                    'native_masking_view_id': 'mv3',\n                    'name': 'mv3',\n                }\n            ]\n        mv_1 = {\n            'maskingViewId': 'mv1',\n            'hostId': 'host1',\n            'hostGroupId': 'hg1',\n            'storageGroupId': 'sg1',\n            'portGroupId': 'pg1',\n        }\n        mv_2 = {\n            'maskingViewId': 'mv2',\n            'hostId': 'host2',\n            'hostGroupId': 'hg2',\n            'storageGroupId': 'sg2',\n            'portGroupId': 'pg2',\n        }\n        mv_3 = {\n            'maskingViewId': 'mv3',\n            'hostId': 'host3',\n            'hostGroupId': 'hg3',\n            'storageGroupId': 'sg3',\n            'portGroupId': 'pg3',\n        }\n\n        kwargs = 
VMAX_STORAGE_CONF\n        mock_version.return_value = ['V9.2.2.7', '92']\n        mock_unisphere_version.return_value = ['V9.2.2.7', '92']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        mock_masking_views.side_effect = [['mv1', 'mv2', 'mv3']]\n        mock_masking_view.side_effect = [mv_1, mv_2, mv_3]\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret = driver.list_masking_views(context)\n        self.assertDictEqual(ret[0], expected[0])\n        self.assertDictEqual(ret[1], expected[1])\n        self.assertDictEqual(ret[2], expected[2])\n\n        mock_masking_views.side_effect = [['mv1']]\n        mock_masking_view.side_effect = [exception.StorageBackendException]\n        with self.assertRaises(Exception) as exc:\n            driver.list_masking_views(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_masking_views.side_effect = [exception.StorageBackendException]\n        mock_masking_view.side_effect = [mv_1]\n        with self.assertRaises(Exception) as exc:\n            driver.list_masking_views(context)\n\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n    @mock.patch.object(Session, 'request')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_rest(self, mock_unisphere_version,\n                  mock_version, mock_array,\n                  mock_request):\n        kwargs = VMAX_STORAGE_CONF\n\n        mock_version.return_value = ['V9.0.2.7', '90']\n        mock_unisphere_version.return_value = ['V9.0.2.7', '90']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.client.uni_version, '90')\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        mock_request.return_value = mock.Mock()\n        mock_request.return_value.json = mock.Mock(return_value={})\n        driver.reset_connection(context, **kwargs)\n        driver.client.rest.session = None\n        driver.client.rest.request('/session', 'GET')\n        self.assertEqual(driver.client.uni_version, '90')\n\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_get_capabilities(self, mock_unisphere_version,\n                              mock_version, mock_array):\n        kwargs = VMAX_STORAGE_CONF\n\n        mock_version.return_value = ['V9.0.2.7', '90']\n        mock_unisphere_version.return_value = ['V9.0.2.7', '90']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.client.uni_version, '90')\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        capabilities = 
driver.get_capabilities(context)\n        self.assertIsNotNone(capabilities)\n        self.assertIsInstance(capabilities, dict)\n        self.assertEqual(capabilities['is_historic'], True)\n        self.assertIsInstance(capabilities['resource_metrics'], dict)\n        # Support storage, storage_pool, controller, port & disk metrics\n        self.assertEqual(len(capabilities['resource_metrics']), 5)\n\n    @mock.patch.object(VMaxRest, 'get_resource_metrics')\n    @mock.patch.object(VMaxRest, 'get_resource_keys')\n    @mock.patch.object(VMaxRest, 'get_array_keys')\n    @mock.patch.object(VMaxRest, 'get_array_detail')\n    @mock.patch.object(VMaxRest, 'get_uni_version')\n    @mock.patch.object(VMaxRest, 'get_unisphere_version')\n    def test_collect_perf_metrics(self, mock_unisphere_version,\n                                  mock_version,\n                                  mock_array, mock_array_keys,\n                                  mock_r_keys, mock_r_metrics):\n        expected = [\n            constants.metric_struct(name='iops',\n                                    labels={\n                                        'storage_id': '12345',\n                                        'resource_type': 'storage',\n                                        'resource_id': '00112233',\n                                        'resource_name': 'VMAX00112233',\n                                        'type': 'RAW',\n                                        'unit': 'IOPS'},\n                                    values={1566550500000: 417.42667}\n                                    ),\n            constants.metric_struct(name='iops',\n                                    labels={\n                                        'storage_id': '12345',\n                                        'resource_type': 'storagePool',\n                                        'resource_id': 'SRP_1',\n                                        'resource_name': 'SRP_1',\n                                        'type': 'RAW',\n                                        'unit': 'IOPS'},\n                                    values={1566550800000: 304.8}\n                                    ),\n            constants.metric_struct(name='iops',\n                                    labels={\n                                        'storage_id': '12345',\n                                        'resource_type': 'controller',\n                                        'resource_id': 'DF-1C',\n                                        'resource_name': 'BEDirector_DF-1C',\n                                        'type': 'RAW',\n                                        'unit': 'IOPS'\n                                    },\n                                    values={1566987000000: 248.40666}\n                                    ),\n            constants.metric_struct(name='iops',\n                                    labels={\n                                        'storage_id': '12345',\n                                        'resource_type': 'port',\n                                        'resource_id': '12',\n                                        'resource_name': 'BEPort_DF-1C_12',\n                                        'type': 'RAW',\n                                        'unit': 'IOPS'\n                                    },\n                                    values={1566987000000: 6.693333}\n                                    ),\n        ]\n        kwargs = VMAX_STORAGE_CONF\n        mock_version.return_value = ['V9.0.2.7', '90']\n    
    mock_unisphere_version.return_value = ['V9.0.2.7', '90']\n        mock_array.return_value = {'symmetrixId': ['00112233']}\n\n        driver = VMAXStorageDriver(**kwargs)\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.client.array_id[\"12345\"], \"00112233\")\n\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret_array_key = {\n            \"arrayInfo\": [{\n                \"symmetrixId\": \"00112233\",\n                \"firstAvailableDate\": \"1566146400000\",\n                \"lastAvailableDate\": \"1566550800000\",\n            }]\n        }\n        ret_pool_key = {\n            \"srpInfo\": [\n                {\n                    \"srpId\": \"SRP_1\",\n                    \"firstAvailableDate\": 1567065600000,\n                    \"lastAvailableDate\": 1568130900000\n                },\n            ]\n        }\n        ret_be_dir_key = {\n            \"beDirectorInfo\": [\n                {\n                    \"directorId\": \"DF-1C\",\n                    \"firstAvailableDate\": 1566557100000,\n                    \"lastAvailableDate\": 1566987300000\n                },\n            ]\n        }\n        ret_fe_dir_key = {\n            \"feDirectorInfo\": [\n                {\n                    \"directorId\": \"FA-1D\",\n                    \"firstAvailableDate\": 1567065600000,\n                    \"lastAvailableDate\": 1567093200000\n                },\n            ]\n        }\n        ret_rdf_dir_key = {\n            \"rdfDirectorInfo\": [\n                {\n                    \"directorId\": \"RF-1F\",\n                    \"firstAvailableDate\": 1567065600000,\n                    \"lastAvailableDate\": 1567437900000\n                },\n            ]\n        }\n        ret_be_port_key = {\n            \"bePortInfo\": [\n                {\n                    \"portId\": \"12\",\n                    \"firstAvailableDate\": 1566557100000,\n                    \"lastAvailableDate\": 1566988500000\n                },\n            ]\n        }\n        ret_fe_port_key = {\n            \"fePortInfo\": [\n                {\n                    \"firstAvailableDate\": 1567065600000,\n                    \"lastAvailableDate\": 1567162500000,\n                    \"portId\": \"4\"\n                },\n            ]\n        }\n        ret_rdf_port_key = {\n            \"rdfPortInfo\": [\n                {\n                    \"portId\": \"7\",\n                    \"firstAvailableDate\": 1567065600000,\n                    \"lastAvailableDate\": 1567439100000\n                }\n            ]\n        }\n        mock_array_keys.return_value = ret_array_key\n        mock_r_keys.side_effect = [\n            ret_pool_key,\n            ret_be_dir_key, ret_fe_dir_key, ret_rdf_dir_key,\n            ret_be_dir_key, ret_be_port_key,\n            ret_fe_dir_key, ret_fe_port_key,\n            ret_rdf_dir_key, ret_rdf_port_key,\n        ]\n        ret_array_metric = {\n            \"HostIOs\": 417.42667,\n            \"HostMBs\": 0.0018131511,\n            \"FEReqs\": 23.55,\n            \"BEIOs\": 25.216667,\n            \"BEReqs\": 5.55,\n            \"PercentCacheWP\": 0.031244868,\n            \"timestamp\": 1566550500000\n        }\n        ret_pool_metric = {\n            \"HostIOs\": 304.8,\n            \"HostMBs\": 0.005192057,\n            \"FEReqs\": 23.04,\n            \"BEIOs\": 22.566668,\n            \"BEReqs\": 4.7733335,\n            \"PercentCacheWP\": 0.018810686,\n           
 \"timestamp\": 1566550800000\n        }\n        ret_be_dir_metric = {\n            \"PercentBusy\": 0.025403459,\n            \"IOs\": 248.40666,\n            \"Reqs\": 3.91,\n            \"MBRead\": 1.7852213,\n            \"MBWritten\": 0.37213543,\n            \"PercentNonIOBusy\": 0.0,\n            \"timestamp\": 1566987000000\n        }\n        ret_fe_dir_metric = {\n            \"PercentBusy\": 2.54652,\n            \"HostIOs\": 3436.9368,\n            \"HostMBs\": 51.7072,\n            \"Reqs\": 3330.5947,\n            \"ReadResponseTime\": 0.12916493,\n            \"WriteResponseTime\": 0.3310084,\n            \"timestamp\": 1567078200000\n        }\n        ret_rdf_dir_metric = {\n            \"PercentBusy\": 4.8083158,\n            \"IOs\": 1474.2234,\n            \"WriteReqs\": 1189.76,\n            \"MBWritten\": 54.89597,\n            \"MBRead\": 0.4565983,\n            \"MBSentAndReceived\": 55.35257,\n            \"AvgIOServiceTime\": 0.89211756,\n            \"CopyIOs\": 0.0,\n            \"CopyMBs\": 0.0,\n            \"timestamp\": 1567161600000\n        }\n        ret_be_port_metric = {\n            \"Reads\": 4.7,\n            \"Writes\": 1.9933333,\n            \"IOs\": 6.693333,\n            \"MBRead\": 0.43401042,\n            \"MBWritten\": 0.10486979,\n            \"MBs\": 0.5388802,\n            \"AvgIOSize\": 82.44224,\n            \"PercentBusy\": 0.013356605,\n            \"timestamp\": 1566987000000\n        }\n        ret_fe_port_metric = {\n            \"ResponseTime\": 0.1263021,\n            \"ReadResponseTime\": 0.1263021,\n            \"WriteResponseTime\": 0.0,\n            \"Reads\": 0.32,\n            \"Writes\": 0.0,\n            \"IOs\": 0.32,\n            \"MBRead\": 4.296875E-4,\n            \"MBWritten\": 0.0,\n            \"MBs\": 4.296875E-4,\n            \"AvgIOSize\": 1.375,\n            \"SpeedGBs\": 16.0,\n            \"PercentBusy\": 2.6226044E-5,\n            \"timestamp\": 1567161600000\n        }\n        ret_rdf_port_metric = {\n            \"Reads\": 0.0,\n            \"Writes\": 1216.7633,\n            \"IOs\": 1216.7633,\n            \"MBRead\": 0.0,\n            \"MBWritten\": 57.559597,\n            \"MBs\": 57.559597,\n            \"AvgIOSize\": 48.440834,\n            \"SpeedGBs\": 16.0,\n            \"PercentBusy\": 3.5131588,\n            \"timestamp\": 1567161600000\n        }\n        mock_r_metrics.side_effect = [\n            [ret_array_metric],\n            [ret_pool_metric],\n            [ret_be_dir_metric],\n            [ret_fe_dir_metric],\n            [ret_rdf_dir_metric],\n            [ret_be_port_metric],\n            [ret_fe_port_metric],\n            [ret_rdf_port_metric],\n        ]\n        resource_metrics = {\n            'storage': {'iops': {'unit': 'IOPS'}},\n            'storagePool': {'iops': {'unit': 'IOPS'}},\n            'controller': {'iops': {'unit': 'IOPS'}},\n            'port': {'iops': {'unit': 'IOPS'}},\n        }\n        context = ctxt.get_admin_context()\n        context.storage_id = \"12345\"\n        ret = driver.collect_perf_metrics(context,\n                                          driver.storage_id,\n                                          resource_metrics,\n                                          1000, 2000)\n\n        self.assertEqual(ret[0], expected[0])\n        self.assertEqual(ret[2], expected[1])\n        self.assertEqual(ret[4], expected[2])\n        self.assertEqual(ret[13], expected[3])\n\n        with self.assertRaises(Exception) as exc:\n            
driver.collect_perf_metrics(context,\n                                        driver.storage_id,\n                                        resource_metrics,\n                                        1000, 2000\n                                        )\n\n        # The mocked side_effect lists were exhausted by the first call, so\n        # this second invocation must raise; the backend-specific message is\n        # deliberately not asserted here.\n        self.assertIn('', str(exc.exception))\n"
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/vnx/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/vnx/vnx_block/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/vnx/vnx_block/test_vnx_block.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nimport time\nfrom unittest import TestCase, mock\n\nfrom delfin.common import constants\nfrom delfin.drivers.dell_emc.vnx.vnx_block import consts\nfrom delfin.drivers.dell_emc.vnx.vnx_block.alert_handler import AlertHandler\nfrom delfin.drivers.dell_emc.vnx.vnx_block.component_handler import \\\n    ComponentHandler\nfrom delfin.drivers.utils.tools import Tools\n\nsys.modules['delfin.cryptor'] = mock.Mock()\nfrom delfin import context\nfrom delfin.drivers.dell_emc.vnx.vnx_block.navi_handler import NaviHandler\nfrom delfin.drivers.dell_emc.vnx.vnx_block.navicli_client import NaviClient\nfrom delfin.drivers.dell_emc.vnx.vnx_block.vnx_block import VnxBlockStorDriver\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"dell_emc\",\n    \"model\": \"vnx_block\",\n    \"cli\": {\n        \"host\": \"110.143.132.231\",\n        \"port\": 22,\n        \"username\": \"user\",\n        \"password\": \"cGFzc3dvcmQ=\"\n    }\n}\nAGENT_INFOS = \"\"\"\n    Agent Rev:           7.33.1 (0.38)\n    Name:                K10\n    Desc:\n    Revision:            05.33.000.5.038\n    Model:               VNX5400\n    Serial No:           CETV00000001\n\"\"\"\nDOMAIN_INFOS = \"\"\"\nNode: APM00011111111\nIP Address: 111.222.33.55\n(Master)\nName: CX300I_33_55\nPort: 80\nSecure Port: 443\nIP Address: 111.222.33.44\nName: CX300I_33_44\nPort: 80\nSecure Port: 443\n\"\"\"\nDISK_INFOS = \"\"\"\n        Bus 0 Enclosure 0  Disk 0\n        State:                   Enabled\n        Capacity:                54969\n        \"\"\"\nPOOL_INFOS = \"\"\"\n        Pool Name:  Pool 1\n        Pool ID:  1\n        Description:\n        State:  Offline\n        Status:  Storage Pool requires recovery. 
service provider(0x712d8518)\n        User Capacity (GBs):  8583.732\n        Consumed Capacity (GBs):  8479.780\n        Available Capacity (GBs):  103.953\n        Total Subscribed Capacity (GBs):  8479.780\n        \"\"\"\nRAID_INFOS = \"\"\"\n        RaidGroup ID:                              0\n        RaidGroup State:                           Valid_luns\n        Raw Capacity (Blocks):                     1688426496\n        Logical Capacity (Blocks):                 1688420352\n        Free Capacity (Blocks,non-contiguous):     522260480\n        \"\"\"\nLUN_INFOS = \"\"\"\n        LOGICAL UNIT NUMBER 239\n        Name:  sun_data_VNX_2\n        User Capacity (GBs):  9.000\n        Consumed Capacity (GBs):  1.753\n        Pool Name:  Migration_pool\n        Current State:  Ready\n        Status:  OK(0x0)\n        Is Thin LUN:  Yes\n        Is Compressed:  No\n        \"\"\"\nGET_ALL_LUN_INFOS = \"\"\"\n        LOGICAL UNIT NUMBER 186\n        Name                        LN_10G_01\n        RAIDGroup ID:               1\n        State:                      Bound\n        LUN Capacity(Megabytes):    10240\n        Is Thin LUN:                YES\n        \"\"\"\nCER_INFOS = \"\"\"\n-----------------------------\nSubject:CN=TrustedRoot,C=US,ST=MA,L=Hopkinton,EMAIL=rsa@emc.com,OU=CSP,O=RSA\nIssuer:1.1.1.1\nSerial#: 00d8280b0c863f6d4e\nValid From: 20090407135111Z\nValid To: 20190405135111Z\n-----------------------------\nSubject:CN=TrustedRoot,C=US,ST=MA,L=Hopkinton,EMAIL=rsa@emc.com,OU=CSP,O=RSA\nIssuer:110.143.132.231\nSerial#: 00d8280b0c863f6d4e\nValid From: 20090407135111Z\nValid To: 20190405135111Z\n        \"\"\"\n\nDISK_DATAS = \"\"\"\n        Bus 0 Enclosure 0  Disk 0\n        Vendor Id:               HITACHI\n        Product Id:              HUC10906 CLAR600\n        Product Revision:        C430\n        Type:                    193: RAID5 129: RAID5 146: RAID5 151: RAID5\n        State:                   Enabled\n        Hot Spare:               N/A\n        Serial Number:           KSJEX35J\n        Capacity:                549691\n        Raid Group ID:           0\n        Drive Type:              SAS\n        Current Speed: 6Gbps\n        \"\"\"\nSP_DATAS = \"\"\"\n\nSP A\n\nCabinet:             DPE9\nSignature For The SP:          3600485\nSignature For The Peer SP:     3600424\nRevision Number For The SP:    05.33.000.5.038\nSerial Number For The SP:      CF2Z7134700101\nMemory Size For The SP:        16384\nSP SCSI ID if Available:       0\n\nSP B\n\nCabinet:             DPE9\nSignature For The SP:          3600424\nSignature For The Peer SP:     3600485\nRevision Number For The SP:    05.33.000.5.038\nSerial Number For The SP:      CF2Z7134700040\nMemory Size For The SP:        16384\nSP SCSI ID if Available:       0\n\n\n\"\"\"\nRESUME_DATAS = \"\"\"\nStorage Processor A\n  CPU Module\n    EMC Serial Number:               CF2Z7134700101\n    Assembly Name:                   JFSP 1.8GHZ 4C CPU GEN3\n\nStorage Processor B\n  CPU Module\n    EMC Serial Number:               CF2Z7134700040\n    Assembly Name:                   JFSP 1.8GHZ 4C CPU GEN3\n\"\"\"\nPORT_DATAS = \"\"\"\nInformation about each SPPORT:\n\nSP Name:             SP A\nSP Port ID:          6\nSP UID:              50:06:01:60:88:60:24:1E:50:06:01:66:08:60:24:1E\nLink Status:         Up\nPort Status:         Online\nSwitch Present:      YES\nSwitch UID:          10:00:C4:F5:7C:20:05:80:20:0E:C4:F5:7C:20:05:80\nSP Source ID:        1773056\nALPA Value:         0\nSpeed Value :         8Gbps\nAuto 
Negotiable :     YES\nAvailable Speeds:\n2Gbps\n4Gbps\n8Gbps\nAuto\nRequested Value:      Auto\nMAC Address:         Not Applicable\nSFP State:           Online\nReads:               510068560\nWrites:              331050079\nBlocks Read:         1504646456\nBlocks Written:      236376118\nQueue Full/Busy:     12246\nI/O Module Slot:     3\nPhysical Port ID:    0\n\"\"\"\nBUS_PORT_DATAS = \"\"\"\n\nBus 0\n\nCurrent Speed: 6Gbps.\nAvailable Speeds:\n              3Gbps.\n              6Gbps.\n\nSPA SFP State: N/A\nSPB SFP State: N/A\n\nI/O Module Slot: Base Module\nPhysical Port ID: 0\nPort Combination In Use: No\n\n\n\nSPA Connector State: None\nSPB Connector State: None\n\n\"\"\"\nBUS_PORT_STATE_DATAS = \"\"\"\nInformation about each I/O module(s) on SPA:\n\nSP ID: A\nI/O Module Slot: Base Module\nI/O Module Type: SAS\nI/O Module State: Present\nI/O Module Substate: Good\nI/O Module Power state: On\nI/O Carrier: No\n\nInformation about each port on this I/O module:\nPhysical Port ID: 0\nPort State: Enabled\nPhysical Port ID: 1\nPort State: Missing\nInformation about each I/O module(s) on SPB:\n\nSP ID: B\nI/O Module Slot: Base Module\nI/O Module Type: SAS\nI/O Module State: Present\nI/O Module Substate: Good\nI/O Module Power state: On\nI/O Carrier: No\n\nInformation about each port on this I/O module:\nPhysical Port ID: 0\nPort State: Enabled\nPhysical Port ID: 1\nPort State: Missing\n\"\"\"\nISCSI_PORT_DATAS = \"\"\"\nSP: A\nPort ID: 4\nPort WWN: iqn.1992-04.com.emc:cx.apm00093300877.a4\niSCSI Alias: 0877.a4\nIP Address: 172.20.1.140\nSubnet Mask: 255.255.255.0\nGateway Address: 172.20.1.1\nInitiator Authentication: Not Available\n\nSP: A\nPort ID: 5\nPort WWN: iqn.1992-04.com.emc:cx.apm00093300877.a5\niSCSI Alias: 0877.a5\n\nSP: A\nPort ID: 6\nPort WWN: iqn.1992-04.com.emc:cx.apm00093300877.a6\niSCSI Alias: 0877.a6\nIP Address: 172.20.2.140\nSubnet Mask: 255.255.255.0\nGateway Address: 172.20.2.1\nInitiator Authentication: Not Available\n\nSP: A\nPort ID: 7\nPort WWN: iqn.1992-04.com.emc:cx.apm00093300877.a7\niSCSI Alias: 0877.a7\n\nSP: B\nPort ID: 4\nPort WWN: iqn.1992-04.com.emc:cx.apm00093300877.b4\niSCSI Alias: 0877.b4\nIP Address: 172.20.1.141\nSubnet Mask: 255.255.255.0\nGateway Address: 172.20.1.1\nInitiator Authentication: Not Available\n\nSP: B\nPort ID: 5\nPort WWN: iqn.1992-04.com.emc:cx.apm00093300877.b5\niSCSI Alias: 0877.b5\n\nSP: B\nPort ID: 6\nPort WWN: iqn.1992-04.com.emc:cx.apm00093300877.b6\niSCSI Alias: 0877.b6\nIP Address: 172.20.2.141\nSubnet Mask: 255.255.255.0\nGateway Address: 172.20.2.1\nInitiator Authentication: Not Available\n\nSP: B\nPort ID: 7\nPort WWN: iqn.1992-04.com.emc:cx.apm00093300877.b7\niSCSI Alias: 0877.b7\n\nSP: B\nPort ID: 9\nPort WWN: 50:06:01:60:BB:20:13:0D:50:06:01:69:3B:24:13:0D\niSCSI Alias: N/A\nIP Address: N/A\nSubnet Mask: N/A\nGateway Address: N/A\nInitiator Authentication: N/A\n\nSP: A\nPort ID: 8\nPort WWN: 50:06:01:60:BB:20:13:0D:50:06:01:60:3B:24:13:0D\niSCSI Alias: N/A\nIP Address: N/A\nSubnet Mask: N/A\nGateway Address: N/A\nInitiator Authentication: N/A\n\nSP: A\nPort ID: 9\nPort WWN: 50:06:01:60:BB:20:13:0D:50:06:01:61:3B:24:13:0D\niSCSI Alias: N/A\nIP Address: N/A\nSubnet Mask: N/A\nGateway Address: N/A\nInitiator Authentication: N/A\n\nSP: B\nPort ID: 8\nPort WWN: 50:06:01:60:BB:20:13:0D:50:06:01:68:3B:24:13:0D\niSCSI Alias: N/A\nIP Address: N/A\nSubnet Mask: N/A\nGateway Address: N/A\nInitiator Authentication: N/A\n\"\"\"\nIO_PORT_CONFIG_DATAS = \"\"\"\nSP ID :  A\nI/O Module Slot :  3\nI/O Module Type :  Fibre Channel\nI/O 
Module State :  Present\n\nSP ID :  A\nI/O Module Slot :  Base Module\nI/O Module Type :  SAS\n\nSP ID :  B\nI/O Module Slot :  Base Module\nI/O Module Type :  SAS\n\"\"\"\n\nVIEW_DATAS = \"\"\"\nStorage Group Name:    AIX_PowerHA_node2\nStorage Group UID:     0B:33:4A:6E:81:38:EC:11:90:2B:00:60:16:63\nHBA/SP Pairs:\n\n  HBA UID                                          SP Name     SPPort\n -------                                          -------     ------\n  20:00:00:00:C9:76:5E:79:10:00:00:00:C9:76:5E:79   SP A         6\nHost name:             AIX_21\n  20:00:00:00:C9:75:80:4C:10:00:00:00:C9:75:80:4C   SP B         3\nHost name:             AIX_21\n\nHLU/ALU Pairs:\n\n  HLU Number     ALU Number\n  ----------     ----------\n    1               335\nShareable:             YES\n\"\"\"\nHBA_DATAS = \"\"\"\nInformation about each HBA:\n\nHBA UID:                 20:00:00:00:C9:9B:57:79:10:00:00:00:C9:9B:57:79\nServer Name:             aix_ma\nServer IP Address:       8.44.129.26\nHBA Model Description:\nHBA Vendor Description:\nHBA Device Driver Name:   N/A\nInformation about each port of this HBA:\n\n    SP Name:               SP A\n    SP Port ID:            6\n    HBA Devicename:        N/A\n    Trusted:               NO\n    Logged In:             NO\n    Defined:               YES\n    Initiator Type:          3\n    StorageGroup Name:     None\n\"\"\"\n\nARCHIVE_DATAS = \"\"\"\nIndex Size in KB     Last Modified            Filename\n2 46 07/08/2021 01:20:29  CETV2135000041_SPA_2021-07-07_17-20-26-GMT_P08-00.nar\n3 40 07/08/2021 03:56:28  CETV2135000041_SPA_2021-07-07_19-56-25-GMT_P08-00.nar\n4 31 07/08/2021 06:32:29  CETV2135000041_SPA_2021-07-07_22-32-26-GMT_P08-00.nar\n5 02 07/08/2021 09:08:29  CETV2135000041_SPA_2021-07-08_01-08-26-GMT_P08-00.nar\n6 76 07/08/2021 11:44:29  CETV2135000041_SPA_2021-07-08_03-44-26-GMT_P08-00.nar\n7 48 07/08/2021 14:20:28  CETV2135000041_SPA_2021-07-08_06-20-26-GMT_P08-00.nar\n8 34 07/08/2021 16:31:13  CETV2135000041_SPA_2021-07-08_08-31-11-GMT_P08-00.nar\n\"\"\"\nPERFORMANCE_LINES_MAP = {\n    'SP A': [['SP A', '07/08/2021 12:15:56', '', '', '', '', '', '', '', '',\n              '0', '', '', '0', '', '', '0', '', '', '0', '', '', '0', '',\n              '', '0', '', '', '0', '', '', '0', '', '', '0'],\n             ['SP A', '07/08/2021 12:16:56', '', '', '', '', '', '', '', '',\n              '0', '', '', '0', '', '', '0', '', '', '0', '', '', '0', '', '',\n              '0', '', '', '0', '', '', '0', '', '', '0'],\n             ['SP A', '07/08/2021 12:17:55', '', '', '', '', '', '', '',\n              '', '0', '', '', '0', '', '', '0', '', '', '0', '', '', '0',\n              '', '', '0', '', '', '0', '', '', '0', '', '', '0'],\n             ['SP A', '07/08/2021 12:18:56', '', '', '', '', '', '', '', '',\n              '0', '', '', '0.28', '', '', '0.73', '', '', '0', '', '',\n              '0', '', '', '0', '', '', '0.28', '', '', '', '', '', '0.73'],\n             ['SP A', '07/08/2021 12:19:56', '', '', '', '', '', '', '', '',\n              '0', '', '', '0', '', '', '0', '', '', '0', '', '', '0', '',\n              '', '0', '', '', '0', '', '', '0', '', '', '0']],\n    'SP B': [['SP B', '07/08/2021 12:15:56', '', '', '', '', '', '', '', '',\n              '0', '', '', '0.9', '', '', '2.6', '', '', '0.9', '', '',\n              '2.4', '', '', '1', '', '', '0.7', '', '', '', '', '', '0.2'],\n             ['SP B', '07/08/2021 12:16:56', '', '', '', '', '', '', '', '',\n              '0', '', '', '0.1', '', '', '5.6', '', '', '0.2', '', '', '6.7',\n 
             '', '', '2', '', '', '1.6', '', '', '', '', '', '1.4'],\n             ['SP B', '07/08/2021 12:17:55', '', '', '', '', '', '', '',\n              '', '0', '', '', '0.2', '', '', '4.6', '', '', '0.3', '', '',\n              '1.7', '', '', '3', '', '', '2.6', '', '', '', '', '', '2.4'],\n             ['SP B', '07/08/2021 12:18:56', '', '', '', '', '', '', '', '',\n              '0', '', '', '0.3', '', '', '6.6', '', '', '0.4', '', '',\n              '2.7', '', '', '4', '', '', '3.6', '', '', '', '', '', '3.4'],\n             ['SP B', '07/08/2021 12:19:56', '', '', '', '', '', '', '', '',\n              '0', '', '', '0.4', '', '', '7.6', '', '', '0.5', '', '',\n              '3.7', '', '', '5', '', '', '4.6', '', '', '', '', '', '4.4']]\n}\nNAR_INTERVAL_DATAS = \"\"\"\nArchive Poll Interval (sec):  60\n\"\"\"\n\nAGENT_RESULT = {\n    'agent_rev': '7.33.1 (0.38)',\n    'name': 'K10',\n    'desc': '',\n    'revision': '05.33.000.5.038',\n    'model': 'VNX5400',\n    'serial_no': 'CETV00000001'\n}\nSTORAGE_RESULT = {\n    'name': 'APM00011111111',\n    'vendor': 'DELL EMC',\n    'model': 'VNX5400',\n    'status': 'normal',\n    'serial_number': 'CETV00000001',\n    'firmware_version': '05.33.000.5.038',\n    'total_capacity': 10081183274631,\n    'raw_capacity': 57639174144,\n    'used_capacity': 9702168298782,\n    'free_capacity': 379016049590\n}\nDOMAIN_RESULT = [\n    {\n        'node': 'APM00011111111',\n        'ip_address': '111.222.33.55',\n        'master': 'True',\n        'name': 'CX300I_33_55',\n        'port': '80',\n        'secure_port': '443'\n    }]\nPOOLS_RESULT = [\n    {\n        'name': 'Pool 1',\n        'storage_id': '12345',\n        'native_storage_pool_id': '1',\n        'description': '',\n        'status': 'offline',\n        'storage_type': 'block',\n        'total_capacity': 9216712054407,\n        'subscribed_capacity': 9105094444318,\n        'used_capacity': 9105094444318,\n        'free_capacity': 111618683830\n    }]\nRAID_RESULT = [\n    {\n        'raidgroup_id': '0',\n        'raidgroup_state': 'Valid_luns',\n        'raw_capacity_blocks': '1688426496',\n        'logical_capacity_blocks': '1688420352',\n        'free_capacity_blocks,non-contiguous': '522260480'\n    }]\nALL_LUN_RESULT = [\n    {\n        'logical_unit_number': '186',\n        'name': 'LN_10G_01',\n        'raidgroup_id': '1',\n        'state': 'Bound',\n        'lun_capacitymegabytes': '10240',\n        'is_thin_lun': 'YES'\n    }]\nPOOLS_ANALYSE_RESULT = [{\n    'pool_name': 'Pool 1',\n    'pool_id': '1',\n    'description': '',\n    'state': 'Offline',\n    'status': 'Storage Pool requires recovery. 
service provider(0x712d8518)',\n    'user_capacity_gbs': '8583.732',\n    'consumed_capacity_gbs': '8479.780',\n    'available_capacity_gbs': '103.953',\n    'total_subscribed_capacity_gbs': '8479.780'\n}]\nVOLUMES_RESULT = [\n    {\n        'name': 'sun_data_VNX_2',\n        'storage_id': '12345',\n        'status': 'normal',\n        'native_volume_id': '239',\n        'native_storage_pool_id': '',\n        'type': 'thin',\n        'total_capacity': 9663676416,\n        'used_capacity': 1882269417,\n        'free_capacity': 7781406998,\n        'compressed': False,\n        'wwn': None\n    }]\nALERTS_RESULT = [\n    {\n        'alert_id': '0x76cc',\n        'alert_name': 'Navisphere Agent, version 7.33',\n        'severity': 'Critical',\n        'category': 'Fault',\n        'type': 'EquipmentAlarm',\n        'occur_time': 1585114217000,\n        'description': 'Navisphere Agent, version 7.33',\n        'resource_type': 'Storage',\n        'match_key': 'b969bbaa22b62ebcad4074618cc29b94'\n    }]\nALERT_RESULT = {\n    'alert_id': '0x761f',\n    'alert_name': 'Unisphere can no longer manage',\n    'severity': 'Critical',\n    'category': 'Fault',\n    'type': 'EquipmentAlarm',\n    'occur_time': 1614310456716,\n    'description': 'Unisphere can no longer manage',\n    'resource_type': 'Storage',\n    'match_key': '8e97fe0af779d78bad8f2de52e15c65c'\n}\nDISK_RESULT = [\n    {\n        'name': 'Bus 0 Enclosure 0 Disk 0',\n        'storage_id': '12345',\n        'native_disk_id': 'Bus0Enclosure0Disk0',\n        'serial_number': 'KSJEX35J',\n        'manufacturer': 'HITACHI',\n        'model': 'HUC10906 CLAR600',\n        'firmware': 'C430',\n        'speed': None,\n        'capacity': 576392790016,\n        'status': 'normal',\n        'physical_type': 'sas',\n        'logical_type': 'unknown',\n        'health_score': None,\n        'native_disk_group_id': None,\n        'location': 'Bus 0 Enclosure 0 Disk 0'\n    }]\nSP_RESULT = [\n    {\n        'name': 'SP A',\n        'storage_id': '12345',\n        'native_controller_id': '3600485',\n        'status': 'normal',\n        'location': None,\n        'soft_version': '05.33.000.5.038',\n        'cpu_info': 'JFSP 1.8GHZ 4C CPU GEN3',\n        'cpu_count': 1,\n        'memory_size': '17179869184'\n    },\n    {\n        'name': 'SP B',\n        'storage_id': '12345',\n        'native_controller_id': '3600424',\n        'status': None,\n        'location': None,\n        'soft_version': '05.33.000.5.038',\n        'cpu_info': 'JFSP 1.8GHZ 4C CPU GEN3',\n        'memory_size': '16777216'\n    }]\nPORT_RESULT = [\n    {\n        'name': 'Slot A3,Port 0',\n        'storage_id': '12345',\n        'native_port_id': 'A-6',\n        'location': 'Slot A3,Port 0',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'fc',\n        'logical_type': None,\n        'speed': 8000000000,\n        'max_speed': 8000000000,\n        'native_parent_id': None,\n        'wwn': '50:06:01:60:88:60:24:1E:50:06:01:66:08:60:24:1E',\n        'mac_address': None,\n        'ipv4': '172.20.2.140',\n        'ipv4_mask': '255.255.255.0',\n        'ipv6': None,\n        'ipv6_mask': None\n    }]\nVIEW_RESULT = [\n    {\n        'native_masking_view_id': '0B:33:4A:6E:81:38:EC:11:90:2B:00:'\n                                  '60:16:63_AIX_21_335',\n        'name': 'AIX_PowerHA_node2',\n        'storage_id': '12345',\n        'native_storage_host_id': 'AIX_21',\n        'native_volume_id': '335'\n    }]\nINITIATOR_RESULT = [\n    {\n     
   'name': '20:00:00:00:C9:9B:57:79:10:00:00:00:C9:9B:57:79',\n        'storage_id': '12345',\n        'native_storage_host_initiator_id': '20:00:00:00:C9:9B:57:79:10:'\n                                            '00:00:00:C9:9B:57:79',\n        'wwn': '20:00:00:00:C9:9B:57:79:10:00:00:00:C9:9B:57:79',\n        'type': 'unknown',\n        'status': 'online',\n        'native_storage_host_id': 'aix_ma'\n    }]\nHOST_RESULT = [\n    {\n        'name': 'aix_ma',\n        'storage_id': '12345',\n        'native_storage_host_id': 'aix_ma',\n        'os_type': 'Unknown',\n        'status': 'normal',\n        'ip_address': '8.44.129.26'\n    }]\nMETRICS_RESULT = [\n    constants.metric_struct(name='iops', labels={\n        'storage_id': '12345',\n        'resource_type': 'controller',\n        'resource_id': '3600485',\n        'type': 'RAW',\n        'unit': 'IOPS'\n    }, values={\n        1625717816000: 0.0,\n        1625717875000: 0.0,\n        1625717936000: 0.73,\n        1625717996000: 0.0\n    }),\n    constants.metric_struct(name='iops', labels={\n        'storage_id': '12345',\n        'resource_type': 'port',\n        'resource_id': 'A-6',\n        'type': 'RAW',\n        'unit': 'IOPS'\n    }, values={\n        1625717816000: 3.0,\n        1625717875000: 4.0,\n        1625717936000: 5.0,\n        1625717996000: 6.0\n    }),\n    constants.metric_struct(name='iops', labels={\n        'storage_id': '12345',\n        'resource_type': 'disk',\n        'resource_id': 'Bus0Enclosure0Disk0',\n        'type': 'RAW',\n        'unit': 'IOPS'\n    }, values={\n        1625717816000: 4.0,\n        1625717875000: 5.0,\n        1625717936000: 6.0,\n        1625717996000: 6.0\n    }),\n    constants.metric_struct(name='iops', labels={\n        'storage_id': '12345',\n        'resource_type': 'volume',\n        'resource_id': '230',\n        'type': 'RAW',\n        'unit': 'IOPS'\n    }, values={\n        1625717816000: 0.0,\n        1625717875000: 0.0,\n        1625717936000: 0.0,\n        1625717996000: 0.0\n    })\n]\n\n\ndef create_driver():\n    NaviHandler.login = mock.Mock(return_value=\"05.33.000.5.038_test\")\n    return VnxBlockStorDriver(**ACCESS_INFO)\n\n\nclass TestVnxBlockStorageDriver(TestCase):\n    driver = create_driver()\n\n    def test_init(self):\n        NaviHandler.login = mock.Mock(return_value=\"05.33.000.5.038_test\")\n        vnx = VnxBlockStorDriver(**ACCESS_INFO)\n        self.assertEqual(vnx.version, \"05.33.000.5.038_test\")\n\n    def test_get_storage(self):\n        NaviClient.exec = mock.Mock(\n            side_effect=[DOMAIN_INFOS, AGENT_INFOS, DISK_INFOS, POOL_INFOS,\n                         RAID_INFOS])\n        storage = self.driver.get_storage(context)\n        self.assertDictEqual(storage, STORAGE_RESULT)\n\n    def test_get_pools(self):\n        NaviClient.exec = mock.Mock(side_effect=[POOL_INFOS, RAID_INFOS])\n        pools = self.driver.list_storage_pools(context)\n        self.assertDictEqual(pools[0], POOLS_RESULT[0])\n\n    def test_get_volumes(self):\n        NaviClient.exec = mock.Mock(\n            side_effect=[LUN_INFOS, POOL_INFOS, GET_ALL_LUN_INFOS])\n        volumes = self.driver.list_volumes(context)\n        self.assertDictEqual(volumes[0], VOLUMES_RESULT[0])\n\n    def test_get_alerts(self):\n        with self.assertRaises(Exception) as exc:\n            self.driver.list_alerts(context, None)\n        self.assertIn('Driver API list_alerts() is not Implemented',\n                      str(exc.exception))\n\n    def test_parse_alert(self):\n     
   alert = {\n            '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.1981.0.6',\n            '1.3.6.1.4.1.1981.1.4.3': 'A-CETV00000001',\n            '1.3.6.1.4.1.1981.1.4.4': 'K10',\n            '1.3.6.1.4.1.1981.1.4.5': '761f',\n            '1.3.6.1.4.1.1981.1.4.6': 'Unisphere can no longer manage',\n            '1.3.6.1.4.1.1981.1.4.7': 'VNX5400'\n        }\n        alert = self.driver.parse_alert(context, alert)\n        ALERT_RESULT['occur_time'] = alert['occur_time']\n        self.assertDictEqual(alert, ALERT_RESULT)\n\n    def test_cli_res_to_dict(self):\n        navi_handler = NaviHandler(**ACCESS_INFO)\n        agent_re = navi_handler.cli_res_to_dict(AGENT_INFOS)\n        self.assertDictEqual(agent_re, AGENT_RESULT)\n\n    def test_cli_res_to_list(self):\n        navi_handler = NaviHandler(**ACCESS_INFO)\n        re_list = navi_handler.cli_res_to_list(POOL_INFOS)\n        self.assertDictEqual(re_list[0], POOLS_ANALYSE_RESULT[0])\n\n    def test_cli_domain_to_dict(self):\n        navi_handler = NaviHandler(**ACCESS_INFO)\n        re_list = navi_handler.cli_domain_to_dict(DOMAIN_INFOS)\n        self.assertDictEqual(re_list[0], DOMAIN_RESULT[0])\n\n    def test_cli_lun_to_list(self):\n        navi_handler = NaviHandler(**ACCESS_INFO)\n        re_list = navi_handler.cli_lun_to_list(GET_ALL_LUN_INFOS)\n        self.assertDictEqual(re_list[0], ALL_LUN_RESULT[0])\n\n    @mock.patch.object(NaviClient, 'exec')\n    def test_init_cli(self, mock_exec):\n        mock_exec.return_value = 'test'\n        navi_handler = NaviHandler(**ACCESS_INFO)\n        re = navi_handler.navi_exe('abc')\n        self.assertEqual(re, 'test')\n        self.assertEqual(mock_exec.call_count, 1)\n\n    @mock.patch.object(NaviClient, 'exec')\n    def test_remove_cer(self, mock_exec):\n        navi_handler = NaviHandler(**ACCESS_INFO)\n        navi_handler.remove_cer()\n        self.assertEqual(mock_exec.call_count, 1)\n\n    def test_err_cli_res_to_dict(self):\n        with self.assertRaises(Exception) as exc:\n            navi_handler = NaviHandler(**ACCESS_INFO)\n            navi_handler.cli_res_to_dict({})\n        self.assertIn('arrange resource info error', str(exc.exception))\n\n    def test_err_cli_res_to_list(self):\n        with self.assertRaises(Exception) as exc:\n            navi_handler = NaviHandler(**ACCESS_INFO)\n            navi_handler.cli_res_to_list({})\n        self.assertIn('cli resource to list error', str(exc.exception))\n\n    @mock.patch.object(time, 'mktime')\n    def test_time_str_to_timestamp(self, mock_mktime):\n        tools = Tools()\n        time_str = '03/26/2021 14:25:36'\n        mock_mktime.return_value = 1616739936\n        re = tools.time_str_to_timestamp(time_str, consts.TIME_PATTERN)\n        self.assertEqual(1616739936000, re)\n\n    @mock.patch.object(time, 'strftime')\n    def test_timestamp_to_time_str(self, mock_strftime):\n        tools = Tools()\n        mock_strftime.return_value = '03/26/2021 14:25:36'\n        timestamp = 1616739936000\n        re = tools.timestamp_to_time_str(timestamp, consts.TIME_PATTERN)\n        self.assertEqual('03/26/2021 14:25:36', re)\n\n    def test_cli_exec(self):\n        with self.assertRaises(Exception) as exc:\n            command_str = 'abc'\n            NaviClient.exec(command_str)\n        self.assertIn('Component naviseccli could not be found',\n                      str(exc.exception))\n\n    def test_analyse_cer(self):\n        re_map = {\n            '1.1.1.1': {\n                'subject': 'CN=TrustedRoot,C=US,ST=MA,L=Hopkinton,'\n 
                          'EMAIL=rsa@emc.com,OU=CSP,O=RSA',\n                'issuer': '1.1.1.1',\n                'serial#': '00d8280b0c863f6d4e',\n                'valid_from': '20090407135111Z',\n                'valid_to': '20190405135111Z'\n            }\n        }\n        navi_handler = NaviHandler(**ACCESS_INFO)\n        cer_map = navi_handler.analyse_cer(CER_INFOS, host_ip='1.1.1.1')\n        self.assertDictEqual(cer_map, re_map)\n\n    def test_analyse_cer_exception(self):\n        with self.assertRaises(Exception) as exc:\n            navi_handler = NaviHandler(**ACCESS_INFO)\n            navi_handler.analyse_cer(CER_INFOS)\n        self.assertIn('arrange cer info error', str(exc.exception))\n\n    def test_get_resources_info_exception(self):\n        with self.assertRaises(Exception) as exc:\n            NaviClient.exec = mock.Mock(side_effect=[LUN_INFOS])\n            navi_handler = NaviHandler(**ACCESS_INFO)\n            navi_handler.get_resources_info('abc', None)\n        self.assertIn('object is not callable', str(exc.exception))\n\n    def test_parse_alert_exception(self):\n        with self.assertRaises(Exception) as exc:\n            AlertHandler.parse_alert(None)\n        self.assertIn('The results are invalid', str(exc.exception))\n\n    def test_clear_alert(self):\n        self.driver.clear_alert(None, None)\n\n    def test_remove_trap_config(self):\n        self.driver.remove_trap_config(None, None)\n\n    def test_get_disks(self):\n        NaviClient.exec = mock.Mock(return_value=DISK_DATAS)\n        disks = self.driver.list_disks(context)\n        self.assertDictEqual(disks[0], DISK_RESULT[0])\n\n    def test_get_controllers(self):\n        NaviClient.exec = mock.Mock(side_effect=[SP_DATAS, RESUME_DATAS])\n        controllers = self.driver.list_controllers(context)\n        self.assertDictEqual(controllers[0], SP_RESULT[0])\n\n    def test_get_ports(self):\n        NaviClient.exec = mock.Mock(\n            side_effect=[IO_PORT_CONFIG_DATAS, ISCSI_PORT_DATAS, PORT_DATAS,\n                         BUS_PORT_DATAS, BUS_PORT_STATE_DATAS])\n        ports = self.driver.list_ports(context)\n        self.assertDictEqual(ports[0], PORT_RESULT[0])\n\n    def test_get_masking_views(self):\n        NaviClient.exec = mock.Mock(side_effect=[VIEW_DATAS])\n        views = self.driver.list_masking_views(context)\n        self.assertDictEqual(views[0], VIEW_RESULT[0])\n\n    def test_get_initiators(self):\n        NaviClient.exec = mock.Mock(side_effect=[HBA_DATAS,\n                                                 IO_PORT_CONFIG_DATAS,\n                                                 ISCSI_PORT_DATAS, PORT_DATAS,\n                                                 BUS_PORT_DATAS,\n                                                 BUS_PORT_STATE_DATAS])\n        initiators = self.driver.list_storage_host_initiators(context)\n        self.assertDictEqual(initiators[0], INITIATOR_RESULT[0])\n\n    def test_get_hosts(self):\n        NaviClient.exec = mock.Mock(side_effect=[HBA_DATAS])\n        hosts = self.driver.list_storage_hosts(context)\n        self.assertDictEqual(hosts[0], HOST_RESULT[0])\n\n    def test_get_perf_metrics(self):\n        driver = create_driver()\n        resource_metrics = {\n            'controller': [\n                'iops', 'readIops', 'writeIops',\n                'throughput', 'readThroughput', 'writeThroughput',\n                'responseTime'\n            ],\n            'port': [\n                'iops', 'readIops', 'writeIops',\n                
'throughput', 'readThroughput', 'writeThroughput',\n                'responseTime'\n            ],\n            'disk': [\n                'iops', 'readIops', 'writeIops',\n                'throughput', 'readThroughput', 'writeThroughput',\n                'responseTime'\n            ],\n            'volume': [\n                'iops', 'readIops', 'writeIops',\n                'throughput', 'readThroughput', 'writeThroughput',\n                'responseTime',\n                'cacheHitRatio', 'readCacheHitRatio', 'writeCacheHitRatio',\n                'ioSize', 'readIoSize', 'writeIoSize',\n            ]\n        }\n        start_time = 1625717756000\n        end_time = 1625717996000\n        ComponentHandler._filter_performance_data = mock.Mock(\n            side_effect=[PERFORMANCE_LINES_MAP])\n        NaviClient.exec = mock.Mock(\n            side_effect=[ARCHIVE_DATAS, SP_DATAS, PORT_DATAS, DISK_DATAS,\n                         GET_ALL_LUN_INFOS, NAR_INTERVAL_DATAS])\n        ComponentHandler._remove_archive_file = mock.Mock(return_value=\"\")\n        metrics = driver.collect_perf_metrics(context, '12345',\n                                              resource_metrics, start_time,\n                                              end_time)\n        self.assertEqual(metrics[0][1][\"resource_id\"], '3600485')\n\n    def test_get_capabilities(self):\n        cap = VnxBlockStorDriver.get_capabilities(context)\n        self.assertIsNotNone(cap.get('resource_metrics'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('controller'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('volume'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('port'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('disk'))\n"
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/vplex/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/dell_emc/vplex/test_emc_vplex.py",
    "content": "# Copyright 2021 The SODA Authors.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n#   http:#www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nfrom unittest import TestCase, mock\r\n\r\nfrom delfin import context\r\nfrom delfin.drivers.dell_emc.vplex.rest_handler import RestHandler\r\nfrom delfin.drivers.dell_emc.vplex.vplex_stor import VplexStorageDriver\r\n\r\nACCESS_INFO = {\r\n    \"storage_id\": \"12345\",\r\n    \"vendor\": \"dell_emc\",\r\n    \"model\": \"vplex\",\r\n    \"rest\": {\r\n        \"host\": \"8.44.162.250\",\r\n        \"port\": 443,\r\n        \"username\": \"service\",\r\n        \"password\": \"Abcdef@123\"\r\n    }\r\n}\r\n\r\nTRAP_INFO = {\r\n    \"1.3.6.1.2.1.1.3.0\": \"0\",\r\n    '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.1139.21.0',\r\n    '1.3.6.1.4.1.1139.21.1.5.0': 'this is test',\r\n    '1.3.6.1.4.1.1139.21.1.3.0': '123321'\r\n}\r\n\r\ntrap_result = {\r\n    'alert_id': '123321',\r\n    'alert_name': 'this is test',\r\n    'severity': 'Informational',\r\n    'category': 'Fault',\r\n    'type': 'EquipmentAlarm',\r\n    'occur_time': 1614067724000,\r\n    'description': 'this is test',\r\n    'resource_type': 'Storage',\r\n    'location': '',\r\n    'match_key': '8c6d115258631625b625486f81b09532'\r\n}\r\n\r\nGET_ALL_CLUSTER = {\r\n    \"context\": [{\r\n        \"children\": [{\r\n            \"name\": \"cluster-1\",\r\n            \"type\": \"cluster\"\r\n        }\r\n        ]\r\n    }\r\n    ]\r\n}\r\n\r\nGET_ALL_LUNS = {\r\n    \"context\": [\r\n        {\r\n            \"children\": [\r\n                {\r\n                    \"name\": \"device_VPLEX_LUN0_1_vol\",\r\n                    \"type\": \"virtual-volume\"\r\n                }\r\n            ]\r\n        }\r\n    ]\r\n}\r\n\r\nGET_LUN = {\r\n    \"context\": [\r\n        {\r\n            \"attributes\": [\r\n                {\r\n                    \"name\": \"capacity\",\r\n                    \"value\": \"644245094400B\"\r\n                },\r\n                {\r\n                    \"name\": \"health-state\",\r\n                    \"value\": \"ok\"\r\n                },\r\n                {\r\n                    \"name\": \"operational-status\",\r\n                    \"value\": \"ok\"\r\n                },\r\n                {\r\n                    \"name\": \"supporting-device\",\r\n                    \"value\": \"device__VPLEX_LUN0_1\"\r\n                },\r\n                {\r\n                    \"name\": \"thin-enabled\",\r\n                    \"value\": \"unavailable\"\r\n                },\r\n                {\r\n                    \"name\": \"vpd-id\",\r\n                    \"value\": \"VPD83T3:60000000000000000000000000000000\"\r\n                }\r\n            ]\r\n        }\r\n    ]\r\n}\r\nvolume_result = [{\r\n    'name': 'device_VPLEX_LUN0_1_vol',\r\n    'storage_id': '12345',\r\n    'description': 'EMC VPlex volume',\r\n    'status': 'normal',\r\n    'native_volume_id': 'VPD83T3:60000000000000000000000000000000',\r\n    'native_storage_pool_id': 'device__VPLEX_LUN0_1',\r\n    
'type': 'thick',\r\n    'total_capacity': 644245094400,\r\n    'used_capacity': 644245094400,\r\n    'free_capacity': 0,\r\n    'wwn': '60000000000000000000000000000000'\r\n}\r\n]\r\n\r\nGET_ALL_POOLS = {\r\n    \"context\": [\r\n        {\r\n            \"children\": [\r\n                {\r\n                    \"name\": \"Device_KLM_test01\",\r\n                    \"type\": \"local-device\"\r\n                }\r\n            ]\r\n        }\r\n    ]\r\n}\r\n\r\nGET_POOL = {\r\n    \"context\": [\r\n        {\r\n            \"attributes\": [\r\n                {\r\n                    \"name\": \"capacity\",\r\n                    \"value\": \"732212254720B\"\r\n                },\r\n                {\r\n                    \"name\": \"health-state\",\r\n                    \"value\": \"ok\"\r\n                },\r\n                {\r\n                    \"name\": \"operational-status\",\r\n                    \"value\": \"ok\"\r\n                },\r\n                {\r\n                    \"name\": \"system-id\",\r\n                    \"value\": \"Device_KLM_test01\"\r\n                },\r\n                {\r\n                    \"name\": \"virtual-volume\",\r\n                    \"value\": \"Volume_CLARiiON0041_KLM_test01\"\r\n                }\r\n            ]\r\n        }\r\n    ]\r\n}\r\n\r\npool_result = [\r\n    {\r\n        'name': 'Device_KLM_test01',\r\n        'storage_id': '12345',\r\n        'native_storage_pool_id': 'Device_KLM_test01',\r\n        'description': 'EMC VPlex Pool',\r\n        'status': 'normal',\r\n        'storage_type': 'block',\r\n        'total_capacity': 732212254720,\r\n        'used_capacity': 732212254720,\r\n        'free_capacity': 0\r\n    }\r\n]\r\n\r\nGET_HEALH_CHECK = {\r\n    \"context\": None,\r\n    \"message\": \"health-check -l\",\r\n    \"exception\": None,\r\n    \"custom-data\": \"Product Version: 6.1.0.01.00.13\\n\"\r\n                   \"Product Type: Local\\n\"\r\n}\r\n\r\nGET_CLUSTER = {\r\n    \"context\": [\r\n        {\r\n            \"type\": \"cluster\",\r\n            \"parent\": \"/clusters\",\r\n            \"attributes\": [\r\n                {\r\n                    \"name\": \"health-state\",\r\n                    \"value\": \"major-failure\"\r\n                },\r\n                {\r\n                    \"name\": \"operational-status\",\r\n                    \"value\": \"degraded\"\r\n                },\r\n                {\r\n                    \"name\": \"top-level-assembly\",\r\n                    \"value\": \"FNM00000000000\"\r\n                }\r\n            ],\r\n        }\r\n    ]\r\n}\r\n\r\nstorage_result = {\r\n    'name': 'cluster-1',\r\n    'vendor': 'DELL EMC',\r\n    'description': 'EMC VPlex Storage',\r\n    'status': 'degraded',\r\n    'serial_number': 'FNM00000000000',\r\n    'firmware_version': ' 6.1.0.01.00.13',\r\n    'model': 'EMC VPLEX  Local',\r\n    'location': '',\r\n    'raw_capacity': 12754334882201,\r\n    'total_capacity': 11654823254425,\r\n    'used_capacity': 8983009998929,\r\n    'free_capacity': 2671813255496\r\n}\r\n\r\nGET_ALL_STORAGE_VOLUME_SUMMARY = {\r\n    \"custom-data\": \"Capacity                total         11.6T\\n\\n\"\r\n}\r\n\r\nGET_ALL_POOLS_SUMMARY = {\r\n    \"custom-data\": \"total capacity    1.88T  total capacity    \"\r\n                   \"8.68T  total capacity    10.6T\\n\\n\"\r\n}\r\n\r\nGET_ALL_LUNS_SUMMARY = {\r\n    \"custom-data\": \"Total virtual-volume capacity is 8.17T.\"\r\n}\r\n\r\nGET_ALL_ENGINE_DIRECTOR = {\r\n    \"context\": 
[\r\n        {\r\n            \"type\": \"director\",\r\n            \"parent\": \"/engines/engine-1-1/directors\",\r\n            \"attributes\": [\r\n                {\r\n                    \"name\": \"director-id\",\r\n                    \"value\": \"0x00000000472029e9\"\r\n                },\r\n                {\r\n                    \"name\": \"communication-status\",\r\n                    \"value\": \"ok\"\r\n                },\r\n                {\r\n                    \"name\": \"name\",\r\n                    \"value\": \"director-1-1-A\"\r\n                }\r\n            ]\r\n        }\r\n    ]\r\n}\r\n\r\ncontrollers_result = [\r\n    {\r\n        'native_controller_id': '0x00000000472029e9',\r\n        'name': 'director-1-1-A',\r\n        'status': 'normal',\r\n        'location': '',\r\n        'storage_id': '12345',\r\n        'soft_version': '161.1.0.78.0',\r\n        'cpu_info': '',\r\n        'memory_size': ''\r\n    }\r\n]\r\n\r\nGET_VERSION_VERBOSE = {\r\n    \"context\": None,\r\n    \"message\": \"getsysinfo\",\r\n    \"exception\": None,\r\n    \"custom-data\": \"What:     Mgmt Server Software\\nVersion:  161.1.0.78\\n\t\"\r\n                   \"For director /engines/engine-1-1/directors/director-1-1-A:\"\r\n                   \"\\n\t\"\r\n                   \"What:     O/S\\n\t\"\r\n                   \"Version:  161.1.0.11 (SLES11)\\n\\n\t\"\r\n                   \"What:     NSFW\\n\t\"\r\n                   \"Version:  161.1.0.78.0\\n\\n\t\"\r\n                   \"What:      ZPEM\\n\t\"\r\n                   \"Version:  161.1.0.78.0-0\\n\t\"\r\n                   \"What:     Director Software\\n\t\"\r\n                   \"Version:  161.1.0.78.0\\n\\n\t\"\r\n                   \"What:     SSD Model: P30056-0000000000000 000000000\\n\t\"\r\n                   \"Version:  0005\\n\"\r\n}\r\n\r\nGET_ALL_CLUSTER_EXPORT_PORT = {\r\n    \"context\": [\r\n        {\r\n            \"type\": \"fc-target-port\",\r\n            \"parent\": \"/clusters/cluster-1/exports/ports\",\r\n            \"attributes\": [\r\n                {\r\n                    \"name\": \"director-id\",\r\n                    \"value\": \"0x00000000472029e9\"\r\n                },\r\n                {\r\n                    \"name\": \"enabled\",\r\n                    \"value\": \"true\"\r\n                },\r\n                {\r\n                    \"name\": \"export-status\",\r\n                    \"value\": \"ok\"\r\n                },\r\n                {\r\n                    \"name\": \"name\",\r\n                    \"value\": \"P00000000472029E9-A0-FC00\"\r\n                },\r\n                {\r\n                    \"name\": \"node-wwn\",\r\n                    \"value\": \"0x50001440472029e9\"\r\n                },\r\n                {\r\n                    \"name\": \"port-id\",\r\n                    \"value\": None\r\n                },\r\n                {\r\n                    \"name\": \"port-wwn\",\r\n                    \"value\": \"0x500014428029e900\"\r\n                }\r\n            ]\r\n        }\r\n    ]\r\n}\r\n\r\nGET_ALL_ENGINE_DIRECTOR_HARDWARE_PORT = {\r\n    \"context\": [\r\n        {\r\n            \"type\": \"fc-port\",\r\n            \"parent\": \"/engines/engine-1-1/directors/director-1-1-A/\"\r\n                      \"hardware/ports\",\r\n            \"attributes\": [\r\n                {\r\n                    \"name\": \"address\",\r\n                    \"value\": \"0x500014428029e900\"\r\n                },\r\n                {\r\n       
             \"name\": \"current-speed\",\r\n                    \"value\": \"8Gbits/s\"\r\n                },\r\n                {\r\n                    \"name\": \"enabled\",\r\n                    \"value\": \"true\"\r\n                },\r\n                {\r\n                    \"name\": \"max-speed\",\r\n                    \"value\": \"8Gbits/s\"\r\n                },\r\n                {\r\n                    \"name\": \"name\",\r\n                    \"value\": \"A0-FC00\"\r\n                },\r\n                {\r\n                    \"name\": \"node-wwn\",\r\n                    \"value\": \"0x50001440472029e9\"\r\n                },\r\n                {\r\n                    \"name\": \"operational-status\",\r\n                    \"value\": \"ok\"\r\n                },\r\n                {\r\n                    \"name\": \"port-status\",\r\n                    \"value\": \"up\"\r\n                },\r\n                {\r\n                    \"name\": \"port-wwn\",\r\n                    \"value\": \"0x500014428029e900\"\r\n                },\r\n                {\r\n                    \"name\": \"protocols\",\r\n                    \"value\": [\r\n                        \"fc\"\r\n                    ]\r\n                },\r\n                {\r\n                    \"name\": \"role\",\r\n                    \"value\": \"front-end\"\r\n                },\r\n                {\r\n                    \"name\": \"target-port\",\r\n                    \"value\": \"P00000000472029E9-A0-FC00\"\r\n                }\r\n            ]\r\n        }\r\n    ]\r\n}\r\n\r\nports_result = [\r\n    {\r\n        'native_port_id': 'P00000000472029E9-A0-FC00',\r\n        'name': 'P00000000472029E9-A0-FC00',\r\n        'type': 'fc',\r\n        'logical_type': 'frontend',\r\n        'connection_status': 'connected',\r\n        'health_status': 'normal',\r\n        'location': '',\r\n        'storage_id': '12345',\r\n        'native_parent_id': '0x00000000472029e9',\r\n        'speed': 8000000000,\r\n        'max_speed': 8000000000,\r\n        'wwn': '0x500014428029e900',\r\n        'mac_address': '',\r\n        'ipv4': '',\r\n        'ipv4_mask': '',\r\n        'ipv6': '',\r\n        'ipv6_mask': ''\r\n    }\r\n]\r\n\r\nGET_STORAGE_VIEW = {\r\n    \"context\": [\r\n        {\r\n            \"type\": \"storage-view\",\r\n            \"parent\": \"/clusters/cluster-1/exports/storage-views\",\r\n            \"attributes\": [\r\n                {\r\n                    \"name\": \"caw-enabled\",\r\n                    \"value\": \"true\"\r\n                },\r\n                {\r\n                    \"name\": \"controller-tag\",\r\n                    \"value\": None\r\n                },\r\n                {\r\n                    \"name\": \"initiators\",\r\n                    \"value\": [\"CHEN_LINUX\"]\r\n                },\r\n                {\r\n                    \"name\": \"name\",\r\n                    \"value\": \"CHEN_LINUX\"\r\n                },\r\n                {\r\n                    \"name\": \"operational-status\",\r\n                    \"value\": \"ok\"\r\n                },\r\n                {\r\n                    \"name\": \"port-name-enabled-status\",\r\n                    \"value\": [\"P0000000047302920-B0-FC00,true,ok\"\r\n                              ]\r\n                },\r\n                {\r\n                    \"name\": \"ports\",\r\n                    \"value\": [\r\n                        \"P0000000047302920-B0-FC00\"\r\n                    
]\r\n                },\r\n                {\r\n                    \"name\": \"scsi-spc-version\",\r\n                    \"value\": \"2\"\r\n                },\r\n                {\r\n                    \"name\": \"virtual-volumes\",\r\n                    \"value\": [\r\n                        \"(0,device_wcj_hp_3_c1_vol,123,16G)\",\r\n                        \"(1,dg_ocr,456,100G)\"\r\n                    ]\r\n                },\r\n                {\r\n                    \"name\": \"write-same-16-enabled\",\r\n                    \"value\": \"true\"\r\n                },\r\n                {\r\n                    \"name\": \"xcopy-enabled\",\r\n                    \"value\": \"true\"\r\n                }\r\n            ],\r\n            \"children\": []\r\n        }\r\n    ]\r\n}\r\n\r\nGET_INITIATORS_PORT = {\r\n    \"context\": [\r\n        {\r\n            \"type\": \"fc-initiator-port\",\r\n            \"parent\": \"/clusters/cluster-1/exports/initiator-ports\",\r\n            \"attributes\": [\r\n                {\r\n                    \"name\": \"name\",\r\n                    \"value\": \"CHEN_LINUX\"\r\n                },\r\n                {\r\n                    \"name\": \"node-wwn\",\r\n                    \"value\": \"0x21000024ff7fb74d\"\r\n                },\r\n                {\r\n                    \"name\": \"port-wwn\",\r\n                    \"value\": \"0x21000024ff7fb74d\"\r\n                },\r\n                {\r\n                    \"name\": \"scsi-spc-version\",\r\n                    \"value\": \"2\"\r\n                },\r\n                {\r\n                    \"name\": \"suspend-on-detach\",\r\n                    \"value\": None\r\n                },\r\n                {\r\n                    \"name\": \"target-ports\",\r\n                    \"value\": [\r\n                        \"P0000000047302920-B0-FC03\",\r\n                        \"P0000000047302920-B0-FC01\"\r\n                    ]\r\n                },\r\n                {\r\n                    \"name\": \"type\",\r\n                    \"value\": \"default\"\r\n                }\r\n            ],\r\n            \"children\": []\r\n        }\r\n    ]\r\n}\r\n\r\nlist_port_groups_result = {\r\n    'port_groups': [\r\n        {\r\n            'name': 'port_group_CHEN_LINUX',\r\n            'description': 'port_group_CHEN_LINUX',\r\n            'storage_id': '12345',\r\n            'native_port_group_id': 'port_group_CHEN_LINUX',\r\n            'ports': [\r\n                'P0000000047302920-B0-FC00'\r\n            ]\r\n        }\r\n    ],\r\n    'port_grp_port_rels': [\r\n        {\r\n            \"storage_id\": \"12345\",\r\n            \"native_port_group_id\": \"port_group_CHEN_LINUX\",\r\n            \"native_port_id\": \"P0000000047302920-B0-FC00\"\r\n        }\r\n    ]\r\n}\r\n\r\nlist_storage_host_initiators_result = [\r\n    {\r\n        'name': 'CHEN_LINUX',\r\n        'type': 'fc',\r\n        'storage_id': '12345',\r\n        'native_storage_host_initiator_id': '0x21000024ff7fb74d',\r\n        'wwn': '0x21000024ff7fb74d',\r\n        'alias': '0x21000024ff7fb74d',\r\n        'status': 'online',\r\n        'native_storage_host_id': '0x21000024ff7fb74d'\r\n    }\r\n]\r\n\r\nlist_storage_hosts_result = [\r\n    {\r\n        'name': 'CHEN_LINUX',\r\n        'os_type': 'Unknown',\r\n        'storage_id': '12345',\r\n        'native_storage_host_id': '0x21000024ff7fb74d',\r\n        'status': 'normal'\r\n    }\r\n]\r\n\r\nlist_masking_views_result = [\r\n    {\r\n        
'name': 'CHEN_LINUX',\r\n        'description': 'CHEN_LINUX',\r\n        'storage_id': '12345',\r\n        'native_masking_view_id': 'CHEN_LINUX123',\r\n        'native_port_group_id': 'port_group_CHEN_LINUX',\r\n        'native_volume_id': '123',\r\n        'native_storage_host_id': '0x21000024ff7fb74d'\r\n    },\r\n    {\r\n        'name': 'CHEN_LINUX',\r\n        'description': 'CHEN_LINUX',\r\n        'storage_id': '12345',\r\n        'native_masking_view_id': 'CHEN_LINUX',\r\n        'native_port_group_id': 'port_group_CHEN_LINUX',\r\n        'native_volume_id': '456',\r\n        'native_storage_host_id': '0x21000024ff7fb74d'\r\n    }\r\n]\r\n\r\n\r\nclass TestVplexStorDriver(TestCase):\r\n    RestHandler.login = mock.Mock(return_value=None)\r\n\r\n    def test_parse_alert(self):\r\n        trap = VplexStorageDriver(**ACCESS_INFO).parse_alert(context,\r\n                                                             TRAP_INFO)\r\n        trap_result['occur_time'] = trap['occur_time']\r\n        self.assertDictEqual(trap, trap_result)\r\n\r\n    @mock.patch.object(RestHandler, 'get_cluster_resp')\r\n    @mock.patch.object(RestHandler, 'get_virtual_volume_resp')\r\n    @mock.patch.object(RestHandler, 'get_virtual_volume_by_name_resp')\r\n    def test_list_volumes(self, mock_name, mock_volume, mock_cluster):\r\n        mock_cluster.return_value = GET_ALL_CLUSTER\r\n        mock_volume.return_value = GET_ALL_LUNS\r\n        mock_name.return_value = GET_LUN\r\n        volume = VplexStorageDriver(**ACCESS_INFO).list_volumes(context)\r\n        self.assertDictEqual(volume[0], volume_result[0])\r\n\r\n    @mock.patch.object(RestHandler, 'get_cluster_resp')\r\n    @mock.patch.object(RestHandler, 'get_devcie_resp')\r\n    @mock.patch.object(RestHandler, 'get_device_by_name_resp')\r\n    def test_list_storage_pools(self, mock_name, mock_device, mock_cluster):\r\n        mock_cluster.return_value = GET_ALL_CLUSTER\r\n        mock_device.return_value = GET_ALL_POOLS\r\n        mock_name.return_value = GET_POOL\r\n        pool = VplexStorageDriver(**ACCESS_INFO).list_storage_pools(context)\r\n        self.assertDictEqual(pool[0], pool_result[0])\r\n\r\n    def test_get_storage(self):\r\n        RestHandler.get_rest_info = mock.Mock(\r\n            side_effect=[GET_HEALH_CHECK, GET_ALL_CLUSTER, GET_CLUSTER,\r\n                         GET_ALL_STORAGE_VOLUME_SUMMARY, GET_ALL_POOLS_SUMMARY,\r\n                         GET_ALL_LUNS_SUMMARY])\r\n        storage = VplexStorageDriver(**ACCESS_INFO).get_storage(context)\r\n        self.assertDictEqual(storage, storage_result)\r\n\r\n    def test_list_alerts(self):\r\n        with self.assertRaises(Exception) as exc:\r\n            VplexStorageDriver(**ACCESS_INFO).list_alerts(context)\r\n        self.assertEqual('list_alerts is not supported in model VPLEX',\r\n                         str(exc.exception))\r\n\r\n    @mock.patch.object(RestHandler, 'get_version_verbose')\r\n    @mock.patch.object(RestHandler, 'get_engine_director_resp')\r\n    def test_list_controller(self, mock_controller, mocke_version):\r\n        mocke_version.return_value = GET_VERSION_VERBOSE\r\n        mock_controller.return_value = GET_ALL_ENGINE_DIRECTOR\r\n        controllers = VplexStorageDriver(**ACCESS_INFO). 
\\\r\n            list_controllers(context)\r\n        self.assertDictEqual(controllers[0], controllers_result[0])\r\n\r\n    @mock.patch.object(RestHandler, 'get_cluster_export_port_resp')\r\n    @mock.patch.object(RestHandler, 'get_engine_director_hardware_port_resp')\r\n    def test_list_port(self, mock_hardware_port, mock_export_port):\r\n        mock_hardware_port.return_value = GET_ALL_ENGINE_DIRECTOR_HARDWARE_PORT\r\n        mock_export_port.return_value = GET_ALL_CLUSTER_EXPORT_PORT\r\n        ports = VplexStorageDriver(**ACCESS_INFO).list_ports(context)\r\n        self.assertDictEqual(ports[0], ports_result[0])\r\n\r\n    @mock.patch.object(RestHandler, 'get_storage_views')\r\n    def test_list_port_groups(self, mock_storage_view):\r\n        mock_storage_view.return_value = GET_STORAGE_VIEW\r\n        list_port_groups = VplexStorageDriver(**ACCESS_INFO).\\\r\n            list_port_groups(context)\r\n        port_groups_result = {\r\n            'port_groups': list_port_groups.get('port_groups'),\r\n            'port_grp_port_rels': list_port_groups.get('port_grp_port_rels')\r\n        }\r\n        self.assertDictEqual(port_groups_result, list_port_groups_result)\r\n\r\n    @mock.patch.object(RestHandler, 'get_initiators_resp')\r\n    def test_list_storage_hosts(self, mock_storage_view):\r\n        mock_storage_view.return_value = GET_INITIATORS_PORT\r\n        list_storage_hosts = VplexStorageDriver(**ACCESS_INFO). \\\r\n            list_storage_hosts(context)\r\n        self.assertDictEqual(list_storage_hosts[0],\r\n                             list_storage_hosts_result[0])\r\n\r\n    @mock.patch.object(RestHandler, 'get_storage_views')\r\n    @mock.patch.object(VplexStorageDriver, 'list_storage_hosts')\r\n    def test_list_masking_views(self, mock_storage_view, mock_storage_hosts):\r\n        mock_storage_view.return_value = list_storage_hosts_result\r\n        mock_storage_hosts.return_value = GET_STORAGE_VIEW\r\n        list_masking_views = VplexStorageDriver(**ACCESS_INFO). \\\r\n            list_masking_views(context)\r\n        self.assertDictEqual(list_masking_views[0],\r\n                             list_masking_views_result[0])\r\n\r\n    @mock.patch.object(RestHandler, 'get_initiators_resp')\r\n    def test_list_storage_host_initiators(self, mock_initiators_port):\r\n        mock_initiators_port.return_value = GET_INITIATORS_PORT\r\n        list_storage_host_initiators = VplexStorageDriver(**ACCESS_INFO). \\\r\n            list_storage_host_initiators(context)\r\n        self.assertDictEqual(list_storage_host_initiators[0],\r\n                             list_storage_host_initiators_result[0])\r\n"
  },
  {
    "path": "delfin/tests/unit/drivers/fujitsu/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/fujitsu/eternus/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/fujitsu/eternus/test_eternus_stor.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nfrom unittest import TestCase, mock\n\nsys.modules['delfin.cryptor'] = mock.Mock()\n\nfrom delfin.drivers.fujitsu.eternus.eternus_ssh_client import \\\n    EternusSSHPool\nimport paramiko\nfrom delfin import context\nfrom delfin.drivers.fujitsu.eternus.eternus_stor import \\\n    EternusDriver\n\n\nclass Request:\n    def __init__(self):\n        self.environ = {'delfin.context': context.RequestContext()}\n        pass\n\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"fujitsu\",\n    \"model\": \"eternus_af650s2\",\n    \"ssh\": {\n        \"host\": \"110.143.132.231\",\n        \"port\": 22,\n        \"username\": \"user\",\n        \"password\": \"cGFzc3dvcmQ=\"\n    }\n}\n\nSTORAGE_NAME_DATA = \"\"\"\nName              [dx100-test]\nInstallation Site [test location]\nContact           []\nDescription       [test dx100-test]\nCLI>\n\"\"\"\nSTORAGE_MODEL_DATA = \"\"\"\nEnclosure View\n Storage System Name               [dx100-test]\n Model Upgrade Status              [Not Upgraded]\n Model Name                        [ET103ACU]\n Serial Number                     [4601620378]\n Device Identification Number      [280A7D]\n Status                            [Normal]\n Cache Mode                        [Write Back Mode]\n Remote Support                    [Not yet Set]\n Operation Mode                    [Normal]\n CLI Connecting Controller Module  [CM#0]\n Firmware Version                  [V10L50-9003]\n\n Controller Enclosure (2.5\")       [Normal (Inside unused parts)]\nCLI>\n\"\"\"\nSTORAGE_STATUS_DATA = \"\"\"\nSummary Status  [Normal]\nCLI>\n\"\"\"\nNODE_DATAS = \"\"\"\nCM#0 Information\n CPU Status/Status Code    [Normal      / 0xE001]\n Memory Size               [4.0GB]\n Parts Number              [CA07662-D111]\n Serial Number             [WK16201983]\n Hard Revision             [AA]\n CPU Clock                 [1.40GHz]\n CM Active EC              [EC#1]\n CM Next EC                [EC#1]\n BIOS Active EC            [EC#1]\n BIOS Next EC              [EC#1]\n CM EXP Active EC          [EC#1]\n CM EXP Next EC            [EC#1]\nCM#0 Internal Parts Status/Status Code\n Memory#0                  [Normal      / 0xE001]\n Memory#0 Parts Number     [18KDF51272PZ-1G6K1]\n Memory#0 Serial Number    [1612121A68F5]\n Memory#0 Hard Revision    [0F4B31]\n Memory#1                  [Undefined   / 0x0000]\n Memory#1 Parts Number     []\n Memory#1 Serial Number    []\n Memory#1 Hard Revision    []\n BUD                       [Normal      / 0xE001]\n BUD Parts Number          [TOSHIBA THNSNJ12]\n BUD Serial Number         [56DS10ABTNWV    ]\n BUD Hard Revision         [JYFA0101]\n Port#0                    [Unconnected / 0xC000] (Error Code : 0x0000)\n Port#1                    [Unconnected / 0xC000] (Error Code : 0x0000)\n Port#2                    [Undefined   / 0x0000]\n Port#3                    [Undefined   / 0x0000]\n DMA Port#0                [Normal     
 / 0xE001]\n DMA Port#1                [Undefined   / 0x0000]\n BIOS#0                    [Normal      / 0xE001]\n BIOS#1                    [Normal      / 0xE001]\n CM EXP                    [Normal      / 0xE001]\n CM EXP InPort#0           [Normal      / 0xE001]\n CM EXP InPort#1           [Normal      / 0xE001]\n SAS Cable#0(OUT)          [Undefined   / 0x0000]\n SAS Cable#1(OUT)          [Undefined   / 0x0000]\n CM RTC                    [Normal      / 0xE001]\n CM NVRAM                  [Normal      / 0xE001]\n CM FPGA                   [Normal      / 0xE001]\n CM LAN Port#0             [Normal      / 0xE001]\n CM LAN Port#1             [Normal      / 0xE001]\n CM LAN Port#2             [Undefined   / 0x0000]\n DI#0 Port#0               [Normal      / 0xE001]\n DI#0 Port#1               [Normal      / 0xE001]\n DI#1 Port#0               [Undefined   / 0x0000]\n DI#1 Port#1               [Undefined   / 0x0000]\n SATA SSD Controller Information\n  Status/Status Code       [Normal      / 0xE001]\n  Active EC                [EC#1]\n  Next EC                  [EC#1]\n  Firmware Version         [V03L04-0000]\n SCU                       [Normal      / 0xE001]\n SCU Voltage               [11.16V]\nCM#0 CA#0 Port#0 Information\n Port Type           [FC]\n Port Mode           [CA]\n Status/Status Code  [Unconnected / 0xC000] (Error Code : 0x0000)\n CA Active EC        [EC#0]\n CA Next EC          [EC#0]\n Connection          [Loop]\n Loop ID             [0x00]\n Transfer Rate       [Auto Negotiation]\n Link Status         [Unknown]\n Port WWN            [500000E0DA0A7D20]\n Node WWN            [500000E0DA0A7D40]\n Host Affinity       [Disable]\n Host Response       [0]\n SFP Type            [Unmount]\n SFP Information\n                  Present    Warning(Low/High)      Alarm(Low/High)\n  Temperature         [-]                [-/-]                [-/-]\n  Voltage             [-]                [-/-]                [-/-]\n  Current             [-]                [-/-]                [-/-]\n  TX Power            [-]                [-/-]                [-/-]\n  RX Power            [-]                [-/-]                [-/-]\nCM#0 CA#0 Port#1 Information\n Port Type           [FC]\n Port Mode           [CA]\n Status/Status Code  [Unconnected / 0xC000] (Error Code : 0x0000)\n CA Active EC        [EC#0]\n CA Next EC          [EC#0]\n Connection          [Loop]\n Loop ID             [0x00]\n Transfer Rate       [Auto Negotiation]\n Link Status         [Unknown]\n Port WWN            [500000E0DA0A7D21]\n Node WWN            [500000E0DA0A7D40]\n Host Affinity       [Disable]\n Host Response       [0]\n SFP Type            [Unmount]\n SFP Information\n                  Present    Warning(Low/High)      Alarm(Low/High)\n  Temperature         [-]                [-/-]                [-/-]\n  Voltage             [-]                [-/-]                [-/-]\n  Current             [-]                [-/-]                [-/-]\n  TX Power            [-]                [-/-]                [-/-]\n  RX Power            [-]                [-/-]                [-/-]\nCM#1 Information\n CPU Status/Status Code    [Normal      / 0xE001]\n Memory Size               [4.0GB]\n Parts Number              [CA07662-D111]\n Serial Number             [WK16201958]\n Hard Revision             [AA]\n CPU Clock                 [1.40GHz]\n CM Active EC              [EC#1]\n CM Next EC                [EC#1]\n BIOS Active EC            [EC#1]\n BIOS Next EC              [EC#1]\n CM EXP Active EC          [EC#1]\n CM EXP Next EC  
          [EC#1]\nCM#1 Internal Parts Status/Status Code\n Memory#0                  [Normal      / 0xE001]\n Memory#0 Parts Number     [18KDF51272PZ-1G6K1]\n Memory#0 Serial Number    [1612121A6900]\n Memory#0 Hard Revision    [0F4B31]\n Memory#1                  [Undefined   / 0x0000]\n Memory#1 Parts Number     []\n Memory#1 Serial Number    []\n Memory#1 Hard Revision    []\n BUD                       [Normal      / 0xE001]\n BUD Parts Number          [TOSHIBA THNSNJ12]\n BUD Serial Number         [56DS1086TNWV    ]\n BUD Hard Revision         [JYFA0101]\n Port#0                    [Unconnected / 0xC000] (Error Code : 0x0000)\n Port#1                    [Unconnected / 0xC000] (Error Code : 0x0000)\n Port#2                    [Undefined   / 0x0000]\n Port#3                    [Undefined   / 0x0000]\n DMA Port#0                [Normal      / 0xE001]\n DMA Port#1                [Undefined   / 0x0000]\n BIOS#0                    [Normal      / 0xE001]\n BIOS#1                    [Normal      / 0xE001]\n CM EXP                    [Normal      / 0xE001]\n CM EXP InPort#0           [Normal      / 0xE001]\n CM EXP InPort#1           [Normal      / 0xE001]\n SAS Cable#0(OUT)          [Undefined   / 0x0000]\n SAS Cable#1(OUT)          [Undefined   / 0x0000]\n CM RTC                    [Normal      / 0xE001]\n CM NVRAM                  [Normal      / 0xE001]\n CM FPGA                   [Normal      / 0xE001]\n CM LAN Port#0             [Normal      / 0xE001]\n CM LAN Port#1             [Normal      / 0xE001]\n CM LAN Port#2             [Undefined   / 0x0000]\n DI#0 Port#0               [Normal      / 0xE001]\n DI#0 Port#1               [Normal      / 0xE001]\n DI#1 Port#0               [Undefined   / 0x0000]\n DI#1 Port#1               [Undefined   / 0x0000]\n SATA SSD Controller Information\n  Status/Status Code       [Normal      / 0xE001]\n  Active EC                [EC#1]\n  Next EC                  [EC#1]\n  Firmware Version         [V03L04-0000]\n SCU                       [Normal      / 0xE001]\n SCU Voltage               [11.16V]\nCM#1 CA#0 Port#0 Information\n Port Type           [FC]\n Port Mode           [CA]\n Status/Status Code  [Unconnected / 0xC000] (Error Code : 0x0000)\n CA Active EC        [EC#0]\n CA Next EC          [EC#0]\n Connection          [Loop]\n Loop ID             [0x00]\n Transfer Rate       [Auto Negotiation]\n Link Status         [Unknown]\n Port WWN            [500000E0DA0A7D30]\n Node WWN            [500000E0DA0A7D40]\n Host Affinity       [Disable]\n Host Response       [0]\n SFP Type            [Unmount]\n SFP Information\n                  Present    Warning(Low/High)      Alarm(Low/High)\n  Temperature         [-]                [-/-]                [-/-]\n  Voltage             [-]                [-/-]                [-/-]\n  Current             [-]                [-/-]                [-/-]\n  TX Power            [-]                [-/-]                [-/-]\n  RX Power            [-]                [-/-]                [-/-]\nCM#1 CA#0 Port#1 Information\n Port Type           [FC]\n Port Mode           [CA]\n Status/Status Code  [Unconnected / 0xC000] (Error Code : 0x0000)\n CA Active EC        [EC#0]\n CA Next EC          [EC#0]\n Connection          [Loop]\n Loop ID             [0x00]\n Transfer Rate       [Auto Negotiation]\n Link Status         [Unknown]\n Port WWN            [500000E0DA0A7D31]\n Node WWN            [500000E0DA0A7D40]\n Host Affinity       [Disable]\n Host Response       [0]\n SFP Type            [Unmount]\n SFP Information\n             
     Present    Warning(Low/High)      Alarm(Low/High)\n  Temperature         [-]                [-/-]                [-/-]\n  Voltage             [-]                [-/-]                [-/-]\n  Current             [-]                [-/-]                [-/-]\n  TX Power            [-]                [-/-]                [-/-]\n  RX Power            [-]                [-/-]                [-/-]\nCE PSU#0 Information\n Status/Status Code  [Normal      / 0xE001]\nCE PSU#1 Information\n Status/Status Code  [Normal      / 0xE001]\nlogin as: f.ce\nPre-authentication banner message from server:\n| FUJITSU Storage ETERNUS login is required. [2021-11-30 06:50:01]\nEnd of banner message from server\nf.ce@192.168.1.1's password:\nAccess denied\nf.ce@192.168.1.1's password:\n\nCurrently Network Configuration is set to factory default.\nCLI>\n\"\"\"\nNODE_DATAS_OLD = \"\"\"your ip address\nyou username is huawei\nCLI> show fru-ce\nCM#0 Information\n Status/Status Code  [Normal      / 0xE001]\n Memory Size         [1.0GB]\n Type                [FC Model]\n Parts Number        [CA07415-C621]\n Serial Number       [WK13510516]\n Hardware Revision   [AA   ]\n CPU Clock           [1.20GHz]\n Active EC           [EC#1]\n Next EC             [EC#1]\nCM#0 Internal Parts Status/Status Code\n Memory              [Normal      / 0xE001]\n BE Expander         [Normal      / 0xE001]\n BE EXP Port#0       [Normal      / 0xE001]\n BE EXP Port#1       [Undefined   / 0x0000]\n BE EXP Port#2       [Normal      / 0xE001]\n DI Port#0           [Normal      / 0xE001]\n DI Port#1           [Normal      / 0xE001]\n FC Port#0           [Normal      / 0xE001]\n FC Port#1           [Normal      / 0xE001]\n SAS Cable#1(OUT)    [-           / -     ]\n NAND Controller     [Normal      / 0xE001]\n Flash ROM           [Normal      / 0xE001]\nCM#0 SCU Information\n Status/Status Code  [Normal      / 0xE001]\n Voltage             [9.50V]\n Expires             [0-00]\nCM#0 Port#0 Information\n Port Mode           [CA]\n Status/Status Code  [Normal      / 0xE001]\n Connection          [Fabric]\n Loop ID             [-]\n Transfer Rate       [Auto Negotiation]\n Link Status         [4Gbit/s Link Up]\n WWN                 [500000E0D0376706]\n Host Affinity       [Enable]\n Host Response       [-]\nCM#0 Port#1 Information\n Port Mode           [CA]\n Status/Status Code  [Normal      / 0xE001]\n Connection          [Loop]\n Loop ID             [-]\n Transfer Rate       [Auto Negotiation]\n Link Status         [Link Down]\n WWN                 [500000E0D0376707]\n Host Affinity       [Enable]\n Host Response       [-]\nCM#1 Information\n Status/Status Code  [Normal      / 0xE001]\n Memory Size         [1.0GB]\n Type                [FC Model]\n Parts Number        [CA07415-C621]\n Serial Number       [WK13510538]\n Hardware Revision   [AA   ]\n CPU Clock           [1.20GHz]\n Active EC           [EC#1]\n Next EC             [EC#1]\nCM#1 Internal Parts Status/Status Code\n Memory              [Normal      / 0xE001]\n BE Expander         [Normal      / 0xE001]\n BE EXP Port#0       [Normal      / 0xE001]\n BE EXP Port#1       [Undefined   / 0x0000]\n BE EXP Port#2       [Normal      / 0xE001]\n DI Port#0           [Normal      / 0xE001]\n DI Port#1           [Normal      / 0xE001]\n FC Port#0           [Normal      / 0xE001]\n FC Port#1           [Normal      / 0xE001]\n SAS Cable#1(OUT)    [-           / -     ]\n NAND Controller     [Normal      / 0xE001]\n Flash ROM           [Normal      / 0xE001]\nCM#1 SCU Information\n Status/Status 
Code  [Normal      / 0xE001]\n Voltage             [9.50V]\n Expires             [0-00]\nCM#1 Port#0 Information\n Port Mode           [CA]\n Status/Status Code  [Normal      / 0xE001]\n Connection          [Loop]\n Loop ID             [-]\n Transfer Rate       [Auto Negotiation]\n Link Status         [Link Down]\n WWN                 [500000E0D0376786]\n Host Affinity       [Enable]\n Host Response       [-]\nCM#1 Port#1 Information\n Port Mode           [CA]\n Status/Status Code  [Normal      / 0xE001]\n Connection          [Fabric]\n Loop ID             [-]\n Transfer Rate       [Auto Negotiation]\n Link Status         [4Gbit/s Link Up]\n WWN                 [500000E0D0376787]\n Host Affinity       [Enable]\n Host Response       [-]\nCE PSU#0 Information\n Status/Status Code  [Normal      / 0xE001]\nCE PSU#1 Information\n Status/Status Code  [Normal      / 0xE001]\nCLI>\"\"\"\nNODE_STATUS_DATAS = \"\"\"\nController Enclosure Information\n Location      Status       Error Code  Sensor 1 / Sensor 2\n Intake Temp   Normal       0x0000      24 (C)   / 23 (C)\n Exhaust Temp  Normal       0x0000      40 (C)   / 42 (C)\n\nController Enclosure Status\n Controller Module Status/Status Code\n  CM#0         [Normal      / 0xE001]\n  CM#1         [Normal      / 0xE001]\n Power Supply Unit Status/Status Code\n  PSU#0        [Normal      / 0xE001]\n  PSU#1        [Normal      / 0xE001]\n Disk Status\n  CE-Disk#0    [Available                    ]  CE-Disk#1    [Available ]\n  CE-Disk#2    [Available                    ]  CE-Disk#3    [Available ]\n  CE-Disk#4    [Present                      ]  CE-Disk#5    [Available ]\n  CE-Disk#6    [Available                    ]  CE-Disk#7    [Available ]\n  CE-Disk#8    [Available                    ]  CE-Disk#9    [Available ]\nCLI>\n\"\"\"\nPOOL_DATAS = \"\"\"\n[RAID Group No.],[RAID Group Name,R,M,Status,TotalCapacity(MB),FreeCapacity(MB)\n0,pool-1,RAID1+0,CM#0,Available,1118208,1115926\n1,pool-2,RAID5,CM#1,Available,1118208,1118208\nCLI>\n\"\"\"\nPOOL_OLD_DATAS = \"\"\"your ip address\nyou username is huawei\nCLI> show raid-groups\nRAID Group           RAID    Assigned Status\\\n                    Total        Free\nNo. Name             Level   CM                                 Capacity(MB)\\\n Capacity(MB)\n  0 JJ               RAID0   CM#0     Broken                         1676288\\\n        1358848\nCLI> \"\"\"\nPOOL_ERROR_DATAS = \"\"\"                   ^\nError: Ambiguous command\nCLI>\"\"\"\nVOLUME_TPV_DATAS \\\n    = \"\"\"Volume Status RG or TPP or FTRP TFOG Size(MB) Copy Allocation Used Me\nNo. Name No.  Name No. Name Protection Status (%) Level     Capacity(MB)\n----- ------ ---- ---- ------- --- ------ --- ---- --- --- --- -- --- -----\n1 volume-wsv Available 0 thin-1 - - 200 Disable  Thick  Normal - 80 High 200\n4 voo-1 Available 0 thin-1 - - 500 Disable  Thin  Normal >500 80 High 0\nCLI>\n\"\"\"\nVOLUME_FTV_DATAS \\\n    = \"\"\"Error: E0331 Flexible tier mode is not valid.\n             [0305-0505] -type ftv\nCLI>\n\"\"\"\nVOLUME_DATAS = \"\"\"your ip address\nyou username is huawei\nCLI> show raid-groups\nVolume                Status                    Type\\\n      Expansion       RAID Group           Size(MB)   Reserved\nNo.  
Name                                                 (Concatenation) No.\\\n Name                        Deletion\n   0 OVM_Repo0        Broken                    Open                    -   0\\\n    JJ                   51200\n   1 OVM_Repo1        Broken                    Open                    -   0\\\n    JJ                   51200\n   2 OVM_raw          Broken                    Open                    -   0\\\n    JJ                   10240\n   3 OVM_Repo2        Broken                    Open                    -   0\\\n    JJ                  204800\nCLI>\"\"\"\n\nVOLUMES_ERROR = \"\"\"                  ^\nCLI>\"\"\"\n\nVOLUMES = \"\"\"CLI> show volumes -mode uid\nVolume                                 Status                    Type\\\n              RG or TPP or FTRP     TFOG                 Size(MB)  UID\nNo.   Name                                                             \\\n            No.  Name             No. Name                       ID\\\n                                           Mode\n----- -------------------------------- ------------------------- ----------\\\n------- ---- ---------------- --- ---------------- --------- ---------------\\\n----------------- -------\n    0 LUN00                            Available                 TPV          \\\n            0 Pool0              - -                    20480\\\n             600000E00D29000000291B6B00000000 Default\n    1 LUN01                            Available                 TPV          \\\n            0 Pool0              - -                    20480\\\n            600000E00D29000000291B6B00010000 Default\n    2 LUN02                            Available                 TPV\\\n                      0 Pool0              - -                    20480\\\n                       600000E00D29000000291B6B00020000 Default\n    3 LUN03                            Available                 TPV\\\n                      0 Pool0              - -                    20480\\\n                       600000E00D29000000291B6B00030000 Default\n    4 LUN04                            Available                 TPV\\\n                      0 Pool0              - -                    20480\\\n                       600000E00D29000000291B6B00040000 Default\nCLI>\"\"\"\n\nSTORAGE_RESULT = {\n    'name': 'dx100-test',\n    'vendor': 'FUJITSU',\n    'description': 'test dx100-test',\n    'model': 'ET103ACU',\n    'status': 'normal',\n    'serial_number': '4601620378',\n    'firmware_version': 'V10L50-9003',\n    'location': 'test location',\n    'raw_capacity': 6657199308800.0,\n    'total_capacity': 2345052143616,\n    'used_capacity': 2392850432,\n    'free_capacity': 2342659293184\n}\nCONTROLLER_RESULT = [\n    {\n        'name': 'CM#0',\n        'storage_id': '12345',\n        'native_controller_id': 'WK16201983',\n        'status': 'normal',\n        'location': 'CM#0',\n        'soft_version': 'AA',\n        'cpu_info': '1.40GHz',\n        'cpu_count': 1,\n        'memory_size': '4294967296'\n    }]\nPOOL_RESULT = [\n    {\n        'name': 'pool-1',\n        'storage_id': '12345',\n        'native_storage_pool_id': '0',\n        'status': 'normal',\n        'storage_type': 'block',\n        'total_capacity': 1172526071808,\n        'used_capacity': 2392850432,\n        'free_capacity': 1170133221376\n    }]\nPOOL_old_RESULT = [\n    {\n        'name': 'JJ',\n        'storage_id': '12345',\n        'native_storage_pool_id': '0',\n        'status': 'abnormal',\n        'storage_type': 'block',\n        'total_capacity': 1757715365888,\n  
      'used_capacity': 332859965440,\n        'free_capacity': 1424855400448\n    }]\nVOLUME_RESULT = [\n    {\n        'name': 'LUN00',\n        'storage_id': '12345',\n        'status': 'normal',\n        'native_volume_id': '0',\n        'native_storage_pool_id': '0',\n        'type': 'thick',\n        'wwn': '600000E00D29000000291B6B00000000',\n        'total_capacity': 21474836480,\n        'used_capacity': 0,\n        'free_capacity': 21474836480\n    }]\nVOLUME_OLD_RESULT = [\n    {\n        'name': 'OVM_Repo0',\n        'storage_id': '12345',\n        'status': 'abnormal',\n        'native_volume_id': '0',\n        'native_storage_pool_id': '0',\n        'type': 'thick',\n        'total_capacity': 53687091200,\n        'used_capacity': 0,\n        'free_capacity': 0\n    }]\nLIST_ALERT_ERROR = \"\"\"your ip address\nyou username is huawei\nCLI> show raid-groups\n2021-08-19 02:33:08   Error         P 85400008   SS\\\nD 2.5 DE#00-Slot#8(SAS 400GB) Fault (DE) <HUSMM1640ASS204 0QWA8YAA H603 15299\\\nA1>\n2021-08-19 02:33:08   Error         P 85400007   SSD 2.5 DE#00-Slot#7(\\\nSAS 400GB) Fault (DE) <HUSMM1640ASS204 0QWAHN1A H603 15299 A1>\n2021-08-19 02:33:08   Error         P 85400006   SSD 2.5 DE#00-Slot#6(\\\nSAS 400GB) Fault (DE) <HUSMM1640ASS204 0QWA9GSA H603 15299 A1>\n2021-08-19 02:33:08   Error         P 85400005   SSD 2.5 DE#00-Slot#5(\\\nSAS 400GB) Fault (DE) <HUSMM1640ASS204 0QWA91YA H603 15299 A1>\n2021-08-19 02:33:08   Error         P 85400004   SSD 2.5 DE#00-Slot#4(\\\nSAS 400GB) Fault (DE) <HUSMM1640ASS204 0QWA9HMA H603 15299 A1>\n2021-08-19 02:33:08   Error         P 85400003   SSD 2.5 DE#00-Slot#3(\\\nSAS 400GB) Fault (DE) <HUSMM1640ASS204 0QWA908A H603 15299 A1>\n2021-08-19 02:33:08   Error         P 85400002   SSD 2.5 DE#00-Slot#2(\\\nSAS 400GB) Fault (DE) <HUSMM1640ASS204 0QWAHMAA H603 15299 A1>\n2021-08-19 02:33:08   Error         P 85400001   SSD 2.5 DE#00-Slot#1(\\\nSAS 400GB) Fault (DE) <HUSMM1640ASS204 0QWA9KJA H603 15299 A1>\n2021-08-19 02:33:08   Error         P 85400000   SSD 2.5 DE#00-Slot#0(\\\nSAS 400GB) Fault (DE) <HUSMM1640ASS204 0QWA9GMA H603 15299 A1>\nCLI>\"\"\"\n\nLIST_ALERT_WARNING = \"\"\"your ip address\nyou username is huawei\nCLI> show raid-groups\n2021-08-19 02:33:08   Warning       P 85400008   SSD\\\nFault (DE) <HUSMM1640ASS204 0QWA8YAA H603 15299 A1>\n2021-08-19 02:33:08   Warning       P 85400007   SSD 2.5  Fault (DE) <\\\nHUSMM1640ASS204 0QWAHN1A H603 15299 A1>\n2021-08-19 02:33:08   Warning       P 85400006   SSD 2.5 Fault (DE) <\\\nHUSMM1640ASS204 0QWA9GSA H603 15299 A1>\n2021-08-19 02:33:08   Warning       P 85400005   SSD 2.5  Fault (DE) <\\\nHUSMM1640ASS204 0QWA91YA H603 15299 A1>\n2021-08-19 02:33:08   Warning       P 85400004   SSD 2.5 Fault (DE) <\\\nHUSMM1640ASS204 0QWA9HMA H603 15299 A1>\n2021-08-19 02:33:08   Warning       P 85400003   SSD 2.5  Fault (DE) <\\\nHUSMM1640ASS204 0QWA908A H603 15299 A1>\n2021-08-19 02:33:08   Warning       P 85400002   SSD 2.5 DE#00-Fault (DE) <\\\nHUSMM1640ASS204 0QWAHMAA H603 15299 A1>\n2021-08-19 02:33:08   Warning       P 85400001   SSD 2.5 DE#00-S Fault (DE) <\\\nHUSMM1640ASS204 0QWA9KJA H603 15299 A1>\n2021-08-19 02:33:08   Warning       P 85400000   SSD 2.5 DE#00- Fault (DE) <\\\nHUSMM1640ASS204 0QWA9GMA H603 15299 A1>\nCLI>\"\"\"\n\nALERTS_INFO = {\n    'alert_id': '85400008',\n    'severity': 'Warning',\n    'category': 'Fault',\n    'description': 'SSDFault (DE) <HUSMM1640ASS204 0QWA8YAA H603 15299 A1>',\n    'type': 'EquipmentAlarm',\n    'resource_type': 'Storage',\n    'alert_name': 
'SSDFault (DE) <HUSMM1640ASS204 0QWA8YAA H603 15299 A1>',\n    'occur_time': 1629311588000,\n    'match_key': '1809bdfa672e8b10ec9ec499a54dcd83'\n}\n\nDISK_LIST_INFO = \"\"\"Controller Enclosure Disk #0 Information\n Location                   [CE-Disk#0]\n Status                     [Present] (Error Code : 0x0000)\n Size                       [400GB]\n Type                       [2.5 SSD-M]\n Speed                      [-]\n Usage                      [Data]\n Health                     [100%]\n RAID Group                 [-]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [HGST]\n Product ID                 [HUSMM1640ASS204]\n Serial Number              [0QWA91YA]\n WWN                        [5000CCA04E4B14F3]\n Firmware Revision          [H603]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [4%]\n   Completed passes since last Power On [0Cycles]\n\nController Enclosure Disk #1 Information\n Location                   [CE-Disk#1]\n Status                     [Present] (Error Code : 0x0000)\n Size                       [400GB]\n Type                       [2.5 SSD-M]\n Speed                      [-]\n Usage                      [Data]\n Health                     [100%]\n RAID Group                 [-]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [HGST]\n Product ID                 [HUSMM1640ASS204]\n Serial Number              [0QWAHN1A]\n WWN                        [5000CCA04E4B77CF]\n Firmware Revision          [H603]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [4%]\n   Completed passes since last Power On [0Cycles]\n\nController Enclosure Disk #2 Information\n Location                   [CE-Disk#2]\n Status                     [Present] (Error Code : 0x0000)\n Size                       [400GB]\n Type                       [2.5 SSD-M]\n Speed                      [-]\n Usage                      [Data]\n Health                     [100%]\n RAID Group                 [-]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [HGST]\n Product ID                 [HUSMM1640ASS204]\n Serial Number              [0QWA9GMA]\n WWN                        [5000CCA04E4B1B17]\n Firmware Revision          [H603]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [4%]\n   Completed passes since last Power On [0Cycles]\n\nController Enclosure Disk #3 Information\n Location                   [CE-Disk#3]\n Status                     [Present] (Error Code : 0x0000)\n Size                       [400GB]\n Type                       [2.5 SSD-M]\n Speed                      [-]\n Usage                      [Data]\n Health                     [100%]\n RAID Group                 [-]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [HGST]\n Product ID                 [HUSMM1640ASS204]\n Serial Number              [0QWA9KJA]\n WWN                        [5000CCA04E4B1C7F]\n Firmware Revision          [H603]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [4%]\n   Completed passes since last Power On [0Cycles]\n\nController Enclosure Disk #4 Information\n Location                   [CE-Disk#4]\n Status                     
[Present] (Error Code : 0x0000)\n Size                       [400GB]\n Type                       [2.5 SSD-M]\n Speed                      [-]\n Usage                      [Data]\n Health                     [100%]\n RAID Group                 [-]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [HGST]\n Product ID                 [HUSMM1640ASS204]\n Serial Number              [0QWAHMAA]\n WWN                        [5000CCA04E4B7777]\n Firmware Revision          [H603]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [4%]\n   Completed passes since last Power On [0Cycles]\n\nController Enclosure Disk #5 Information\n Location                   [CE-Disk#5]\n Status                     [Present] (Error Code : 0x0000)\n Size                       [600GB]\n Type                       [2.5 Online]\n Speed                      [15000rpm]\n Usage                      [Data]\n Health                     [-]\n RAID Group                 [-]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [SEAGATE]\n Product ID                 [ST600MP0005]\n Serial Number              [S7M1LC92]\n WWN                        [5000C50098FA0A04]\n Firmware Revision          [VE0C]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [3%]\n   Completed passes since last Power On [0Cycles]\n\nController Enclosure Disk #6 Information\n Location                   [CE-Disk#6]\n Status                     [Present] (Error Code : 0x0000)\n Size                       [600GB]\n Type                       [2.5 Online]\n Speed                      [15000rpm]\n Usage                      [Data]\n Health                     [-]\n RAID Group                 [-]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [SEAGATE]\n Product ID                 [ST600MP0005]\n Serial Number              [W7M0M8PR]\n WWN                        [5000C500A0FA7844]\n Firmware Revision          [VE0C]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [3%]\n   Completed passes since last Power On [0Cycles]\n\nController Enclosure Disk #7 Information\n Location                   [CE-Disk#7]\n Status                     [Present] (Error Code : 0x0000)\n Size                       [600GB]\n Type                       [2.5 Online]\n Speed                      [15000rpm]\n Usage                      [Data]\n Health                     [-]\n RAID Group                 [-]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [SEAGATE]\n Product ID                 [ST600MP0005]\n Serial Number              [S7M1LC99]\n WWN                        [5000C50098FA09DC]\n Firmware Revision          [VE0C]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [3%]\n   Completed passes since last Power On [0Cycles]\n\nController Enclosure Disk #8 Information\n Location                   [CE-Disk#8]\n Status                     [Present] (Error Code : 0x0000)\n Size                       [600GB]\n Type                       [2.5 Online]\n Speed                      [15000rpm]\n Usage                      [Data]\n Health                     [-]\n RAID Group                 [-]\n Motor Status            
   [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [SEAGATE]\n Product ID                 [ST600MP0005]\n Serial Number              [S7M1L3XD]\n WWN                        [5000C50098EE374C]\n Firmware Revision          [VE0C]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [3%]\n   Completed passes since last Power On [0Cycles]\n\nController Enclosure Disk #9 Information\n Location                   [CE-Disk#9]\n Status                     [Present] (Error Code : 0x0000)\n Size                       [600GB]\n Type                       [2.5 Online]\n Speed                      [15000rpm]\n Usage                      [Data]\n Health                     [-]\n RAID Group                 [ 0 : pool-1]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [SEAGATE]\n Product ID                 [ST600MP0005]\n Serial Number              [S7M1KXS5]\n WWN                        [5000C50098F06184]\n Firmware Revision          [VE0C]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [3%]\n   Completed passes since last Power On [0Cycles]\n\nController Enclosure Disk #10 Information\n Location                   [CE-Disk#10]\n Status                     [Present] (Error Code : 0x0000)\n Size                       [600GB]\n Type                       [2.5 Online]\n Speed                      [15000rpm]\n Usage                      [Data]\n Health                     [-]\n RAID Group                 [-]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [SEAGATE]\n Product ID                 [ST600MP0005]\n Serial Number              [S7M1KCPD]\n WWN                        [5000C50098DB1E50]\n Firmware Revision          [VE0C]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [3%]\n   Completed passes since last Power On [0Cycles]\n\nController Enclosure Disk #11 Information\n Location                   [CE-Disk#11]\n Status                     [Present] (Error Code : 0x0000)\n Size                       [600GB]\n Type                       [2.5 Online]\n Speed                      [15000rpm]\n Usage                      [Data]\n Health                     [-]\n RAID Group                 [-]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [SEAGATE]\n Product ID                 [ST600MP0005]\n Serial Number              [W7M0MYYA]\n WWN                        [5000C500A0F7C4D0]\n Firmware Revision          [VE0C]\n <Disk Patrol Information>\n   Total completed passes               [0Cycles]\n   Progress with current pass           [3%]\n   Completed passes since last Power On [0Cycles]\nCLI>\"\"\"\nDISK_OLD = \"\"\"your ip address\nyou username is huawei\nCLI> show disks -disks all\nController Enclosure Disk #0 Information\n Location                   [CE-Disk#0]\n Status                     [Failed Usable] (Error Code : 0x0001)\n Size                       [450GB]\n Type                       [3.5\" SAS]\n Speed                      [15000rpm]\n Usage                      [System]\n RAID Group                 [ 0 : JJ]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [SEAGATE]\n Product ID                 [ST3450857SS]\n Serial Number              [6SK2CEG91327]\n WWN         
               [5000C5006BC80184]\n Firmware Revision          [GF0D]\n\\r\nController Enclosure Disk #1 Information\n Location                   [CE-Disk#1]\n Status                     [Failed Usable] (Error Code : 0x0009)\n Size                       [450GB]\n Type                       [3.5\" SAS]\n Speed                      [15000rpm]\n Usage                      [System]\n RAID Group                 [ 0 : JJ]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [SEAGATE]\n Product ID                 [ST3450857SS]\n Serial Number              [6SK262SZ1312]\n WWN                        [5000C5006806E318]\n Firmware Revision          [GF0D]\n\\r\nController Enclosure Disk #2 Information\n Location                   [CE-Disk#2]\n Status                     [Available]\n Size                       [450GB]\n Type                       [3.5\" SAS]\n Speed                      [15000rpm]\n Usage                      [Data]\n RAID Group                 [ 0 : JJ]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [SEAGATE]\n Product ID                 [ST3450857SS]\n Serial Number              [6SK26QCA1312]\n WWN                        [5000C5006810B6AC]\n Firmware Revision          [GF0D]\n\\r\nController Enclosure Disk #3 Information\n Location                   [CE-Disk#3]\n Status                     [Available]\n Size                       [450GB]\n Type                       [3.5\" SAS]\n Speed                      [15000rpm]\n Usage                      [Data]\n RAID Group                 [ 0 : JJ]\n Motor Status               [Active]\n Rebuild/Copyback Progress  [-]\n Vendor ID                  [SEAGATE]\n Product ID                 [ST3450857SS]\n Serial Number              [6SK2DE941330]\n WWN                        [5000C5006C3E26FC]\n Firmware Revision          [GF0D]\n CLI>\"\"\"\n\nPORT_LIST_INFO = \"\"\"\nPort                          CM#0 CA#0 Port#0       CM#0 CA#0 Port#1\nPort Mode                     CA                     CA\nConnection                    FC-AL                  FC-AL\nLoop ID Assign                Manual(0x00)           Manual(0x00)\nTransfer Rate                 8 Gbit/s               Auto Negotiation\nFrame Size                    2048 bytes             2048 bytes\nHost Affinity                 Disable                Disable\nHost Response No.             0                      0\nHost Response Name            Default                Default\nReset Scope                   I_T_L                  I_T_L\nReserve Cancel at Chip Reset  Disable                Disable\nREC Line No.                  
-                      -\nREC Transfer Mode Sync        -                      -\nREC Transfer Mode Stack       -                      -\nREC Transfer Mode Consistency -                      -\nREC Transfer Mode Through     -                      -\nTFO Transfer Mode             -                      -\nWWN Mode                      Custom                 Custom\nWWPN                          500000E0DA0A7D20       500000E0DA0A7D21\n\nPort                          CM#1 CA#0 Port#0       CM#1 CA#0 Port#1\nPort Mode                     CA                     CA\nConnection                    FC-AL                  FC-AL\nLoop ID Assign                Manual(0x00)           Manual(0x00)\nTransfer Rate                 Auto Negotiation       Auto Negotiation\nFrame Size                    2048 bytes             2048 bytes\nHost Affinity                 Disable                Disable\nHost Response No.             0                      0\nHost Response Name            Default                Default\nReset Scope                   I_T_L                  I_T_L\nReserve Cancel at Chip Reset  Disable                Disable\nREC Line No.                  -                      -\nREC Transfer Mode Sync        -                      -\nREC Transfer Mode Stack       -                      -\nREC Transfer Mode Consistency -                      -\nREC Transfer Mode Through     -                      -\nTFO Transfer Mode             -                      -\nWWN Mode                      Custom                 Custom\nWWPN                          500000E0DA0A7D30       500000E0DA0A7D31\nCLI>\"\"\"\n\nFCOE_INFO = \"\"\"Port                          CM#0 CA#0 Port#0           CM#0\\\n CA#0 Port#1\nPort Mode                     CA                         RA\nTransfer Rate                 10Gbit/s                   10Gbit/s\nFrame Size                    2048bytes                  2048bytes\nHost Affinity                 Enable                     Enable\nHost Response No.             1                          2\nHost Response Name            HP01                       HP02\nReset Scope                   I_T_L                      I_T_L\nReserve Cancel at Chip Reset  Disable                    -\nFCF VLAN ID                   Disable                    Disable\nFCF Fabric Name               Disable                    Disable\nMAC Address                   01:02:03:04:05:06          01:02:03:04:05:07\n\nPort                          CM#0 CA#1 Port#0           CM#0 CA#1 Port#1\nPort Mode                     CA                         RA\nTransfer Rate                 10Gbit/s                   10Gbit/s\nFrame Size                    2048bytes                  2048bytes\nHost Affinity                 Enable                     Enable\nHost Response No.             
1                          2\nHost Response Name            HP01                       HP02\nReset Scope                   I_T_L                      I_T_L\nReserve Cancel at Chip Reset  Disable                    -\nFCF VLAN ID                   Disable                    Disable\nFCF Fabric Name               Disable                    Disable\nMAC Address                   01:02:03:06:05:06          01:02:03:06:05:07\nCLI>\"\"\"\nFC_INFO_OLD = \"\"\"your ip address\nyou username is huawei\nCLI> show fc-parameters\nPort                          CM#0 Port#0      CM#0 Port#1\\\n      CM#1 Port#0      CM#1 Port#1\nPort Mode                     CA               CA               CA\\\n               CA\nConnection                    Fabric           FC-AL            FC-AL\\\n            Fabric\nLoop ID Assign                -                Auto(Ascending)\\\n  Auto(Ascending)  -\nTransfer Rate                 Auto Negotiation Auto Negotiation\\\n Auto Negotiation Auto Negotiation\nFrame Size                    2048 bytes       2048 bytes\\\n       2048 bytes       2048 bytes\nHost Affinity                 Enable           Enable           Enable\\\n           Enable\nHost Response No.             -                -                -\\\n                -\nHost Response Name            -                -                -\\\n                -\nReset Scope                   I_T_L            I_T_L            I_T_L\\\n            I_T_L\nReserve Cancel at Chip Reset  Enable           Enable           Enable\\\n           Enable\\\nCLI>\"\"\"\nHOST_STATUS_INFO = \"\"\"CLI> show host-path-state\n Port                  Host                  Path State\n                       No.  Name\n --------------------- ---- ---------------- ----------\n CM#0 CA#0 Port#0         0 dbs01_0          Online\n CM#0 CA#0 Port#0         1 dbs01_1          Online\n CM#0 CA#0 Port#1         1 dbs01_1          Online\n CM#0 CA#0 Port#1         2 dbs02_0          Online\n CM#1 CA#0 Port#0         0 dbs01_0          Online\n CM#1 CA#0 Port#0         1 dbs01_1          Online\n CM#1 CA#0 Port#0         3 dbs02_1          Online\n CM#1 CA#0 Port#1         7 h_g_1_0          Online\nCLI>\"\"\"\nFC_HOSTS_INFO = \"\"\"CLI> show host-wwn-names\nHost                  WWN              Host Response\nNo.  Name                              No. Name\n---- ---------------- ---------------- --- ----------------\n   0 dbs01_0          10000090faec8449   0 Default\n   1 dbs01_1          10000090faec84a7   0 Default\n   2 dbs02_0          10000090faec852a   0 Default\n   3 dbs02_1          10000090faec842d   0 Default\n   4 dbs03_0          10000090faec7f2f   0 Default\n   5 dbs03_1          10000090faec7f06   0 Default\n   7 h_g_1_0          12ac13ab15af21ae 252 AIX\nCLI>\"\"\"\nISCSI_HOST_INFO = \"\"\"CLI> show host-iscsi-names\nHost                  Host Response        IP Address\\\n                              iSCSI Name                       CmdSN Count\nNo.  Name             No. 
Name\n---- ---------------- --- ---------------- ---------------------------------\\\n------ -------------------------------- -----------\n   0 iscsi_host_0     252 AIX              126.0.0.2\\\n                                  iqn.2006-08.com.huawei:21004447d Unlimited\n                                                                cca426::0\n   1 iscsi_host-1_0   252 AIX              126.0.0.3\\\n                                  iqn.2006-08.com.huawei:21004447d Unlimited\n                                                                cca426::1\n   2 iscsi_1_0          0 Default          *(IPv6)\\\n                                    iqn.2007-08.com.huawei:21004447d Unlimited\n                                                                cca426::7\\\nCLI>\"\"\"\nISCSI_HOST_DETAIL_ZERO = \"\"\"CLI> show host-iscsi-names -host-number 0\nHost No.             0\nHost Name            iscsi_host_0\niSCSI Name           iqn.2006-08.com.huawei:21004447dcca426::0\nAlias Name           iscsi 230   25\nIP Address           126.0.0.2\nChap User Name\nHost Response No.    252\nHost Response Name   AIX\nCmdSN Count          Unlimited\n\nCLI>\"\"\"\nISCSI_HOST_DETAIL_ONE = \"\"\"CLI> show host-iscsi-names -host-number 1\nHost No.             1\nHost Name            iscsi_host-1_0\niSCSI Name           iqn.2006-08.com.huawei:21004447dcca426::1\nAlias Name           iscsi1\nIP Address           126.0.0.3\nChap User Name\nHost Response No.    252\nHost Response Name   AIX\nCmdSN Count          Unlimited\n\nCLI>\"\"\"\nISCSI_HOST_DETAIL_TWO = \"\"\"CLI> show host-iscsi-names -host-number 2\nHost No.             2\nHost Name            iscsi_1_0\niSCSI Name           iqn.2007-08.com.huawei:21004447dcca426::7\nAlias Name\nIP Address           *(IPv6)\nChap User Name\nHost Response No.    0\nHost Response Name   Default\nCmdSN Count          Unlimited\n\nCLI>\"\"\"\nSAS_HOST_INFO = \"\"\"CLI> show host-sas-addresses\nHost                  SAS Address      Host Response\nNo.  Name                              No. 
Name\n---- ---------------- ---------------- --- ----------------\n   6 sas_g_0_0        12ab13ac14ad15af 253 AIX VxVM\n   8 sas2_0           14ab13ac46ae20af   0 Default\nCLI>\"\"\"\nINITIATORS_DATA = [\n    {'name': '10000090faec8449', 'storage_id': '12345',\n     'native_storage_host_initiator_id': '10000090faec8449',\n     'wwn': '10000090faec8449', 'status': 'online',\n     'native_storage_host_id': 'dbs01_0', 'type': 'fc'},\n    {'name': '10000090faec84a7', 'storage_id': '12345',\n     'native_storage_host_initiator_id': '10000090faec84a7',\n     'wwn': '10000090faec84a7', 'status': 'online',\n     'native_storage_host_id': 'dbs01_1', 'type': 'fc'},\n    {'name': '10000090faec852a', 'storage_id': '12345',\n     'native_storage_host_initiator_id': '10000090faec852a',\n     'wwn': '10000090faec852a', 'status': 'online',\n     'native_storage_host_id': 'dbs02_0', 'type': 'fc'},\n    {'name': '10000090faec842d', 'storage_id': '12345',\n     'native_storage_host_initiator_id': '10000090faec842d',\n     'wwn': '10000090faec842d', 'status': 'online',\n     'native_storage_host_id': 'dbs02_1', 'type': 'fc'},\n    {'name': '10000090faec7f2f', 'storage_id': '12345',\n     'native_storage_host_initiator_id': '10000090faec7f2f',\n     'wwn': '10000090faec7f2f', 'status': 'offline',\n     'native_storage_host_id': 'dbs03_0', 'type': 'fc'},\n    {'name': '10000090faec7f06', 'storage_id': '12345',\n     'native_storage_host_initiator_id': '10000090faec7f06',\n     'wwn': '10000090faec7f06', 'status': 'offline',\n     'native_storage_host_id': 'dbs03_1', 'type': 'fc'},\n    {'name': '12ac13ab15af21ae', 'storage_id': '12345',\n     'native_storage_host_initiator_id': '12ac13ab15af21ae',\n     'wwn': '12ac13ab15af21ae', 'status': 'online',\n     'native_storage_host_id': 'h_g_1_0', 'type': 'fc'},\n    {'name': 'iqn.2006-08.com.huawei:21004447dcca426::0',\n     'storage_id': '12345',\n     'native_storage_host_initiator_id':\n         'iqn.2006-08.com.huawei:21004447dcca426::0',\n     'wwn': 'iqn.2006-08.com.huawei:21004447dcca426::0', 'status': 'offline',\n     'native_storage_host_id': 'iscsi_host_0', 'type': 'iscsi',\n     'alias': 'iscsi 230   25'},\n    {'name': 'iqn.2006-08.com.huawei:21004447dcca426::1',\n     'storage_id': '12345',\n     'native_storage_host_initiator_id':\n         'iqn.2006-08.com.huawei:21004447dcca426::1',\n     'wwn': 'iqn.2006-08.com.huawei:21004447dcca426::1', 'status': 'offline',\n     'native_storage_host_id': 'iscsi_host-1_0', 'type': 'iscsi',\n     'alias': 'iscsi1'},\n    {'name': 'iqn.2007-08.com.huawei:21004447dcca426::7',\n     'storage_id': '12345',\n     'native_storage_host_initiator_id':\n         'iqn.2007-08.com.huawei:21004447dcca426::7',\n     'wwn': 'iqn.2007-08.com.huawei:21004447dcca426::7',\n     'status': 'offline',\n     'native_storage_host_id': 'iscsi_1_0',\n     'type': 'iscsi', 'alias': None},\n    {'name': '12ab13ac14ad15af', 'storage_id': '12345',\n     'native_storage_host_initiator_id': '12ab13ac14ad15af',\n     'wwn': '12ab13ac14ad15af', 'status': 'offline',\n     'native_storage_host_id': 'sas_g_0_0', 'type': 'sas'},\n    {'name': '14ab13ac46ae20af', 'storage_id': '12345',\n     'native_storage_host_initiator_id': '14ab13ac46ae20af',\n     'wwn': '14ab13ac46ae20af', 'status': 'offline',\n     'native_storage_host_id': 'sas2_0', 'type': 'sas'}]\nHOSTS_DATA = [\n    {'name': 'dbs01_0', 'storage_id': '12345',\n     'native_storage_host_id': 'dbs01_0', 'os_type': 'Unknown',\n     'status': 'normal'}, {'name': 'dbs01_1', 'storage_id': 
'12345',\n                           'native_storage_host_id': 'dbs01_1',\n                           'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'dbs02_0', 'storage_id': '12345',\n     'native_storage_host_id': 'dbs02_0', 'os_type': 'Unknown',\n     'status': 'normal'}, {'name': 'dbs02_1', 'storage_id': '12345',\n                           'native_storage_host_id': 'dbs02_1',\n                           'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'dbs03_0', 'storage_id': '12345',\n     'native_storage_host_id': 'dbs03_0', 'os_type': 'Unknown',\n     'status': 'offline'}, {'name': 'dbs03_1', 'storage_id': '12345',\n                            'native_storage_host_id': 'dbs03_1',\n                            'os_type': 'Unknown', 'status': 'offline'},\n    {'name': 'h_g_1_0', 'storage_id': '12345',\n     'native_storage_host_id': 'h_g_1_0', 'os_type': 'AIX',\n     'status': 'normal'},\n    {'name': 'iscsi_host_0', 'storage_id': '12345',\n     'native_storage_host_id': 'iscsi_host_0',\n     'os_type': 'AIX', 'status': 'offline',\n     'ip_address': '126.0.0.2'},\n    {'name': 'iscsi_host-1_0', 'storage_id': '12345',\n     'native_storage_host_id': 'iscsi_host-1_0', 'os_type': 'AIX',\n     'status': 'offline', 'ip_address': '126.0.0.3'},\n    {'name': 'iscsi_1_0', 'storage_id': '12345',\n     'native_storage_host_id': 'iscsi_1_0', 'os_type': 'Unknown',\n     'status': 'offline', 'ip_address': None},\n    {'name': 'sas_g_0_0', 'storage_id': '12345',\n     'native_storage_host_id': 'sas_g_0_0', 'os_type': 'AIX',\n     'status': 'offline'},\n    {'name': 'sas2_0', 'storage_id': '12345',\n     'native_storage_host_id': 'sas2_0',\n     'os_type': 'Unknown', 'status': 'offline'}]\nHOST_GROUPS_INFO = \"\"\"CLI> show host-groups -all\nHost Group            Host Response        Host Type\nNo.  Name             No. Name\n---- ---------------- --- ---------------- ----------\n   0 dbs01              0 Default          FC/FCoE\n<Host List>\n  Host                  WWN\n  No.  Name\n  ---- ---------------- ----------------------------------------\n     0 dbs01_0          10000090faec8449\n     1 dbs01_1          10000090faec84a7\n\nHost Group            Host Response        Host Type\nNo.  Name             No. Name\n---- ---------------- --- ---------------- ----------\n   1 dbs02              0 Default          FC/FCoE\n<Host List>\n  Host                  WWN\n  No.  Name\n  ---- ---------------- ----------------------------------------\n     2 dbs02_0          10000090faec852a\n     3 dbs02_1          10000090faec842d\n\nHost Group            Host Response        Host Type\nNo.  Name             No. Name\n---- ---------------- --- ---------------- ----------\n   2 dbs03              0 Default          FC/FCoE\n<Host List>\n  Host                  WWN\n  No.  
Name\n  ---- ---------------- ----------------------------------------\n     4 dbs03_0          10000090faec7f2f\n     5 dbs03_1          10000090faec7f06\nCLI>\"\"\"\nHOST_GROUPS_DATA = {\n    'storage_host_groups': [{'name': 'dbs01', 'storage_id': '12345',\n                             'native_storage_host_group_id': '0'},\n                            {'name': 'dbs02', 'storage_id': '12345',\n                             'native_storage_host_group_id': '1'},\n                            {'name': 'dbs03', 'storage_id': '12345',\n                             'native_storage_host_group_id': '2'}],\n    'storage_host_grp_host_rels': [\n        {'storage_id': '12345', 'native_storage_host_group_id': '0',\n         'native_storage_host_id': 'dbs01_0'},\n        {'storage_id': '12345', 'native_storage_host_group_id': '0',\n         'native_storage_host_id': 'dbs01_1'},\n        {'storage_id': '12345', 'native_storage_host_group_id': '1',\n         'native_storage_host_id': 'dbs02_0'},\n        {'storage_id': '12345', 'native_storage_host_group_id': '1',\n         'native_storage_host_id': 'dbs02_1'},\n        {'storage_id': '12345', 'native_storage_host_group_id': '2',\n         'native_storage_host_id': 'dbs03_0'},\n        {'storage_id': '12345', 'native_storage_host_group_id': '2',\n         'native_storage_host_id': 'dbs03_1'}]}\nVOLUME_GROUPS_INFO = \"\"\"CLI> show lun-groups\nLUN Group             LUN Overlap\nNo.  Name             Volumes\n---- ---------------- -----------\n   0 dbs01         20 No\nCLI>\n\"\"\"\nVOLUME_DETAILS_INFO = \"\"\"CLI> show lun-groups -lg-number 0\nLUN Group No.0\nLUN Group Name   dbs01\nLUN  Volume                                 Status                    Size(MB)\\\n  LUN Overlap UID\n     No.   Name\\\n                                                                      Volume\n---- ----- -------------------------------- -------------------------\\\n --------- ----------- --------------------------------\n   0     0 LUN00                            Available\\\n                        20480 No          600000E00D29000000291B6B00000000\n   1     1 LUN01                            Available\\\n                        20480 No          600000E00D29000000291B6B00010000\n   2     2 LUN02                            Available\\\n                        20480 No          600000E00D29000000291B6B00020000\nCLI>\n\"\"\"\nVOLUME_GROUPS_DATA = {\n    'volume_groups': [{'name': 'dbs01         20', 'storage_id': '12345',\n                       'native_volume_group_id': '0'}],\n    'vol_grp_vol_rels': [\n        {'storage_id': '12345', 'native_volume_group_id': '0',\n         'native_volume_id': '0'},\n        {'storage_id': '12345', 'native_volume_group_id': '0',\n         'native_volume_id': '1'},\n        {'storage_id': '12345', 'native_volume_group_id': '0',\n         'native_volume_id': '2'}]}\nPORT_G_VIEW_INFO = \"\"\"CLI> show port-groups -all\nPort Group           CA Type\nNo. Name\n--- ---------------- -------\n  0 PortGroup01      FC\n<Port List>\n  CM#0 CA#0 Port#0\n  CM#1 CA#0 Port#0\n\nPort Group           CA Type\nNo. Name\n--- ---------------- -------\n  1 PortGroup02      FC\n<Port List>\n  CM#0 CA#0 Port#1\n  CM#1 CA#0 Port#1\n\nPort Group           CA Type\nNo. 
Name\n--- ---------------- -------\n  2 PortGroup03      FC\n<Port List>\n  CM#0 CA#1 Port#0\n  CM#1 CA#1 Port#0\nCLI>\"\"\"\nPORT_G_DATA = {\n    'port_groups': [{'name': 'PortGroup01', 'storage_id': '12345',\n                     'native_port_group_id': '0'},\n                    {'name': 'PortGroup02', 'storage_id': '12345',\n                     'native_port_group_id': '1'},\n                    {'name': 'PortGroup03', 'storage_id': '12345',\n                     'native_port_group_id': '2'}],\n    'port_grp_port_rels': [\n        {'storage_id': '12345', 'native_port_group_id': '0',\n         'native_port_id': 'CM#0 CA#0 Port#0'},\n        {'storage_id': '12345', 'native_port_group_id': '0',\n         'native_port_id': 'CM#1 CA#0 Port#0'},\n        {'storage_id': '12345', 'native_port_group_id': '1',\n         'native_port_id': 'CM#0 CA#0 Port#1'},\n        {'storage_id': '12345', 'native_port_group_id': '1',\n         'native_port_id': 'CM#1 CA#0 Port#1'},\n        {'storage_id': '12345', 'native_port_group_id': '2',\n         'native_port_id': 'CM#0 CA#1 Port#0'},\n        {'storage_id': '12345', 'native_port_group_id': '2',\n         'native_port_id': 'CM#1 CA#1 Port#0'}]}\nMASKING_VIEWS_INFO = \"\"\"CLI> show host-affinity\nPort Group           Host Group           LUN Group             LUN Overlap\nNo. Name             No. Name             No.  Name             Volumes\n--- ---------------- --- ---------------- ---- ---------------- -----------\n  0 huawie             3 Dorado5000V6        7 test             No\n<Connection List>\n  Port             Host\n                   No.  Name\n  ---------------- ---- ----------------\n  CM#0 CA#0 Port#1    6 Dorado5000V6_0\n  CM#0 CA#0 Port#1    7 Dorado5000V6_1\n  CM#1 CA#0 Port#0    6 Dorado5000V6_0\n  CM#1 CA#0 Port#0    7 Dorado5000V6_1\n\nPort Group           Host Group           LUN Group             LUN Overlap\nNo. Name             No. Name             No.  Name             Volumes\n--- ---------------- --- ---------------- ---- ---------------- -----------\n  0 huawie            10 Dorado5500_V6       9 lun_fujitsu      No\n<Connection List>\n  Port             Host\n                   No.  Name\n  ---------------- ---- ----------------\n  CM#0 CA#0 Port#1    4 Dorado5500v6_0\n  CM#0 CA#0 Port#1    5 Dorado5500v6_1\n  CM#1 CA#0 Port#0    4 Dorado5500v6_0\n  CM#1 CA#0 Port#0    5 Dorado5500v6_1\n\nPort Group           Host Group           LUN Group             LUN Overlap\nNo. Name             No. Name             No.  Name             Volumes\n--- ---------------- --- ---------------- ---- ---------------- -----------\n  0 huawie            12 AIX206              8 new1             No\n<Connection List>\n  Port             Host\n                   No.  Name\n  ---------------- ---- ----------------\n  CM#0 CA#0 Port#1   20 AIX206_0\n  CM#0 CA#0 Port#1   21 AIX206_1\n  CM#1 CA#0 Port#0   20 AIX206_0\n  CM#1 CA#0 Port#0   21 AIX206_1\n\nCM#0 CA#0 Port#0 (Host Affinity Mode Enable)\nHost                  LUN Group             LUN Overlap LUN Mask\nNo.  Name             No.  Name             Volumes     Group No.\n---- ---------------- ---- ---------------- ----------- ---------\n   1 RH_196_02           1 RH2288_test      No                  -\n  20 AIX206_0            9 lun_fujitsu      No                  -\n\nCM#0 CA#0 Port#1 (Host Affinity Mode Enable)\n\nCM#1 CA#0 Port#0 (Host Affinity Mode Enable)\nHost                  LUN Group             LUN Overlap LUN Mask\nNo.  Name             No.  
Name             Volumes     Group No.\n---- ---------------- ---- ---------------- ----------- ---------\n   2 RH197_0             5 RH196            Yes                 -\n\nCM#1 CA#0 Port#1 (Host Affinity Mode Disable)\nCLI>\"\"\"\nGET_MAPPING = \"\"\"CLI> show mapping\nCM#0 CA#0 Port#0 (Host Affinity Mode Enable)\n\nCM#0 CA#0 Port#1 (Host Affinity Mode Enable)\n\nCM#0 CA#1 Port#0 (Host Affinity Mode Enable)\n\nCM#0 CA#1 Port#1 (Host Affinity Mode Disable)\nLUN  Volume                                 Status                    Size(MB)\n     No.   Name\n---- ----- -------------------------------- ------------------------- ---------\n   0     3 LUN03                            Available                     20480\n   1     6 lun051                           Available                      2048\n\nCM#1 CA#0 Port#0 (Host Affinity Mode Enable)\n\nCM#1 CA#0 Port#1 (Host Affinity Mode Enable)\n\nCM#1 CA#1 Port#0 (Host Affinity Mode Enable)\n\nCM#1 CA#1 Port#1 (Host Affinity Mode Disable)\nLUN  Volume                                 Status                    Size(MB)\n     No.   Name\n---- ----- -------------------------------- ------------------------- ---------\n   1     5 lun050                           Available                      2048\nCLI>\"\"\"\nMASKING_VIEWS_DATA = [\n    {'native_masking_view_id': '37host_idvolume_id',\n     'name': '37host_idvolume_id', 'native_storage_host_group_id': '3',\n     'native_port_group_id': '0', 'native_volume_group_id': '7',\n     'storage_id': '12345'}, {'native_masking_view_id': '109host_idvolume_id',\n                              'name': '109host_idvolume_id',\n                              'native_storage_host_group_id': '10',\n                              'native_port_group_id': '0',\n                              'native_volume_group_id': '9',\n                              'storage_id': '12345'},\n    {'native_masking_view_id': '128host_idvolume_id',\n     'name': '128host_idvolume_id', 'native_storage_host_group_id': '12',\n     'native_port_group_id': '0', 'native_volume_group_id': '8',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'host_group_id1RH_196_02volume_id',\n     'name': 'host_group_id1RH_196_02volume_id',\n     'native_storage_host_id': 'RH_196_02', 'native_volume_group_id': '1',\n     'native_port_id': 'CM#0 CA#0 Port#0', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host_group_id9AIX206_0volume_id',\n     'name': 'host_group_id9AIX206_0volume_id',\n     'native_storage_host_id': 'AIX206_0', 'native_volume_group_id': '9',\n     'native_port_id': 'CM#0 CA#0 Port#0', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host_group_id5RH197_0volume_id',\n     'name': 'host_group_id5RH197_0volume_id',\n     'native_storage_host_id': 'RH197_0', 'native_volume_group_id': '5',\n     'native_port_id': 'CM#1 CA#0 Port#0', 'storage_id': '12345'}]\nPARSE_ALERT_DATA = {\n    'alert_id': '123456', 'severity': 'Fatal',\n    'category': 'Fault', 'occur_time': 1644827799328,\n    'description': 'cm0 error', 'location': 'cm0#eterus-213546',\n    'type': 'EquipmentAlarm', 'resource_type': 'Storage',\n    'alert_name': 'cm0 error', 'match_key': 'e10adc3949ba59abbe56e057f20f883e'}\nPORTS_OLD_DATA = [\n    {'name': 'CM#0 Port#0', 'storage_id': '12345',\n     'native_port_id': 'CM#0 Port#0', 'location': 'CM#0 Port#0', 'type': 'fc',\n     'speed': 4000000000, 'connection_status': 'connected',\n     'wwn': '500000E0D0376706', 'health_status': 'normal'},\n    {'name': 'CM#0 Port#1', 'storage_id': '12345',\n     
'native_port_id': 'CM#0 Port#1', 'location': 'CM#0 Port#1', 'type': 'fc',\n     'speed': None, 'connection_status': 'disconnected',\n     'wwn': '500000E0D0376707', 'health_status': 'normal'},\n    {'name': 'CM#1 Port#0', 'storage_id': '12345',\n     'native_port_id': 'CM#1 Port#0', 'location': 'CM#1 Port#0', 'type': 'fc',\n     'speed': None, 'connection_status': 'disconnected',\n     'wwn': '500000E0D0376786', 'health_status': 'normal'},\n    {'name': 'CM#1 Port#1', 'storage_id': '12345',\n     'native_port_id': 'CM#1 Port#1', 'location': 'CM#1 Port#1', 'type': 'fc',\n     'speed': 4000000000, 'connection_status': 'connected',\n     'wwn': '500000E0D0376787', 'health_status': 'normal'}]\nPORTS_DATA = [{'name': 'CM#0 CA#0 Port#0', 'storage_id': '12345',\n               'native_port_id': 'CM#0 CA#0 Port#0',\n               'location': 'CM#0 CA#0 Port#0', 'type': 'fc',\n               'speed': 10000000000, 'connection_status': 'unknown',\n               'wwn': '500000E0DA0A7D20', 'health_status': 'unknown'},\n              {'name': 'CM#0 CA#0 Port#1', 'storage_id': '12345',\n               'native_port_id': 'CM#0 CA#0 Port#1',\n               'location': 'CM#0 CA#0 Port#1', 'type': 'fc',\n               'speed': 10000000000, 'connection_status': 'unknown',\n               'wwn': '500000E0DA0A7D21', 'health_status': 'unknown'},\n              {'name': 'CM#0 CA#1 Port#0', 'storage_id': '12345',\n               'native_port_id': 'CM#0 CA#1 Port#0',\n               'location': 'CM#0 CA#1 Port#0', 'type': 'fc',\n               'speed': 10000000000},\n              {'name': 'CM#0 CA#1 Port#1', 'storage_id': '12345',\n               'native_port_id': 'CM#0 CA#1 Port#1',\n               'location': 'CM#0 CA#1 Port#1', 'type': 'fc',\n               'speed': 10000000000}]\nDISKS_OLD = [\n    {'name': 'CE-Disk#0', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#0',\n     'serial_number': '6SK2CEG91327', 'manufacturer': 'SEAGATE',\n     'model': '3.5\" SAS', 'firmware': 'GF0D', 'location': 'CE-Disk#0',\n     'speed': 15000, 'capacity': 483183820800.0, 'status': 'abnormal',\n     'physical_type': 'sas', 'logical_type': 'unknown'},\n    {'name': 'CE-Disk#1', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#1',\n     'serial_number': '6SK262SZ1312', 'manufacturer': 'SEAGATE',\n     'model': '3.5\" SAS', 'firmware': 'GF0D', 'location': 'CE-Disk#1',\n     'speed': 15000, 'capacity': 483183820800.0, 'status': 'abnormal',\n     'physical_type': 'sas', 'logical_type': 'unknown'},\n    {'name': 'CE-Disk#2', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#2',\n     'serial_number': '6SK26QCA1312', 'manufacturer': 'SEAGATE',\n     'model': '3.5\" SAS', 'firmware': 'GF0D', 'location': 'CE-Disk#2',\n     'speed': 15000, 'capacity': 483183820800.0, 'status': 'normal',\n     'physical_type': 'sas', 'logical_type': 'member'},\n    {'name': 'CE-Disk#3', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#3',\n     'serial_number': '6SK2DE941330', 'manufacturer': 'SEAGATE',\n     'model': '3.5\" SAS', 'firmware': 'GF0D', 'location': 'CE-Disk#3',\n     'speed': 15000, 'capacity': 483183820800.0, 'status': 'normal',\n     'physical_type': 'sas', 'logical_type': 'member'}]\nDISKS_DATA = [\n    {'name': 'CE-Disk#0', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#0',\n     'serial_number': '0QWA91YA', 'manufacturer': 'HGST', 'model': '2.5 SSD-M',\n     'firmware': 'H603', 'location': 'CE-Disk#0', 'speed': None,\n     'capacity': 429496729600.0, 'status': 'normal', 'physical_type': 'ssd',\n     'logical_type': 
'member'},\n    {'name': 'CE-Disk#1', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#1',\n     'serial_number': '0QWAHN1A', 'manufacturer': 'HGST', 'model': '2.5 SSD-M',\n     'firmware': 'H603', 'location': 'CE-Disk#1', 'speed': None,\n     'capacity': 429496729600.0, 'status': 'normal', 'physical_type': 'ssd',\n     'logical_type': 'member'},\n    {'name': 'CE-Disk#2', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#2',\n     'serial_number': '0QWA9GMA', 'manufacturer': 'HGST', 'model': '2.5 SSD-M',\n     'firmware': 'H603', 'location': 'CE-Disk#2', 'speed': None,\n     'capacity': 429496729600.0, 'status': 'normal', 'physical_type': 'ssd',\n     'logical_type': 'member'},\n    {'name': 'CE-Disk#3', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#3',\n     'serial_number': '0QWA9KJA', 'manufacturer': 'HGST', 'model': '2.5 SSD-M',\n     'firmware': 'H603', 'location': 'CE-Disk#3', 'speed': None,\n     'capacity': 429496729600.0, 'status': 'normal', 'physical_type': 'ssd',\n     'logical_type': 'member'},\n    {'name': 'CE-Disk#4', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#4',\n     'serial_number': '0QWAHMAA', 'manufacturer': 'HGST', 'model': '2.5 SSD-M',\n     'firmware': 'H603', 'location': 'CE-Disk#4', 'speed': None,\n     'capacity': 429496729600.0, 'status': 'normal', 'physical_type': 'ssd',\n     'logical_type': 'member'},\n    {'name': 'CE-Disk#5', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#5',\n     'serial_number': 'S7M1LC92', 'manufacturer': 'SEAGATE',\n     'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#5',\n     'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal',\n     'physical_type': 'unknown', 'logical_type': 'member'},\n    {'name': 'CE-Disk#6', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#6',\n     'serial_number': 'W7M0M8PR', 'manufacturer': 'SEAGATE',\n     'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#6',\n     'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal',\n     'physical_type': 'unknown', 'logical_type': 'member'},\n    {'name': 'CE-Disk#7', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#7',\n     'serial_number': 'S7M1LC99', 'manufacturer': 'SEAGATE',\n     'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#7',\n     'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal',\n     'physical_type': 'unknown', 'logical_type': 'member'},\n    {'name': 'CE-Disk#8', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#8',\n     'serial_number': 'S7M1L3XD', 'manufacturer': 'SEAGATE',\n     'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#8',\n     'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal',\n     'physical_type': 'unknown', 'logical_type': 'member'},\n    {'name': 'CE-Disk#9', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#9',\n     'serial_number': 'S7M1KXS5', 'manufacturer': 'SEAGATE',\n     'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#9',\n     'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal',\n     'physical_type': 'unknown', 'logical_type': 'member'},\n    {'name': 'CE-Disk#10', 'storage_id': '12345',\n     'native_disk_id': 'CE-Disk#10', 'serial_number': 'S7M1KCPD',\n     'manufacturer': 'SEAGATE', 'model': '2.5 Online', 'firmware': 'VE0C',\n     'location': 'CE-Disk#10', 'speed': 15000, 'capacity': 644245094400.0,\n     'status': 'normal', 'physical_type': 'unknown', 'logical_type': 'member'},\n    {'name': 'CE-Disk#11', 'storage_id': '12345',\n     'native_disk_id': 'CE-Disk#11', 
'serial_number': 'W7M0MYYA',\n     'manufacturer': 'SEAGATE', 'model': '2.5 Online', 'firmware': 'VE0C',\n     'location': 'CE-Disk#11', 'speed': 15000, 'capacity': 644245094400.0,\n     'status': 'normal', 'physical_type': 'unknown', 'logical_type': 'member'}]\nPARSE_ALERT_INFO = {\n    '1.3.6.1.2.1.1.3.0': '123456',\n    '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.211.4.1.1.126.1.150.0.2',\n    '1.3.6.1.4.1.211.1.21.1.150.7.0': '-213546',\n    '1.3.6.1.4.1.211.1.21.1.150.1.1.0': 'cm0#eterus',\n    '1.3.6.1.4.1.211.1.21.1.150.11.0': 'cm0 error'\n}\n\n\ndef create_driver():\n    EternusSSHPool.do_exec_shell = mock.Mock(\n        side_effect=[\"Summary Status  [Normal]\"])\n    return EternusDriver(**ACCESS_INFO)\n\n\nclass TestEternusDriver(TestCase):\n    driver = create_driver()\n\n    def test_get_storage(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[STORAGE_NAME_DATA,\n                         STORAGE_MODEL_DATA,\n                         STORAGE_STATUS_DATA,\n                         DISK_LIST_INFO,\n                         POOL_DATAS])\n        storage = self.driver.get_storage(context)\n        self.assertDictEqual(storage, STORAGE_RESULT)\n\n    def test_list_storage_pools(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(side_effect=[POOL_DATAS])\n        pools = self.driver.list_storage_pools(context)\n        self.assertDictEqual(pools[0], POOL_RESULT[0])\n\n    def test_list_storage_pools_old(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(side_effect=[\n            POOL_ERROR_DATAS, POOL_OLD_DATAS])\n        pools = self.driver.list_storage_pools(context)\n        self.assertDictEqual(pools[0], POOL_old_RESULT[0])\n\n    def test_list_volumes(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[VOLUMES, VOLUME_TPV_DATAS, VOLUME_FTV_DATAS])\n        volumes = self.driver.list_volumes(context)\n        self.assertDictEqual(volumes[0], VOLUME_RESULT[0])\n\n    def test_list_volumes_old(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[VOLUMES_ERROR, VOLUMES_ERROR, VOLUMES_ERROR,\n                         VOLUME_DATAS])\n        volumes = self.driver.list_volumes(context)\n        self.assertDictEqual(volumes[0], VOLUME_OLD_RESULT[0])\n\n    def test_get_controllers(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[NODE_DATAS, NODE_STATUS_DATAS])\n        controllers = self.driver.list_controllers(context)\n        self.assertDictEqual(controllers[0], CONTROLLER_RESULT[0])\n\n    def test_list_alerts(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[LIST_ALERT_WARNING,\n                         LIST_ALERT_ERROR])\n        list_alerts = self.driver.list_alerts(context)\n        ALERTS_INFO['occur_time'] = list_alerts[0].get('occur_time')\n        ALERTS_INFO['match_key'] = list_alerts[0].get('match_key')\n        self.assertDictEqual(list_alerts[0], ALERTS_INFO)\n\n    def 
test_list_alerts_old(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[None, None, LIST_ALERT_WARNING,\n                         LIST_ALERT_ERROR])\n        list_alerts = self.driver.list_alerts(context)\n        ALERTS_INFO['occur_time'] = list_alerts[0].get('occur_time')\n        ALERTS_INFO['match_key'] = list_alerts[0].get('match_key')\n        self.assertDictEqual(list_alerts[0], ALERTS_INFO)\n\n    def test_list_disks(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(side_effect=[DISK_LIST_INFO])\n        data = self.driver.list_disks(context)\n        self.assertEqual(data, DISKS_DATA)\n\n    def test_list_disks_old(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(side_effect=[DISK_OLD])\n        data = self.driver.list_disks(context)\n        self.assertListEqual(data, DISKS_OLD)\n\n    def test_list_ports(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[FCOE_INFO, NODE_DATAS])\n        data = self.driver.list_ports(context)\n        self.assertListEqual(data, PORTS_DATA)\n\n    def test_list_ports_old(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[FC_INFO_OLD, NODE_DATAS_OLD])\n        data = self.driver.list_ports(context)\n        self.assertListEqual(data, PORTS_OLD_DATA)\n\n    def test_parse_alert(self):\n        parse_alert = self.driver.parse_alert(context, PARSE_ALERT_INFO)\n        PARSE_ALERT_DATA['occur_time'] = parse_alert.get('occur_time')\n        self.assertDictEqual(parse_alert, PARSE_ALERT_DATA)\n\n    def test_list_storage_host_initiators(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[HOST_STATUS_INFO, FC_HOSTS_INFO, ISCSI_HOST_INFO,\n                         ISCSI_HOST_DETAIL_ZERO, ISCSI_HOST_DETAIL_ONE,\n                         ISCSI_HOST_DETAIL_TWO, SAS_HOST_INFO])\n        initiators = self.driver.list_storage_host_initiators(context)\n        self.assertListEqual(initiators, INITIATORS_DATA)\n\n    def test_list_storage_hosts(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[HOST_STATUS_INFO, FC_HOSTS_INFO, ISCSI_HOST_INFO,\n                         ISCSI_HOST_DETAIL_ZERO, ISCSI_HOST_DETAIL_ONE,\n                         ISCSI_HOST_DETAIL_TWO, SAS_HOST_INFO])\n        hosts = self.driver.list_storage_hosts(context)\n        self.assertListEqual(hosts, HOSTS_DATA)\n\n    def test_list_storage_host_groups(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[HOST_GROUPS_INFO])\n        host_groups = self.driver.list_storage_host_groups(context)\n        self.assertDictEqual(host_groups, HOST_GROUPS_DATA)\n\n    def test_list_port_groups(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[PORT_G_VIEW_INFO])\n        port_groups = self.driver.list_port_groups(context)\n        self.assertDictEqual(port_groups, PORT_G_DATA)\n\n    def test_list_volume_groups(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[VOLUME_GROUPS_INFO, VOLUME_DETAILS_INFO])\n        volume_groups = self.driver.list_volume_groups(context)\n        self.assertDictEqual(volume_groups, VOLUME_GROUPS_DATA)\n\n    def test_list_masking_views(self):\n        EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        EternusSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[MASKING_VIEWS_INFO])\n        masking_views = self.driver.list_masking_views(context)\n        self.assertListEqual(masking_views, MASKING_VIEWS_DATA)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/hitachi/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/hitachi/hnas/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/hitachi/hnas/constants.py",
    "content": "# Copyright 2021 The SODA Authors.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"hitachi\",\n    \"model\": \"hnas\",\n    \"ssh\": {\n        \"host\": \"192.168.3.211\",\n        \"port\": 22,\n        \"username\": \"manager\",\n        \"password\": \"manager\",\n    }\n}\n\nSTORAGE_INFO = \"\"\"\\r\ncluster-show\\r\n\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ cluster-show\\r\nOverall Status = Online\\r\nCluster Health = Robust\\r\nCluster Mode = Not clustered\\r\nCluster Name = pba-hnas-1\\r\nCluster UUID = a39f815a-e582-11d6-9000-b76f3098a657\\r\nCluster Size = 1\\r\n   Node Name = pba-hnas-1-1\\r\n     Node ID = 1\\r\nCluster GenId = 1\\r\nCluster Master = No\\r\n\\r\npba-hnas-1-1:$ \"\"\"\n\nVERSION_INFO = \"\"\"\\r\nver\\r\n\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ ver\\r\n\\r\nModel: HNAS 4060\\r\n\\r\nSoftware: 12.7.4221.12 (built 2016-10-28 21:51:37+01:00)\\r\n\\r\nHardware: NAS Platform (M4SJKW1423160)\\r\n\\r\nboard        MMB1\\r\nmmb          12.7.4221.12 release (2016-10-28 21:51:37+01:00)\\r\n\\r\nboard        MFB2\\r\nmfb2hw       MB v0132 WL v0132 TD v0132 FD v0132 TC v00C6 RY v00C6 \\r\nTY v00C6 IC v00C6 WF v007C FS v007C OS v007C WD v007C D0 v0077 \\r\nSerial no    B1423125 (Tue Jun 17 13:38:33 2014)\\r\n\\r\nboard        MCP\\r\nSerial no    B1423160 (Wed Jun 18 20:39:53 2014)\\r\n\\r\npba-hnas-1-1:$ \"\"\"\n\nLOCATION_INFO = \"\"\"\\r\nsystem-information-get\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ system-information-get\\r\n\\r\n      Name: pba-hnas-1\\r\n  Location: chengdu\\r\n   Contact: \\r\n\\r\npba-hnas-1-1:$ \"\"\"\n\nDISK_INFO = \"\"\"\\r\nsd-list --scsi\\r\n\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ sd-list --scsi\\r\nDevice ID:      0\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span1' (capacity 200GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; Revision: 7303\\r\nSubmodel:       HM70\\r\nLuid:           [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:00\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            0\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           1\\r\nHDS ctrlr port: 0000\\r\nHDS dev name:   1000\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\nDevice ID:      1\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span1' (capacity 200GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; Revision: 7303\\r\nSubmodel:       HM70\\r\nLuid:           
[03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:01\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            1\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           1\\r\nHDS ctrlr port: 0400\\r\nHDS dev name:   1001\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\nDevice ID:      2\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span1' (capacity 200GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; Revision: 7303\\r\nSubmodel:       HM70\\r\nLuid:           [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:02\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            2\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           1\\r\nHDS ctrlr port: 0000\\r\nHDS dev name:   1002\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\nDevice ID:      3\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span1' (capacity 200GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; Revision: 7303\\r\nSubmodel:       HM70\\r\nLuid:           [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:03\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            3\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           1\\r\nHDS ctrlr port: 0400\\r\nHDS dev name:   1003\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\nDevice ID:      4\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span2' (capacity 400GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; Revision: 7303\\r\nSubmodel:       HM70\\r\nLuid:           [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:04\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            4\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           None\\r\nHDS ctrlr port: 0000\\r\nHDS dev name:   1004\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\nDevice ID:      5\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span2' (capacity 400GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; Revision: 7303\\r\nSubmodel:       HM70\\r\nLuid:           [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:05\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            5\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           None\\r\nHDS ctrlr port: 0400\\r\nHDS dev name:   1005\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\nDevice ID:      6\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span2' (capacity 400GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; Revision: 7303\\r\nSubmodel:       
HM70\\r\nLuid:           [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:06\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            6\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           None\\r\nHDS ctrlr port: 0000\\r\nHDS dev name:   1006\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\nDevice ID:      7\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span2' (capacity 400GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; Revision: 7303\\r\nSubmodel:       HM70\\r\nLuid:           [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:07\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            7\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           None\\r\nHDS ctrlr port: 0400\\r\nHDS dev name:   1007\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\nDevice ID:      8\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span2' (capacity 400GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; Revision: 7303\\r\nSubmodel:       HM70\\r\nLuid:           [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:08\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            8\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           None\\r\nHDS ctrlr port: 0400\\r\nHDS dev name:   1008\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\nDevice ID:      9\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span2' (capacity 400GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; Revision: 7303\\r\nSubmodel:       HM70\\r\nLuid:           [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:09\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            9\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           None\\r\nHDS ctrlr port: 0000\\r\nHDS dev name:   1009\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\nDevice ID:      10\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span2' (capacity 400GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; Revision: 7303\\r\nSubmodel:       HM70\\r\nLuid:           [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:0A\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            10\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           None\\r\nHDS ctrlr port: 0400\\r\nHDS dev name:   100A\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\nDevice ID:      11\\r\nComment:        \\r\nCapacity:       50GiB (53687746560 bytes)\\r\nStatus:         OK\\r\nRole:           Primary\\r\nAccess:         Allowed\\r\nUsed in span:   'span2' (capacity 400GiB)\\r\nType:           Make: HITACHI; Model: OPEN-V; 
Revision: 7303\\r\nSubmodel:       HM70\\r\nLuid:           [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:0B\\r\nBlocksize:      512\\r\nSuperflush:     Default\\r\nLun:            11\\r\nSerial number:  212902\\r\nSite ID:        0\\r\nTier:           None\\r\nHDS ctrlr port: 0000\\r\nHDS dev name:   100B\\r\nHDP pool no:    0\\r\nGAD:            No\\r\nQueue depth:    min 16, default 32, max 512, configured [default],\n effective 32\\r\n\\r\npba-hnas-1-1:$ \"\"\"\n\nPOOL_INFO = \"\"\"\\r\nspan-list\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ span-list\\r\nSpan instance name     OK?  Free  Cap/GiB  System drives              Con\\r\n---------------------  ---  ----  -------  -------------------------  ---\\r\nspan1                  Yes  100%      200  0,1,2,3                    90%\\r\n   Tier 0: empty: file systems can't be created or mounted\\r\n   Tier 1: capacity     200GiB; free: 200GiB (100%); HDP pool free 996GiB\\r\nspan2                  Yes   86%      400  4,5,6,7;8,9,10,11          90%\\r\npba-hnas-1-1:$ \"\"\"\n\nPOOL_DETAIL_INFO = \"\"\"\\r\n\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ span-space-distribution\\r\nSpan span2:\\r\n\\r\n  How each stripeset is used:\\r\n    Stripeset 0:\\r\n              18GiB     9.09%   fs1\\r\n              18GiB     9.09%   fs2\\r\n              18GiB     9.09%   fs3\\r\n             145GiB    72.74%   [Free space]\\r\n    Stripeset 1:\\r\n             200GiB   100.00%   [Free space]\\r\n\\r\n  Where each filesystem resides:\\r\n    Filesystem fs1:\\r\n      Stripeset  0            18GiB   100.00%\\r\n    Filesystem fs2:\\r\n      Stripeset  0            18GiB   100.00%\\r\n    Filesystem fs3:\\r\n      Stripeset  0            18GiB   100.00%\\r\n\\r\nSpan span1:\\r\n\\r\n  How each stripeset is used:\\r\n    Stripeset 0:\\r\n             200GiB   100.00%   [Free space]\\r\n\\r\n  Where each filesystem resides:\\r\n\\r\npba-hnas-1-1:$\"\"\"\n\nALERT_INFO = \"\"\"\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ event-log-show -w -s\\r\n****** Current time : 2021-10-25 11:12:35+08:00 ******\\r\n8208 Information 2021-11-02 08:26:01+08:00 Chassis device 'md0'\nis running background media scan.\\r\n    CAUSE:      Chassis drive volume is running a media check.\\r\n    RESOLUTION: No Action required.\\r\n\\r\n8462 Warning     2021-11-02 08:00:10+08:00 [ pba-hnas-1 ] The\nSMU does not have an email\nalert profile relating to a managed server.\\r\n    CAUSE:      An email alert profile relating to a managed\n    server must be applied to the SMU so that alert and diagnostic\n    emails can be sent to the required recipients.\\r\n    RESOLUTION: Go to an SMTP Email Profile page and apply a\n    profile to the SMU.\\r\n\\r\n8208 Information 2021-11-02 04:04:01+08:00 Chassis device 'md2'\nis running background media scan.\\r\n    CAUSE:      Chassis drive volume is running a media check.\\r\n    RESOLUTION: No Action required.\\r\n\\r\n8209 Information 2021-11-02 04:04:00+08:00 Chassis device 'md3'\nhas completed background media scan.\\r\n    CAUSE:      Chassis drive volume media check has completed.\\r\n    RESOLUTION: No Action required.\\r\n\\r\n9995 Information 2021-11-01 20:50:36+08:00 wq test snmp.\\r\n    CAUSE:      A test event was requested.\\r\n    RESOLUTION: No action required.\\r\n\\r\\\n3303 Information 2021-11-01 19:27:22+08:00 Exceeded socket backlog:\ndropping additional connection request from 
127.0.0.1:34008->127.0.0.1:206:\nthis event, Id 3303, happened once in the last 6.25 d on the MMB1.\\r\n    CAUSE:      Socket backlogged: could not allow a new connection.\\r\n    RESOLUTION: This is expected behavior on receiving a flurry of\n    connection requests.  If it happens in other circumstances,\n    run the Performance Info Report, then report this and send the\n    PIR results to your support provider.\\r\n\\r\n8208 Information 2021-11-01 16:44:01+08:00 Chassis device 'md3' is\nrunning background media scan.\\r\n    CAUSE:      Chassis drive volume is running a media check.\\r\n    RESOLUTION: No Action required.\\r\n\\r\n8462 Warning     2021-11-01 08:00:10+08:00 [ pba-hnas-1 ] The SMU\ndoes not have an email alert profile relating to a managed server.\\r\n    CAUSE:      An email alert profile relating to a managed server\n    must be applied to the SMU so that alert and diagnostic emails\n    can be sent to the required recipients.\\r\n    RESOLUTION: Go to an SMTP Email Profile page and apply a profile\n    to the SMU.\\r\n****** Current time : 2021-10-25 11:12:35+08:00 ******\\r\npba-hnas-1-1:$ \"\"\"\n\nTRAP_INFO = {\n    '1.3.6.1.4.1.11096.6.1.1':\n        \"8462 Warning: [ pba-hnas-1 ] The SMU does not have an email alert \"\n        \"profile relating to a managed server.\"\n}\n\nNODE_INFO = \"\"\"Linux pba-hnas-1 2.6.32-5-amd64 #1 SMP Sun Dec 21 18:\n01:12 UTC 2014 x86_64\\r\n\\r\n\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ cluster-show -y\\r\n                                           Ethernet    Mgmnt\\r\nID  Node Name        Status    FS Access   Aggs        Netwrk  FC   EVS IDs\\r\n--  ---------------  --------  ----------  ----------  ------  ---  -------\\r\n1   pba-hnas-1-1     ONLINE    OK          Degraded    OK      OK   [0,1,2]\\r\npba-hnas-1-1:$ \"\"\"\n\nFC_PORT_INFO = \"\"\"\\r\nfc-hports\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ fc-hports\\r\n\\r\nHost Port 1\\r\nAddrs: 0x1\\r\nPort name: 50:03:01:70:00:06:8B:01\\r\nNode name: 50:03:01:70:00:06:8B:00 \\r\nFC Link is up\\r\nStatus : Good \\r\n\\r\nHost Port 2\\r\nAddrs: not assigned\\r\nPort name: 50:03:01:70:00:06:8B:02\\r\nNode name: 50:03:01:70:00:06:8B:00 \\r\nFC Link is down\\r\n\\r\nHost Port 3\\r\nAddrs: 0x1\\r\nPort name: 50:03:01:70:00:06:8B:03\\r\nNode name: 50:03:01:70:00:06:8B:00 \\r\nFC Link is up\\r\nStatus : Good \\r\n\\r\nHost Port 4\\r\nAddrs: not assigned\\r\nPort name: 50:03:01:70:00:06:8B:04\\r\nNode name: 50:03:01:70:00:06:8B:00 \\r\nFC Link is down\\r\n\\r\npba-hnas-1-1:$ \"\"\"\n\nFC_PORT_STATUS = \"\"\"\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ fc-link-speed\\r\nFC 1:      8 Gbps\\r\nFC 2:      4 Gbps\\r\nFC 3:      8 Gbps\\r\nFC 4:      8 Gbps\\r\npba-hnas-1-1:$ \"\"\"\n\nETH_PORT_INFO = \"\"\"\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ ifconfig\\r\nag1       Link encap:1         HWaddr 00-30-17-09-fc-08\\r\n          inet addr:192.168.0.1  Bcast:192.168.0.255  mask:255.255.255.0\\r\n          inet addr:192.168.0.2  Bcast:192.168.0.255  mask:255.255.255.0\\r\n          Link:DOWN Admin:UP   MTU:1500  Metric:1  txqueuelen:64\\r\n\\r\nag2       Link encap:1         HWaddr 00-30-17-09-fc-09\\r\n          Link:DOWN Admin:DOWN MTU:1500  Metric:1  txqueuelen:64\\r\n\\r\nc1        Link encap:1         HWaddr 00-30-17-09-fc-10\\r\n          inet addr:240.152.166.87  Bcast:240.255.255.255  mask:255.0.0.0\\r\n          Link:DOWN 
Admin:UP   MTU:1488  Metric:2  txqueuelen:64\\r\n\\r\nc2        Link encap:1         HWaddr 00-30-17-09-fc-11\\r\n          Link:DOWN Admin:DOWN MTU:1488  Metric:2  txqueuelen:64\\r\n\\r\neth0      Link encap:1         HWaddr 0c-c4-7a-05-9e-a0\\r\n          inet addr:192.168.3.211  Bcast:192.168.3.255  mask:255.255.255.0\\r\n          inet6 addr: fe80::ec4:7aff:fe05:9ea0/64 Scope:Link\\r\n          Link:UP   Admin:UP   MTU:1500  Metric:4  txqueuelen:64\\r\n\\r\neth1      Link encap:1         HWaddr 0c-c4-7a-05-9e-a1\\r\n          inet addr:192.0.2.2  Bcast:192.0.255.255  mask:255.255.0.0\\r\n          inet addr:192.0.2.200  Bcast:192.0.255.255  mask:255.255.0.0\\r\n          Link:DOWN Admin:UP   MTU:1500  Metric:4  txqueuelen:64\\r\n\\r\nlo        Link encap:1         \\r\n          inet addr:127.0.0.1  Bcast:127.255.255.255  mask:255.0.0.0\\r\n          inet6 addr: ::1/128 Scope:Global\\r\n          inet6 addr: fe80::200:ff:fe00:0/64 Scope:Link\\r\n          Link:UP   Admin:UP   MTU:1500  Metric:4  txqueuelen:64\\r\n\\r\npba-hnas-1-1:$ \"\"\"\n\nFS_INFO = \"\"\"\\r\nfilesystem-list\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ filesystem-list\\r\nInstance name      Dev   On span      State  EVS  Cap/GiB  Confined Flag\\r\n-----------------  ----  -----------  -----  ---  -------  -------- ----\\r\nfs1                1024  span2        Mount   1        18       20      \\r\npba-hnas-1-1:$ \"\"\"\n\nQTREE_INFO = \"\"\"\\r\nevs-select 1\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ evs-select 1\\r\npba-hnas-1-1[EVS1]:$ virtual-volume list --verbose fs1\\r\ntree1\\r\n  email        : \\r\n  root         : /12323\\r\n  tag          : 2\\r\n  usage  bytes : 0 B  files: 1\\r\n  last modified: 2021-09-23 07:18:14.714807865+00:00\\r\nvol2\\r\n  email        : \\r\n  root         : /123\\r\n  tag          : 1\\r\n  usage  bytes : 0 B  files: 1\\r\n  last modified: 2021-09-15 07:17:02.790323869+00:00\\r\npba-hnas-1-1[EVS1]:$ \"\"\"\n\nCIFS_SHARE_INFO = \"\"\"\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ evs-select 1\\r\npba-hnas-1-1[EVS1]:$ cifs-share list\\r\n\\r\n           Share name: tree1\\r\n           Share path: \\12323\\r\n          Share users: 0\\r\n         Share online: Yes\\r\n        Share comment: Share associated with Virtual Volume tree1\\r\n        Cache options: Manual local caching for documents\\r\n            ABE enabled: No\\r\nContinuous Availability: No\\r\n       Access snapshots: Yes\\r\n      Display snapshots: Yes\\r\n     ShadowCopy enabled: Yes\\r\n   Lower case on create: No\\r\n        Follow symlinks: Yes\\r\n Follow global symlinks: No\\r\n       Scan for viruses: Yes\\r\n     File system label: fs1\\r\n      File system size: 18 GB\\r\nFile system free space: 15.6 GB\\r\n     File system state: \\r\n                formatted = Yes\\r\n                  mounted = Yes\\r\n                   failed = No\\r\n         thin provisioned = No\\r\nDisaster recovery setting:\\r\nRecovered = No\\r\nTransfer setting = Use file system default\\r\n     Home directories: Off\\r\n  Mount point options:\\r\n\\r\n           Share name: C$\\r\n           Share path: \\\\r\n          Share users: 0\\r\n         Share online: Yes\\r\n        Share comment: Default share\\r\n        Cache options: Manual local caching for documents\\r\n            ABE enabled: No\\r\nContinuous Availability: No\\r\n       Access snapshots: Yes\\r\n      Display snapshots: No\\r\n     
ShadowCopy enabled: Yes\\r\n   Lower case on create: No\\r\n        Follow symlinks: Yes\\r\n Follow global symlinks: No\\r\n       Scan for viruses: Yes\\r\n      File system info: *** not available ***\\r\nDisaster recovery setting:\\r\nRecovered = No\\r\nTransfer setting = Use file system default\\r\nHome directories: Off\\r\n  Mount point options:\\r\n\\r\n\\r\n           Share name: vol6\\r\n           Share path: \\666\\r\n          Share users: 0\\r\n         Share online: No\\r\n        Share comment: Share associated with Virtual Volume vol6\\r\n        Cache options: Manual local caching for documents\\r\n            ABE enabled: No\\r\nContinuous Availability: No\\r\n       Access snapshots: Yes\\r\n      Display snapshots: Yes\\r\n     ShadowCopy enabled: Yes\\r\n   Lower case on create: No\\r\n        Follow symlinks: Yes\\r\n Follow global symlinks: No\\r\n       Scan for viruses: Yes\\r\n      File system info: *** not available ***\\r\nDisaster recovery setting:\\r\nRecovered = No\\r\nTransfer setting = Use file system default\\r\nHome directories: Off\\r\n  Mount point options:\\r\n  \\r\npba-hnas-1-1[EVS1]:$ \"\"\"\n\nNFS_SHARE_INFO = \"\"\"\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ evs-select 1\\r\npba-hnas-1-1[EVS1]:$ nfs-export list\\r\n\\r\n            Export name: /nfs1\\r\n            Export path: /\\r\n      File system label: fs1\\r\n       File system size: 18 GB\\r\n File system free space: 15.6 GB\\r\n      File system state: \\r\n               formatted = Yes\\r\n                 mounted = Yes\\r\n                  failed = No\\r\n        thin provisioned = No\\r\n       Access snapshots: Yes\\r\n      Display snapshots: Yes\\r\n           Read Caching: Disabled\\r\nDisaster recovery setting:\\r\nRecovered = No\\r\nTransfer setting = Use file system default\\r\n\\r\nExport configuration:\\r\n192.168.3.163\\r\n\\r\n\\r\n            Export name: /vol6\\r\n            Export path: /666\\r\n       File system info: *** not available *** \\r\n       Access snapshots: Yes\\r\n      Display snapshots: Yes\\r\n           Read Caching: Disabled\\r\nDisaster recovery setting:\\r\nRecovered = No\\r\nTransfer setting = Use file system default\\r\n\\r\nExport configuration:\\r\n\\r\n\\r\n\\r\n            Export name: /vol2\\r\n            Export path: /123\\r\n      File system label: fs1\\r\n       File system size: 18 GB\\r\n File system free space: 15.6 GB\\r\n      File system state: \\r\n               formatted = Yes\\r\n                 mounted = Yes\\r\n                  failed = No\\r\n        thin provisioned = No\\r\n       Access snapshots: Yes\\r\n      Display snapshots: Yes\\r\n           Read Caching: Disabled\\r\nDisaster recovery setting:\\r\nRecovered = No\\r\nTransfer setting = Use file system default\\r\n\\r\nExport configuration:\\r\n\\r\n\\r\npba-hnas-1-1[EVS1]:$ \"\"\"\n\nFS_DETAIL_INFO = \"\"\"\\r\n\\r\nHDS NAS OS Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ df -k\\r\n\\r\n  ID  Label  EVS         Size              Used  Snapshots \"\"\"\\\n                 + \"\"\" Deduped              Avail  Thin     FS Type  \\r\n----  -----  ---  -----------  ----------------  --------- \"\"\"\\\n                 + \"\"\"   -------  -----------------  ----  -----  \\r\n1024    fs1    1  18874368 KB  2520544 KB (13%)  0 KB (0%)     \"\"\"\\\n                 + \"\"\"    NA  16353824 KB (87%)    No  32 KB,WFS-2,128 DSBs \\r\n\\r\npba-hnas-1-1:$ \"\"\"\n\nQUOTA_INFO = \"\"\"\\r\n\\r\nHDS NAS OS 
Console\\r\nMAC ID : B7-6F-30-98-A6-57\\r\n\\r\npba-hnas-1-1:$ evs-select 1\\r\npba-hnas-1-1[EVS1]:$ quota list fs1\\r\nType            : Explicit\\r\nTarget          : Group: root\\r\nUsage           : 10 GB\\r\n  Limit         : 1 GB (Soft)\\r\n  Warning       : 75% (768 MB)\\r\n  Critical      : 85% (870.4 MB)\\r\n  Reset         : 5% (51.2 MB)\\r\nFile Count      : 7\\r\n  Limit         : 213 (Soft)\\r\n  Warning       : 75% (159)\\r\n  Critical      : 85% (181)\\r\n  Reset         : 5% (10)\\r\nGenerate Events : Disabled\\r\n\\r\nType            : Explicit\\r\nTarget          : User: root\\r\nUsage           : 10 GB\\r\n  Limit         : 1 GB (Soft)\\r\n  Warning       : 75% (768 MB)\\r\n  Critical      : 85% (870.4 MB)\\r\n  Reset         : 5% (51.2 MB)\\r\nFile Count      : 7\\r\n  Limit         : 213 (Soft)\\r\n  Warning       : 75% (159)\\r\n  Critical      : 85% (181)\\r\n  Reset         : 5% (10)\\r\nGenerate Events : Disabled\\r\n\\r\nType            : Explicit\\r\nTarget          : ViVol: vol2\\r\nUsage           : 0 B\\r\n  Limit         : 1 GB (Soft)\\r\n  Warning       : 75% (768 MB)\\r\n  Critical      : 85% (870.4 MB)\\r\n  Reset         : 5% (51.2 MB)\\r\nFile Count      : 1\\r\n  Limit         : 213 (Soft)\\r\n  Warning       : 75% (159)\\r\n  Critical      : 85% (181)\\r\n  Reset         : 5% (10)\\r\nGenerate Events : Disabled\\r\n\\r\npba-hnas-1-1[EVS1]:$\"\"\"\n"
  },
  {
    "path": "delfin/tests/unit/drivers/hitachi/hnas/test_hnas.py",
    "content": "# Copyright 2021 The SODA Authors.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nfrom unittest import TestCase, mock\n\nimport paramiko\n\nfrom delfin.tests.unit.drivers.hitachi.hnas import constants\nfrom delfin import context\nfrom delfin.drivers.hitachi.hnas.hds_nas import HitachiHNasDriver\nfrom delfin.drivers.utils.ssh_client import SSHPool\n\n\nclass TestHitachiHNasDriver(TestCase):\n    SSHPool.get = mock.Mock({paramiko.SSHClient()})\n\n    SSHPool.do_exec_shell = mock.Mock(\n        side_effect=[constants.NODE_INFO])\n    hnas_client = HitachiHNasDriver(**constants.ACCESS_INFO)\n\n    @mock.patch.object(HitachiHNasDriver, 'reset_connection')\n    def test_reset_connection(self, reset_connection):\n        SSHPool.do_exec_shell = mock.Mock(\n            side_effect=[constants.NODE_INFO,\n                         constants.NODE_INFO])\n        kwargs = constants.ACCESS_INFO\n        hnas_client = HitachiHNasDriver(**kwargs)\n        hnas_client.reset_connection(context, **kwargs)\n        self.assertEqual(reset_connection.call_count, 1)\n        self.assertEqual(hnas_client.nas_handler.ssh_pool.ssh_host,\n                         \"192.168.3.211\")\n        self.assertEqual(hnas_client.nas_handler.ssh_pool.ssh_port, 22)\n\n    def test_get_storage(self):\n        SSHPool.do_exec_shell = mock.Mock(\n            side_effect=[constants.STORAGE_INFO,\n                         constants.VERSION_INFO,\n                         constants.LOCATION_INFO,\n                         constants.DISK_INFO,\n                         constants.POOL_INFO,\n                         constants.POOL_DETAIL_INFO])\n        data = self.hnas_client.get_storage(context)\n        self.assertEqual(data['vendor'], 'Hitachi')\n\n    def test_list_storage_pools(self):\n        SSHPool.do_exec_shell = mock.Mock(\n            side_effect=[constants.POOL_INFO,\n                         constants.POOL_DETAIL_INFO])\n        data = self.hnas_client.list_storage_pools(context)\n        self.assertEqual(data[0]['name'], 'span1')\n\n    def test_list_alerts(self):\n        SSHPool.do_exec_shell = mock.Mock(\n            side_effect=[constants.ALERT_INFO])\n        data = self.hnas_client.list_alerts(context)\n        self.assertEqual(data[0]['alert_name'],\n                         '8208')\n\n    def test_parse_alert(self):\n        data = self.hnas_client.parse_alert(context, constants.TRAP_INFO)\n        self.assertEqual(data['alert_name'], '8462')\n\n    def test_list_controllers(self):\n        SSHPool.do_exec_shell = mock.Mock(\n            side_effect=[constants.NODE_INFO])\n        data = self.hnas_client.list_controllers(context)\n        self.assertEqual(data[0]['name'], 'pba-hnas-1-1')\n\n    def test_list_ports(self):\n        SSHPool.do_exec_shell = mock.Mock(\n            side_effect=[constants.FC_PORT_INFO,\n                         constants.FC_PORT_STATUS,\n                         constants.ETH_PORT_INFO])\n        data = 
self.hnas_client.list_ports(context)\n        self.assertEqual(data[0]['name'], 'FC1')\n\n    def test_list_disks(self):\n        SSHPool.do_exec_shell = mock.Mock(\n            side_effect=[constants.DISK_INFO])\n        data = self.hnas_client.list_disks(context)\n        self.assertEqual(data[0]['name'], '1000')\n\n    def test_list_qtrees(self):\n        SSHPool.do_exec_shell = mock.Mock(side_effect=[\n            constants.FS_INFO, constants.QTREE_INFO])\n        data = self.hnas_client.list_qtrees(context)\n        self.assertEqual(data[0]['name'], 'tree1')\n\n    def test_list_shares(self):\n        SSHPool.do_exec_shell = mock.Mock(\n            side_effect=[constants.FS_INFO,\n                         constants.CIFS_SHARE_INFO,\n                         constants.NFS_SHARE_INFO,\n                         constants.QTREE_INFO])\n        data = self.hnas_client.list_shares(context)\n        self.assertEqual(data[0]['name'], 'tree1')\n\n    def test_list_filesystems(self):\n        SSHPool.do_exec_shell = mock.Mock(\n            side_effect=[constants.FS_DETAIL_INFO,\n                         constants.FS_INFO])\n        data = self.hnas_client.list_filesystems(context)\n        self.assertEqual(data[0]['name'], 'fs1')\n\n    def test_list_quotas(self):\n        SSHPool.do_exec_shell = mock.Mock(\n            side_effect=[constants.FS_INFO,\n                         constants.QUOTA_INFO])\n        data = self.hnas_client.list_quotas(context)\n        self.assertEqual(data[0]['file_soft_limit'], '213')\n"
  },
  {
    "path": "delfin/tests/unit/drivers/hitachi/vsp/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nfrom unittest import TestCase, mock\nsys.modules['delfin.cryptor'] = mock.Mock()\n\nfrom requests import Session\n\nfrom delfin import context\nfrom delfin.drivers.hitachi.vsp.rest_handler import RestHandler\nfrom delfin.drivers.hitachi.vsp.vsp_stor import HitachiVspDriver\n\n\nclass Request:\n    def __init__(self):\n        self.environ = {'delfin.context': context.RequestContext()}\n        pass\n\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"rest\": {\n        \"host\": \"51.10.192.90\",\n        \"port\": \"8443\",\n        \"username\": \"username\",\n        \"password\": \"cGFzc3dvcmQ=\"\n    },\n    \"ssh\": {\n        \"host\": \"110.143.132.231\",\n        \"port\": \"22\",\n        \"username\": \"username\",\n        \"password\": \"password\",\n        \"host_key\": \"weqewrerwerwerwe\"\n    },\n    \"vendor\": \"hitachi\",\n    \"model\": \"vsp\",\n    \"extra_attributes\": {\n        \"array_id\": \"00112233\"\n    }\n}\nGET_DEVICE_ID = {\n    \"data\": [\n        {\n            \"storageDeviceId\": \"800000011633\",\n            \"model\": \"VSP F1500\",\n            \"serialNumber\": 11633,\n            \"svpIp\": \"51.10.192.90\",\n        }\n    ]\n}\nGET_ALL_POOLS = {\n    \"data\": [\n        {\n            \"poolId\": 0,\n            \"poolStatus\": \"POLN\",\n            \"usedCapacityRate\": 56,\n            \"snapshotCount\": 0,\n            \"poolName\": \"p3-1\",\n            \"availableVolumeCapacity\": 7796586,\n            \"totalPoolCapacity\": 17821524,\n            \"numOfLdevs\": 8,\n            \"firstLdevId\": 4,\n            \"warningThreshold\": 70,\n            \"depletionThreshold\": 80,\n            \"virtualVolumeCapacityRate\": -1,\n            \"isMainframe\": False,\n            \"isShrinking\": False,\n            \"locatedVolumeCount\": 65,\n            \"totalLocatedCapacity\": 15694896,\n            \"blockingMode\": \"NB\",\n            \"totalReservedCapacity\": 0,\n            \"reservedVolumeCount\": 0,\n            \"poolType\": \"HDP\",\n            \"duplicationNumber\": 0,\n            \"dataReductionAccelerateCompCapacity\": 0,\n            \"dataReductionCapacity\": 0,\n            \"dataReductionBeforeCapacity\": 0,\n            \"dataReductionAccelerateCompRate\": 0,\n            \"duplicationRate\": 0,\n            \"compressionRate\": 0,\n            \"dataReductionRate\": 0,\n            \"snapshotUsedCapacity\": 0,\n            \"suspendSnapshot\": True\n        }\n    ]\n}\nGET_SPECIFIC_STORAGE = {\n    \"storageDeviceId\": \"800000011633\",\n    \"model\": \"VSP G350\",\n    \"serialNumber\": 11633,\n    \"svpIp\": \"51.10.192.90\",\n    \"rmiPort\": 1099,\n    \"dkcMicroVersion\": \"80-06-70/00\",\n    \"communicationModes\": [\n        {\n            \"communicationMode\": \"lanConnectionMode\"\n        }\n    ],\n    \"isSecure\": False\n}\nGET_ALL_VOLUMES = {\n    \"data\": [\n        {\n      
      \"ldevId\": 0,\n            \"clprId\": 0,\n            \"emulationType\": \"OPEN-V\",\n            \"byteFormatCapacity\": \"2.57 T\",\n            \"blockCapacity\": 5538459648,\n            \"composingPoolId\": 1,\n            \"attributes\": [\n                \"POOL\"\n            ],\n            \"raidLevel\": \"RAID5\",\n            \"raidType\": \"3D+1P\",\n            \"numOfParityGroups\": 1,\n            \"parityGroupIds\": [\n                \"5-1\"\n            ],\n            \"driveType\": \"SLB5E-M1R9SS\",\n            \"driveByteFormatCapacity\": \"1.74 T\",\n            \"driveBlockCapacity\": 3750000030,\n            \"status\": \"NML\",\n            \"mpBladeId\": 1,\n            \"ssid\": \"0004\",\n            \"resourceGroupId\": 0,\n            \"isAluaEnabled\": False\n        }\n    ]\n}\nGET_ALL_DISKS = {\n    \"data\": [\n        {\n            \"driveLocationId\": \"0-0\",\n            \"driveTypeName\": \"SSD(FMC)\",\n            \"driveSpeed\": 10000,\n            \"totalCapacity\": 600,\n            \"driveType\": \"DKR5D-J600SS\",\n            \"usageType\": \"DATA\",\n            \"status\": \"NML\",\n            \"parityGroupId\": \"1-6\",\n            \"serialNumber\": \"123456789012345678901\"\n        }, {\n            \"driveLocationId\": \"0-1\",\n            \"driveTypeName\": \"SAS\",\n            \"driveSpeed\": 10000,\n            \"totalCapacity\": 600,\n            \"driveType\": \"DKR5D-J600SS\",\n            \"usageType\": \"DATA\",\n            \"status\": \"NML\",\n            \"parityGroupId\": \"1-6\",\n            \"serialNumber\": \"123456789012345678902\"\n        }, {\n            \"driveLocationId\": \"0-2\",\n            \"driveTypeName\": \"SAS\",\n            \"driveSpeed\": 10000,\n            \"totalCapacity\": 600,\n            \"driveType\": \"DKR5D-J600SS\",\n            \"usageType\": \"DATA\",\n            \"status\": \"NML\",\n            \"parityGroupId\": \"1-6\",\n            \"serialNumber\": \"123456789012345678903\"\n        }, {\n            \"driveLocationId\": \"0-3\",\n            \"driveTypeName\": \"SAS\",\n            \"driveSpeed\": 10000,\n            \"totalCapacity\": 600,\n            \"driveType\": \"DKR5D-J600SS\",\n            \"usageType\": \"DATA\",\n            \"status\": \"NML\",\n            \"parityGroupId\": \"1-6\",\n            \"serialNumber\": \"123456789012345678904\"\n        }\n    ]\n}\nGET_ALL_CONTROLLERS = {\n    \"system\": {\n        \"powerConsumption\": 283\n    },\n    \"ctls\": [\n        {\n            \"location\": \"CTL1\",\n            \"status\": \"Normal\",\n            \"temperature\": 29,\n            \"temperatureStatus\": \"Normal\",\n            \"type\": \"Controller Board\"\n        }, {\n            \"location\": \"CTL2\",\n            \"status\": \"Normal\",\n            \"temperature\": 29,\n            \"temperatureStatus\": \"Normal\",\n            \"charge\": 100,\n            \"type\": \"Controller Board\"\n        }\n    ]\n}\nTRAP_INFO = {\n    \"1.3.6.1.2.1.1.3.0\": \"0\",\n    '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.116.3.11.4.1.1.0.1',\n    '1.3.6.1.4.1.116.5.11.4.2.3': 'eeeeeeeee',\n    '1.3.6.1.4.1.116.5.11.4.2.7': 'ddddddd',\n    '1.3.6.1.4.1.116.5.11.4.2.6': '14:10:10',\n    '1.3.6.1.4.1.116.5.11.4.2.5': '2020/11/20',\n    '1.3.6.1.4.1.116.5.11.4.2.2': ' System Version = 7.4.0.11 ',\n    '1.3.6.1.4.1.116.5.11.4.2.4': '# FRU = None '\n}\nALERT_INFO = [\n    {\n        'location': \"test\",\n        'alertId': '223232',\n        'alertIndex': 
'1111111',\n        'errorDetail': 'test alert',\n        'errorSection': 'something wrong',\n        'occurenceTime': '2020-11-20T10:10:10',\n        'errorLevel': 'Serious'\n    }\n]\n\nstorage_result = {\n    'name': 'VSP F1500_51.10.192.90',\n    'vendor': 'Hitachi',\n    'description': 'Hitachi VSP Storage',\n    'model': 'VSP F1500',\n    'status': 'normal',\n    'serial_number': '11633',\n    'firmware_version': '80-06-70/00',\n    'location': '',\n    'raw_capacity': 18687222349824,\n    'total_capacity': 18687222349824,\n    'used_capacity': 10511909388288,\n    'free_capacity': 8175312961536\n}\n\nvolume_result = [\n    {\n        'name': '00:00:00',\n        'storage_id': '12345',\n        'description': 'Hitachi VSP volume',\n        'status': 'normal',\n        'native_volume_id': '00:00:00',\n        'native_storage_pool_id': None,\n        'type': 'thick',\n        'total_capacity': 2835691339776,\n        'used_capacity': 2835691339776,\n        'free_capacity': 0,\n        'compressed': False,\n        'deduplicated': False,\n    }\n]\n\npool_result = [\n    {\n        'name': 'p3-1',\n        'storage_id': '12345',\n        'native_storage_pool_id': '0',\n        'description': 'Hitachi VSP Pool',\n        'status': 'normal',\n        'storage_type': 'block',\n        'total_capacity': 18687222349824,\n        'used_capacity': 10511909388288,\n        'free_capacity': 8175312961536,\n    }\n]\n\nalert_result = [\n    {\n        'location': 'test',\n        'alert_id': '223232',\n        'sequence_number': '1111111',\n        'description': 'test alert',\n        'alert_name': 'something wrong',\n        'resource_type': 'Storage',\n        'occur_time': 1605838210000,\n        'category': 'Fault',\n        'type': 'EquipmentAlarm',\n        'severity': 'Major',\n    }\n]\n\ntrap_alert_result = {\n    'alert_id': 'eeeeeeeee',\n    'alert_name': 'ddddddd',\n    'severity': 'Critical',\n    'category': 'Fault',\n    'type': 'EquipmentAlarm',\n    'occur_time': 1605852610000,\n    'description': 'ddddddd',\n    'resource_type': 'Storage',\n    'location': ' System Version = 7.4.0.11 ',\n    'match_key': '338d811d532553557ca33be45b6bde55'\n}\ncontroller_result = [\n    {\n        'name': 'CTL1',\n        'storage_id': '12345',\n        'native_controller_id': 'CTL1',\n        'status': 'normal',\n        'location': 'CTL1'\n    },\n    {\n        'name': 'CTL2',\n        'storage_id': '12345',\n        'native_controller_id': 'CTL2',\n        'status': 'normal',\n        'location': 'CTL2'\n    }\n]\ndisk_result = [\n    {\n        'name': '0-0',\n        'storage_id': '12345',\n        'native_disk_id': '0-0',\n        'serial_number': '123456789012345678901',\n        'speed': 10000,\n        'capacity': 644245094400,\n        'status': 'normal',\n        'physical_type': 'ssd',\n        'logical_type': 'member',\n        'native_disk_group_id': '1-6',\n        'location': '0-0'\n    }, {\n        'name': '0-1',\n        'storage_id': '12345',\n        'native_disk_id': '0-1',\n        'serial_number': '123456789012345678902',\n        'speed': 10000,\n        'capacity': 644245094400,\n        'status': 'normal',\n        'physical_type': 'sas',\n        'logical_type': 'member',\n        'native_disk_group_id': '1-6',\n        'location': '0-1'\n    }, {\n        'name': '0-2',\n        'storage_id': '12345',\n        'native_disk_id': '0-2',\n        'serial_number': '123456789012345678903',\n        'speed': 10000,\n        'capacity': 644245094400,\n        'status': 
'normal',\n        'physical_type': 'sas',\n        'logical_type': 'member',\n        'native_disk_group_id': '1-6',\n        'location': '0-2'\n    }, {\n        'name': '0-3',\n        'storage_id': '12345',\n        'native_disk_id': '0-3',\n        'serial_number': '123456789012345678904',\n        'speed': 10000,\n        'capacity': 644245094400,\n        'status': 'normal',\n        'physical_type': 'sas',\n        'logical_type': 'member',\n        'native_disk_group_id': '1-6',\n        'location': '0-3'\n    }\n]\nGET_ALL_PORTS = {\n    'data': [\n        {\n            'portId': 'CL1-A',\n            'portType': 'FIBRE',\n            'portSpeed': 'AUT',\n            'loopId': 'EF',\n            'fabricMode': True,\n            'portConnection': 'PtoP',\n            'lunSecuritySetting': True,\n            'wwn': '50060e80124e3b00'\n        },\n        {\n            'portId': 'CL1-B',\n            'portType': 'ISCSI',\n            'portSpeed': '10G',\n            'loopId': '00',\n            'fabricMode': False,\n            'lunSecuritySetting': True\n        }]\n}\nGET_DETAIL_PORT = {\n    'portId': 'CL1-B',\n    'portType': 'ISCSI',\n    'portSpeed': '10G',\n    'loopId': '00',\n    'fabricMode': False,\n    'lunSecuritySetting': True,\n    'tcpMtu': 1500,\n    'iscsiWindowSize': '64KB',\n    'keepAliveTimer': 60,\n    'tcpPort': '3260',\n    'ipv4Address': '192.168.116.19',\n    'ipv4Subnetmask': '255.255.0.0',\n    'ipv4GatewayAddress': '0.0.0.0',\n    'ipv6LinkLocalAddress': {\n        'status': 'INV',\n        'addressingMode': 'AM',\n        'address': 'fe80::'\n    },\n    'ipv6GlobalAddress': {\n        'status': 'INV',\n        'addressingMode': 'AM',\n        'address': '::'\n    },\n    'ipv6GatewayGlobalAddress': {\n        'status': 'INV',\n        'address': '::',\n        'currentAddress': '::'\n    }\n}\nport_result = [\n    {\n        'name': 'CL1-A',\n        'storage_id': '12345',\n        'native_port_id': 'CL1-A',\n        'location': 'CL1-A',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'fc',\n        'logical_type': '',\n        'max_speed': 8589934592,\n        'mac_address': None,\n        'wwn': '50060E80124E3B00',\n        'ipv4': None,\n        'ipv4_mask': None,\n        'ipv6': None\n    },\n    {\n        'name': 'CL1-B',\n        'storage_id': '12345',\n        'native_port_id': 'CL1-B',\n        'location': 'CL1-B',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'eth',\n        'logical_type': '',\n        'max_speed': 10737418240,\n        'mac_address': None,\n        'wwn': None,\n        'ipv4': '192.168.116.19',\n        'ipv4_mask': '255.255.0.0',\n        'ipv6': None\n    }]\nGET_ALL_GROUPS = {\n    \"data\": [\n        {\n            \"hostGroupId\": \"CL1-A,0\",\n            \"portId\": \"CL1-A\",\n            \"hostGroupNumber\": 0,\n            \"hostGroupName\": \"1A-G00\",\n            \"hostMode\": \"LINUX/IRIX\"\n        }\n    ]\n}\nGET_SINGLE_WWN_GROUP = {\n    \"data\": [\n        {\n            \"hostGroupId\": \"CL1-A,0\",\n            \"portId\": \"CL1-A\",\n            \"hostGroupNumber\": 0,\n            \"hostGroupName\": \"1A-G00\",\n            \"hostMode\": \"LINUX/IRIX\"\n        }\n    ]\n}\nGET_SINGLE_ISCSI_GROUP = {\n    \"data\": [\n        {\n            \"hostGroupId\": \"CL1-A,0\",\n            \"portId\": \"CL1-A\",\n            \"hostGroupNumber\": 0,\n            \"hostGroupName\": \"1A-G00\",\n            
\"hostMode\": \"LINUX/IRIX\",\n            \"iscsiName\": \"iqn.ewdhehdhdhh\"\n        }\n    ]\n}\nGET_HOST_WWN = {\n    \"data\": [\n        {\n            \"hostWwnId\": \"CL1-A,0,21000024ff8f5296\",\n            \"portId\": \"CL1-A\",\n            \"hostGroupNumber\": 0,\n            \"hostGroupName\": \"1A-G00\",\n            \"hostWwn\": \"21000024ff8f5296\",\n            \"wwnNickname\": \"-\"\n        }\n    ]\n}\nGET_HOST_ISCSI = {\n    \"data\": [\n        {\n            \"hostIscsiId\": \"CL1-A,0,iqn.ewdhehdhdhh\",\n            \"portId\": \"CL1-A\",\n            \"hostGroupNumber\": 0,\n            \"hostGroupName\": \"3C-G00\",\n            \"iscsiName\": \"iqn.ewdhehdhdhh\",\n            \"iscsiNickname\": \"test_tjy\"\n        }\n    ]\n}\nGET_LUN_PATH = {\n    \"data\": [\n        {\n            \"lunId\": \"CL1-A,1,1\",\n            \"portId\": \"CL1-A\",\n            \"hostGroupNumber\": 0,\n            \"hostMode\": \"LINUX/IRIX\",\n            \"lun\": 1,\n            \"ldevId\": 1\n        }\n    ]\n}\ninitator_result = [\n    {\n        'name': '21000024ff8f5296',\n        'storage_id': '12345',\n        'native_storage_host_initiator_id': '21000024ff8f5296',\n        'wwn': '21000024ff8f5296',\n        'status': 'online',\n        'type': 'fc',\n        'alias': 'CL1-A',\n        'native_storage_host_id': 'CL1-A_0_21000024ff8f5296'\n    }\n]\nhost_result = [\n    {\n        'name': 'test_tjy',\n        'storage_id': '12345',\n        'native_storage_host_id': 'CL1-A_0_iqn.ewdhehdhdhh',\n        'os_type': 'Linux',\n        'status': 'normal'\n    }\n]\nview_result = [\n    {\n        'name': 'CL1-A,1,1',\n        'native_storage_host_group_id': 'CL1-A_0',\n        'storage_id': '12345',\n        'native_volume_id': '00:00:01',\n        'native_masking_view_id': 'CL1-A_1_1'\n    }\n]\ngroups_result = {\n    'storage_host_groups': [\n        {\n            'name': '1A-G00',\n            'storage_id': '12345',\n            'native_storage_host_group_id': 'CL1-A_0',\n            'storage_hosts': 'CL1-A_0_iqn.ewdhehdhdhh'\n        }\n    ],\n    'storage_host_grp_host_rels': [\n        {\n            'storage_id': '12345',\n            'native_storage_host_group_id': 'CL1-A_0',\n            'native_storage_host_id': 'CL1-A_0_iqn.ewdhehdhdhh'\n        }\n    ]\n}\n\n\ndef create_driver():\n    kwargs = ACCESS_INFO\n\n    RestHandler.get_system_info = mock.Mock(return_value=GET_DEVICE_ID)\n\n    m = mock.MagicMock(status_code=200)\n    with mock.patch.object(Session, 'post', return_value=m):\n        m.raise_for_status.return_value = 201\n        m.json.return_value = {\n            \"token\": \"97c13b8082444b36bc2103026205fa64\",\n            \"sessionId\": 9\n        }\n        return HitachiVspDriver(**kwargs)\n\n\nclass TestHitachiVspStorStorageDriver(TestCase):\n    driver = create_driver()\n\n    def test_initrest(self):\n        m = mock.MagicMock(status_code=200)\n        with mock.patch.object(Session, 'get', return_value=m):\n            m.raise_for_status.return_value = 200\n            m.json.return_value = GET_DEVICE_ID\n            kwargs = ACCESS_INFO\n            rh = RestHandler(**kwargs)\n            rh.get_device_id()\n\n    def test_get_storage(self):\n        RestHandler.get_system_info = mock.Mock(return_value=GET_DEVICE_ID)\n        RestHandler.get_rest_info = mock.Mock(\n            side_effect=[GET_ALL_POOLS, GET_SPECIFIC_STORAGE])\n        storage = self.driver.get_storage(context)\n        self.assertDictEqual(storage, storage_result)\n\n    def 
test_list_storage_pools(self):\n        RestHandler.get_rest_info = mock.Mock(return_value=GET_ALL_POOLS)\n        pool = self.driver.list_storage_pools(context)\n        self.assertDictEqual(pool[0], pool_result[0])\n\n    def test_list_volumes(self):\n        RestHandler.get_rest_info = mock.Mock(return_value=GET_ALL_VOLUMES)\n        volume = self.driver.list_volumes(context)\n        self.assertDictEqual(volume[0], volume_result[0])\n\n    def test_list_alerts(self):\n        with self.assertRaises(Exception) as exc:\n            RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO)\n            RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO)\n            RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO)\n            self.driver.list_alerts(context)\n        self.assertEqual('list_alerts is not supported in model VSP F1500',\n                         str(exc.exception))\n\n    def test_parse_queried_alerts(self):\n        alert_list = []\n        HitachiVspDriver.parse_queried_alerts(ALERT_INFO, alert_list)\n        self.assertEqual(alert_list[0].get('alert_id'),\n                         alert_result[0].get('alert_id'))\n\n    def test_parse_alert(self):\n        trap_alert = self.driver.parse_alert(context, TRAP_INFO)\n        trap_alert_result['occur_time'] = trap_alert['occur_time']\n        self.assertEqual(trap_alert, trap_alert_result)\n\n    @mock.patch.object(RestHandler, 'call_with_token')\n    def test_get_token(self, mock_token):\n        with self.assertRaises(Exception) as exc:\n            mock_token.return_value = mock.MagicMock(\n                status_code=403, text='KART30005-E')\n            self.driver.rest_handler.get_token()\n        self.assertEqual('Exception from Storage Backend: KART30005-E.',\n                         str(exc.exception))\n\n    @mock.patch.object(RestHandler, 'get_controllers')\n    def test_list_controllers(self, mock_controller):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_controller.return_value = GET_ALL_CONTROLLERS\n        controller = HitachiVspDriver(**ACCESS_INFO).list_controllers(context)\n        self.assertEqual(controller, controller_result)\n\n    @mock.patch.object(RestHandler, 'get_disks')\n    def test_list_disks(self, mock_disk):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_disk.return_value = GET_ALL_DISKS\n        disk = HitachiVspDriver(**ACCESS_INFO).list_disks(context)\n        self.assertEqual(disk, disk_result)\n\n    @mock.patch.object(RestHandler, 'get_all_ports')\n    @mock.patch.object(RestHandler, 'get_detail_ports')\n    def test_list_ports(self, mock_detail, mock_all):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_all.return_value = GET_ALL_PORTS\n        mock_detail.return_value = GET_DETAIL_PORT\n        port = HitachiVspDriver(**ACCESS_INFO).list_ports(context)\n        self.assertEqual(port, port_result)\n\n    @mock.patch.object(RestHandler, 'get_specific_host_group')\n    @mock.patch.object(RestHandler, 'get_all_host_groups')\n    @mock.patch.object(RestHandler, 'get_host_wwn')\n    def test_host_initiators(self, mock_wwn, mock_groups, mock_group):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_groups.return_value = GET_ALL_GROUPS\n        mock_group.return_value = GET_SINGLE_WWN_GROUP\n        mock_wwn.return_value = GET_HOST_WWN\n        initiators = HitachiVspDriver(\n            **ACCESS_INFO).list_storage_host_initiators(context)\n        
self.assertEqual(initiators, initator_result)\n\n    @mock.patch.object(RestHandler, 'get_specific_host_group')\n    @mock.patch.object(RestHandler, 'get_all_host_groups')\n    @mock.patch.object(RestHandler, 'get_iscsi_name')\n    def test_hosts(self, mock_iscsi, mock_groups, mock_group):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_groups.return_value = GET_ALL_GROUPS\n        mock_group.return_value = GET_SINGLE_ISCSI_GROUP\n        mock_iscsi.return_value = GET_HOST_ISCSI\n        hosts = HitachiVspDriver(**ACCESS_INFO).list_storage_hosts(context)\n        self.assertEqual(hosts, host_result)\n\n    @mock.patch.object(RestHandler, 'get_all_host_groups')\n    @mock.patch.object(RestHandler, 'get_lun_path')\n    def test_masking_views(self, mock_view, mock_groups):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_groups.return_value = GET_ALL_GROUPS\n        mock_view.return_value = GET_LUN_PATH\n        views = HitachiVspDriver(**ACCESS_INFO).list_masking_views(context)\n        self.assertEqual(views, view_result)\n\n    @mock.patch.object(RestHandler, 'get_specific_host_group')\n    @mock.patch.object(RestHandler, 'get_all_host_groups')\n    @mock.patch.object(RestHandler, 'get_iscsi_name')\n    def test_host_groups(self, mock_iscsi, mock_groups, mock_group):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_groups.return_value = GET_ALL_GROUPS\n        mock_group.return_value = GET_SINGLE_ISCSI_GROUP\n        mock_iscsi.return_value = GET_HOST_ISCSI\n        groups = \\\n            HitachiVspDriver(**ACCESS_INFO).list_storage_host_groups(context)\n        self.assertEqual(groups, groups_result)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/hpe/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/hpe/hpe_3par/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/hpe/hpe_3par/test_hpe_3parstor.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nfrom unittest import TestCase, mock\n\nimport paramiko\n\nfrom delfin.common import constants\n\nsys.modules['delfin.cryptor'] = mock.Mock()\nfrom delfin import exception\nfrom delfin import context\nfrom delfin.drivers.hpe.hpe_3par.hpe_3parstor import Hpe3parStorDriver\nfrom delfin.drivers.hpe.hpe_3par.alert_handler import AlertHandler\nfrom delfin.drivers.hpe.hpe_3par.rest_handler import RestHandler\nfrom delfin.drivers.hpe.hpe_3par.ssh_handler import SSHHandler\nfrom delfin.drivers.utils.rest_client import RestClient\nfrom delfin.drivers.utils.ssh_client import SSHPool\n\nfrom requests import Session\n\n\nclass Request:\n    def __init__(self):\n        self.environ = {'delfin.context': context.RequestContext()}\n        pass\n\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"hpe\",\n    \"model\": \"3par\",\n    \"rest\": {\n        \"host\": \"10.0.0.1\",\n        \"port\": 8443,\n        \"username\": \"user\",\n        \"password\": \"cGFzc3dvcmQ=\"\n    },\n    \"ssh\": {\n        \"host\": \"110.143.132.231\",\n        \"port\": 22,\n        \"username\": \"user\",\n        \"password\": \"cGFzc3dvcmQ=\"\n    }\n}\n\nNODE_DATAS = \"\"\"\n                                  Control    Data        Cache\nNode --Name--- -State-- Master IC SLED LED Mem(MB) Mem(MB) Available(%)\n   0 1307327-0 Degraded Yes Yes unknown AmberBlnk    4096    6144            0\n   1 1307327-1 Degraded No  Yes unknown AmberBlnk    4096    6144            0\n\"\"\"\nNODE_CPU_DATAS = \"\"\"\n----------------------------CPUs----------------------------\nNode CPU -Manufacturer- -Serial- CPUSpeed(MHz) BusSpeed(MHz)\n   0   0 GenuineIntel   --                2327       1334.57\n   0   1 GenuineIntel   --                2327       1334.57\n   0   2 GenuineIntel   --                2327       1334.57\n   0   3 GenuineIntel   --                2327       1334.57\n   1   0 GenuineIntel   --                2327       1332.19\n   1   1 GenuineIntel   --                2327       1332.19\n   1   2 GenuineIntel   --                2327       1332.19\n   1   3 GenuineIntel   --                2327       1332.19\n\"\"\"\nNODE_VERSION = \"\"\"\nNode: 0\n--------\nSystem serial: 1000183\nBIOS version: 4.8.34\nOS version: 3.2.2.204\nReset reason: Unknown\n\nNode: 1\n--------\nBIOS version: 4.8.34\nOS version: 3.2.2.204\nReset reason: Unknown\n\"\"\"\nDISK_DATAS = \"\"\"\n                           ---Size(MB)--- ----Ports----\nId CagePos Type RPM State      Total   Free A      B      Cap(GB)\n 0 0:14:0  FC    15 degraded  571904  83968 0:2:2* -----      600\n 1 0:1:0   FC    15 degraded  571904  62720 0:2:2* -----      600\n-----------------------------------------------------------------\n16 total                     9150464 912896\n\n\"\"\"\nDISK_I_DATAS = \"\"\"\nId CagePos State Node_WWN MFR Model Serial FW_Rev Protocol MediaType AdminTime\n0 0:14:0  degraded WWN11  MFR111 Model11  
Serial111 FW_Rev111 Pl  MT1 600\n1 0:1:0   degraded WWN22  MFR2222  Model22  Serial222 FW_Rev222 P2   MT2 600\n\n\"\"\"\nPORT_DATAS = \"\"\"\nN:S:P Mode State -Node_WWN- -Port_WWN/HW_Addr- Type Protocol Label Ptner FState\n0:0:1 target ready 2FF70002AC001C9F 20010002AC001C9F host FC - 1:0:1 none\n0:0:2 target loss_sync 2FF70002AC001C9F 20020002AC001C9F  free FC - - -\n0:2:2 target loss_sync 2FF70002AC001C9F 20020002AC001C9F  free FC - - -\n0:6:1 target loss_sync 2FF70002AC001C9F 20020002AC001C9F  free FC - - -\n--------------------------------------------------------------------------\n   18\n\"\"\"\nPORT_I_DATAS = \"\"\"\nN:S:P Brand Model Rev Firmware Serial HWType\n0:0:1 LSI 9205-8e 01 17.11.00.00 SP12430085 SAS\n0:0:2 LSI 9205-8e 01 17.11.00.00 SP12430085 FC\n0:1:1 QLOGIC QLE2672 02 8.1.1 RFE1228G50820 FC\n0:1:2 QLOGIC QLE2672 02 8.1.1 RFE1228G50820 FC\n0:2:1 QLOGIC QLE8242 58 4.15.2 PCGLTX0RC1G3PX CNA\n\"\"\"\nPORT_PER_DATAS = \"\"\"\nN:S:P Connmode ConnType CfgRate MaxRate Class2 UniqNodeWwn VCN Il TMWO SSAN\n0:0:1 disk point 6Gbps 6Gbps n/a n/a n/a enabled n/a n/a\n0:0:2 disk point 6Gbps 6Gbps n/a n/a n/a enabled n/a n/a\n0:1:1 host point auto 16Gbps disabled disabled disabled enabled disabled n/a\n0:1:2 host point auto 16Gbps disabled disabled disabled enabled disabled n/a\n\"\"\"\nPORT_ISCSI_DATAS = \"\"\"\nN:S:P State IPAddr Netmask/PrefixLen Gateway TPGT MTU Rate iAddr iPort ST VLAN\n0:2:1 ready 1df9:7b7b:790::21 64 :: 21 1500 10Gbps :: 3205 21 Y\n0:2:2 ready 10.99.1.3 255.255.255.0 0.0.0.0 22 1500 10Gbps 0.0.0.0 3205 22 Y\n\"\"\"\nPORT_RCIP_DATAS = \"\"\"\nN:S:P State ---HwAddr--- IPAddr Netmask Gateway  MTU Rate Duplex AutoNeg\n0:6:1 loss_sync 0002AC684AAD 10.11.35.10 255.255.0.0 10.11.0.1 900 n/a n/a n/a\n1:6:1   offline 0002AC6A3A0F              -           - - -  n/a n/a n/a\n-----------------------------------------------------------------------------\n    2\n\"\"\"\nPORT_C_DATAS = \"\"\"\nN:S:P      Mode Device Pos Config Topology   Rate Cls Mode_change\n0:0:1    target RedHat_196   0  valid fabric  8Gbps   3     allowed\n                 RedHat_196   0  valid fabric  8Gbps   3     allowe\n0:0:2    target Dorado5000V3_F1   0  valid fabric  8Gbps   3     allowed\n                Dorado5000V3_F1   0  valid fabric  8Gbps   3     allowed\n--------------------------------------------------------------------------\n  108\n\"\"\"\n\nPOOL_DATAS = {\n    \"total\": 12,\n    \"members\": [\n        {\n            \"id\": 0,\n            \"uuid\": \"aa43f218-d3dd-4626-948f-8a160b0eac1d\",\n            \"name\": \"Lcltest333\",\n            \"numFPVVs\": 21,\n            \"numTPVVs\": 25,\n            \"UsrUsage\": {\n                \"totalMiB\": 1381504,\n                \"rawTotalMiB\": 1842004,\n                \"usedMiB\": 1376128,\n                \"rawUsedMiB\": 712703\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 140800,\n                \"rawTotalMiB\": 422400,\n                \"usedMiB\": 5120,\n                \"rawUsedMiB\": 15360\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 388736,\n                \"rawTotalMiB\": 518315,\n                \"usedMiB\": 0,\n                \"rawUsedMiB\": 0\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n        
        }\n            },\n            \"SDGrowth\": {\n                \"incrementMiB\": 32768,\n                \"LDLayout\": {\n                    \"RAIDType\": 3,\n                    \"HA\": 3,\n                    \"setSize\": 4,\n                    \"chunkletPosPref\": 1,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        },\n        {\n            \"id\": 1,\n            \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n            \"name\": \"cpg_Migration1\",\n            \"numFPVVs\": 14,\n            \"numTPVVs\": 319,\n            \"UsrUsage\": {\n                \"totalMiB\": 1418752,\n                \"rawTotalMiB\": 1702500,\n                \"usedMiB\": 1417984,\n                \"rawUsedMiB\": 568934\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 56832,\n                \"rawTotalMiB\": 170496,\n                \"usedMiB\": 42752,\n                \"rawUsedMiB\": 128256\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 187648,\n                \"rawTotalMiB\": 225179,\n                \"usedMiB\": 157184,\n                \"rawUsedMiB\": 188620\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"SDGrowth\": {\n                \"incrementMiB\": 32768,\n                \"LDLayout\": {\n                    \"RAIDType\": 3,\n                    \"HA\": 3,\n                    \"setSize\": 6,\n                    \"chunkletPosPref\": 1,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        },\n        {\n            \"id\": 2,\n            \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n            \"name\": \"cpg_Oracle\",\n            \"numFPVVs\": 14,\n            \"numTPVVs\": 319,\n            \"UsrUsage\": {\n                \"totalMiB\": 1418752,\n                \"rawTotalMiB\": 1702500,\n                \"usedMiB\": 1417984,\n                \"rawUsedMiB\": 568934\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 56832,\n                \"rawTotalMiB\": 170496,\n                \"usedMiB\": 42752,\n                \"rawUsedMiB\": 128256\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 187648,\n                \"rawTotalMiB\": 225179,\n                \"usedMiB\": 157184,\n                \"rawUsedMiB\": 188620\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"SDGrowth\": 
{\n                \"incrementMiB\": 32768,\n                \"LDLayout\": {\n                    \"RAIDType\": 3,\n                    \"HA\": 3,\n                    \"setSize\": 6,\n                    \"chunkletPosPref\": 1,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        },\n        {\n            \"id\": 3,\n            \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n            \"name\": \"cpg_filesystem\",\n            \"numFPVVs\": 14,\n            \"numTPVVs\": 319,\n            \"UsrUsage\": {\n                \"totalMiB\": 1418752,\n                \"rawTotalMiB\": 1702500,\n                \"usedMiB\": 1417984,\n                \"rawUsedMiB\": 568934\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 56832,\n                \"rawTotalMiB\": 170496,\n                \"usedMiB\": 42752,\n                \"rawUsedMiB\": 128256\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 187648,\n                \"rawTotalMiB\": 225179,\n                \"usedMiB\": 157184,\n                \"rawUsedMiB\": 188620\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"SDGrowth\": {\n                \"incrementMiB\": 32768,\n                \"LDLayout\": {\n                    \"RAIDType\": 3,\n                    \"HA\": 3,\n                    \"setSize\": 6,\n                    \"chunkletPosPref\": 1,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        },\n        {\n            \"id\": 4,\n            \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n            \"name\": \"cpg_test\",\n            \"numFPVVs\": 14,\n            \"numTPVVs\": 319,\n            \"UsrUsage\": {\n                \"totalMiB\": 1418752,\n                \"rawTotalMiB\": 1702500,\n                \"usedMiB\": 1417984,\n                \"rawUsedMiB\": 568934\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 56832,\n                \"rawTotalMiB\": 170496,\n                \"usedMiB\": 42752,\n                \"rawUsedMiB\": 128256\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 187648,\n                \"rawTotalMiB\": 225179,\n                \"usedMiB\": 157184,\n                \"rawUsedMiB\": 188620\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"SDGrowth\": {\n                \"incrementMiB\": 32768,\n          
      \"LDLayout\": {\n                    \"RAIDType\": 3,\n                    \"HA\": 3,\n                    \"setSize\": 6,\n                    \"chunkletPosPref\": 1,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        },\n        {\n            \"id\": 5,\n            \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n            \"name\": \"fs_cpg\",\n            \"numFPVVs\": 14,\n            \"numTPVVs\": 319,\n            \"UsrUsage\": {\n                \"totalMiB\": 1418752,\n                \"rawTotalMiB\": 1702500,\n                \"usedMiB\": 1417984,\n                \"rawUsedMiB\": 568934\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 56832,\n                \"rawTotalMiB\": 170496,\n                \"usedMiB\": 42752,\n                \"rawUsedMiB\": 128256\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 187648,\n                \"rawTotalMiB\": 225179,\n                \"usedMiB\": 157184,\n                \"rawUsedMiB\": 188620\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"SDGrowth\": {\n                \"incrementMiB\": 32768,\n                \"LDLayout\": {\n                    \"RAIDType\": 3,\n                    \"HA\": 3,\n                    \"setSize\": 6,\n                    \"chunkletPosPref\": 1,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        },\n        {\n            \"id\": 6,\n            \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n            \"name\": \"ljn2\",\n            \"numFPVVs\": 14,\n            \"numTPVVs\": 319,\n            \"UsrUsage\": {\n                \"totalMiB\": 1418752,\n                \"rawTotalMiB\": 1702500,\n                \"usedMiB\": 1417984,\n                \"rawUsedMiB\": 568934\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 56832,\n                \"rawTotalMiB\": 170496,\n                \"usedMiB\": 42752,\n                \"rawUsedMiB\": 128256\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 187648,\n                \"rawTotalMiB\": 225179,\n                \"usedMiB\": 157184,\n                \"rawUsedMiB\": 188620\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"SDGrowth\": {\n                \"incrementMiB\": 32768,\n                \"LDLayout\": {\n                    \"RAIDType\": 3,\n      
              \"HA\": 3,\n                    \"setSize\": 6,\n                    \"chunkletPosPref\": 1,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        },\n        {\n            \"id\": 7,\n            \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n            \"name\": \"ljn4_xiuGai\",\n            \"numFPVVs\": 14,\n            \"numTPVVs\": 319,\n            \"UsrUsage\": {\n                \"totalMiB\": 1418752,\n                \"rawTotalMiB\": 1702500,\n                \"usedMiB\": 1417984,\n                \"rawUsedMiB\": 568934\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 56832,\n                \"rawTotalMiB\": 170496,\n                \"usedMiB\": 42752,\n                \"rawUsedMiB\": 128256\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 187648,\n                \"rawTotalMiB\": 225179,\n                \"usedMiB\": 157184,\n                \"rawUsedMiB\": 188620\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"SDGrowth\": {\n                \"incrementMiB\": 32768,\n                \"LDLayout\": {\n                    \"RAIDType\": 3,\n                    \"HA\": 3,\n                    \"setSize\": 6,\n                    \"chunkletPosPref\": 1,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        },\n        {\n            \"id\": 8,\n            \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n            \"name\": \"ljn_330\",\n            \"numFPVVs\": 14,\n            \"numTPVVs\": 319,\n            \"UsrUsage\": {\n                \"totalMiB\": 1418752,\n                \"rawTotalMiB\": 1702500,\n                \"usedMiB\": 1417984,\n                \"rawUsedMiB\": 568934\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 56832,\n                \"rawTotalMiB\": 170496,\n                \"usedMiB\": 42752,\n                \"rawUsedMiB\": 128256\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 187648,\n                \"rawTotalMiB\": 225179,\n                \"usedMiB\": 157184,\n                \"rawUsedMiB\": 188620\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"SDGrowth\": {\n                \"incrementMiB\": 32768,\n                \"LDLayout\": {\n                    \"RAIDType\": 3,\n                    \"HA\": 3,\n                    \"setSize\": 
6,\n                    \"chunkletPosPref\": 1,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        },\n        {\n            \"id\": 9,\n            \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n            \"name\": \"xulin_cpg1\",\n            \"numFPVVs\": 14,\n            \"numTPVVs\": 319,\n            \"UsrUsage\": {\n                \"totalMiB\": 1418752,\n                \"rawTotalMiB\": 1702500,\n                \"usedMiB\": 1417984,\n                \"rawUsedMiB\": 568934\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 56832,\n                \"rawTotalMiB\": 170496,\n                \"usedMiB\": 42752,\n                \"rawUsedMiB\": 128256\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 187648,\n                \"rawTotalMiB\": 225179,\n                \"usedMiB\": 157184,\n                \"rawUsedMiB\": 188620\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"SDGrowth\": {\n                \"incrementMiB\": 32768,\n                \"LDLayout\": {\n                    \"RAIDType\": 3,\n                    \"HA\": 3,\n                    \"setSize\": 6,\n                    \"chunkletPosPref\": 1,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        },\n        {\n            \"id\": 10,\n            \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n            \"name\": \"zyz\",\n            \"numFPVVs\": 14,\n            \"numTPVVs\": 319,\n            \"UsrUsage\": {\n                \"totalMiB\": 1418752,\n                \"rawTotalMiB\": 1702500,\n                \"usedMiB\": 1417984,\n                \"rawUsedMiB\": 568934\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 56832,\n                \"rawTotalMiB\": 170496,\n                \"usedMiB\": 42752,\n                \"rawUsedMiB\": 128256\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 187648,\n                \"rawTotalMiB\": 225179,\n                \"usedMiB\": 157184,\n                \"rawUsedMiB\": 188620\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"SDGrowth\": {\n                \"incrementMiB\": 32768,\n                \"LDLayout\": {\n                    \"RAIDType\": 3,\n                    \"HA\": 3,\n                    \"setSize\": 6,\n                    \"chunkletPosPref\": 1,\n              
      \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        },\n        {\n            \"id\": 11,\n            \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n            \"name\": \"22\",\n            \"numFPVVs\": 14,\n            \"numTPVVs\": 319,\n            \"UsrUsage\": {\n                \"totalMiB\": 1418752,\n                \"rawTotalMiB\": 1702500,\n                \"usedMiB\": 1417984,\n                \"rawUsedMiB\": 568934\n            },\n            \"SAUsage\": {\n                \"totalMiB\": 56832,\n                \"rawTotalMiB\": 170496,\n                \"usedMiB\": 42752,\n                \"rawUsedMiB\": 128256\n            },\n            \"SDUsage\": {\n                \"totalMiB\": 187648,\n                \"rawTotalMiB\": 225179,\n                \"usedMiB\": 157184,\n                \"rawUsedMiB\": 188620\n            },\n            \"SAGrowth\": {\n                \"incrementMiB\": 8192,\n                \"LDLayout\": {\n                    \"HA\": 3,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"SDGrowth\": {\n                \"incrementMiB\": 32768,\n                \"LDLayout\": {\n                    \"RAIDType\": 3,\n                    \"HA\": 3,\n                    \"setSize\": 6,\n                    \"chunkletPosPref\": 1,\n                    \"diskPatterns\": [\n                        {\n                            \"diskType\": 1\n                        }\n                    ]\n                }\n            },\n            \"state\": 1,\n            \"failedStates\": [],\n            \"degradedStates\": [],\n            \"additionalStates\": []\n        }\n    ]\n}\nPOOL_METRICS_DATAS = {\n    \"sampleTime\": \"2020-03-01T03:50:00+08:00\",\n    \"sampleTimeSec\": 1583005800,\n    \"total\": 2,\n    \"members\": [\n        {\n            \"name\": \"22\",\n            \"IO\": {\n                \"read\": 0,\n                \"write\": 0,\n                \"total\": 10\n            },\n            \"KBytes\": {\n                \"read\": 0,\n                \"write\": 0,\n                \"total\": 0\n            },\n            \"serviceTimeMS\": {\n                \"read\": 0,\n                \"write\": 0,\n                \"total\": 0\n            },\n            \"IOSizeKB\": {\n                \"read\": 0,\n                \"write\": 0,\n                \"total\": 0\n            },\n            \"queueLength\": 0,\n            \"busyPct\": 0\n        },\n        {\n            \"name\": \"Lcltest333\",\n            \"IO\": {\n                \"read\": 0,\n                \"write\": 0,\n                \"total\": 20\n            },\n            \"KBytes\": {\n                \"read\": 0,\n                \"write\": 0,\n                \"total\": 0\n            },\n            \"serviceTimeMS\": {\n                \"read\": 0,\n                \"write\": 0,\n                \"total\": 0\n            },\n            \"IOSizeKB\": {\n                \"read\": 0,\n                \"write\": 0,\n                \"total\": 0\n            },\n            \"queueLength\": 0,\n            \"busyPct\": 0\n        
}\n    ]\n}\nPORT_METRICS_DATAS = \"\"\"\nTime: 2021-07-14 14:10:00 CST (1626243000)\n                 ----IO/s----- ---KBytes/s---- ----Svct ms----- -IOSz KBytes-\nPORT_N PORT_S PORT_P Rd Wr Tot Rd Wr Tot Rd Wr Tot Rd  Wr   Tot QLen AvgBusy%\n0 0 1 0.0  0.0  0.0 0.0   0.0   0.0 0.00  0.00  0.00 0.0 0.0   0.0    0 0.0\n0 1 1 0.0 14.3 14.3 0.0  86.4  86.4 0.00 11.52 11.52 0.0 6.1   6.1    1 11.9\n----------------------------------------------------------------------------\n      2 7.6 31.4 39.0 0.6 192.0 192.6 0.00 12.34  9.93 0.1 6.2   5.0    1 3.0\n\"\"\"\nDISK_METRICS_DATAS = \"\"\"\nTime: 2021-07-14 15:35:00 CST (1626248100)\n     ----IO/s----- ---KBytes/s---- ----Svct ms----- -IOSz KBytes-\nPDID  Rd   Wr  Tot  Rd    Wr   Tot   Rd    Wr   Tot  Rd   Wr  Tot QLen AvgBusy%\n   0 0.0  0.5  0.5 0.0   4.9   4.9 0.00  3.04  3.04 0.0 10.0 10.0    0      0.1\n   1 0.0  1.6  1.6 0.0  10.2  10.2 0.00  0.89  0.89 0.0  6.3  6.3    0      0.1\n-------------------------------------------------------------------------------\n  2 0.0 31.4 31.4 0.0 191.4 191.4 0.00 11.98 11.98 0.0  6.2  6.2    0      1.5\n\"\"\"\nVOLUME_METRICS_DATAS = \"\"\"\nTime: 2021-07-14 14:10:00 CST (1626243000)\n                 ----IO/s----- ---KBytes/s---- ----Svct ms----- -IOSz KBytes-\nVVID VV_NAME Rd Wr Tot Rd Wr Tot Rd Wr Tot Rd  Wr   Tot QLen AvgBusy%\n0 srdata 0.0  1.0  2.0 3.0 11.0 22.0 33.00 111.00  222.00 333.0 0.0 0.0 0 0.0\n1 admin 0.0 14.3 14.3 0.0  86.4  86.4 0.00 11.52 11.52 0.0 6.1 6.1 1 11.9\n----------------------------------------------------------------------------\n      2 7.6 31.4 39.0 0.6 192.0 192.6 0.00 12.34  9.93 0.1 6.2   5.0    1 3.0\n\"\"\"\nHOST_GROUP_DATAS = \"\"\"\n Id Name                    Members                 Comment\n194 HostSet_VMware          Host_ESXi6.5_125        --\n229 HostSet_Suse11_Oracle   Host_Suse11_8.44.75.122 --\n257 HostGroup_ESX6.0        ESX6.0_8.44.75.145      --\n                            ESX6.0_8.44.75.146\n264 HostSet_Win2016_WSFC    RH2288V5_Win2016_node2  --\n                            RH2288V5_Win2016_node1\n266 HostSet_Win2012_WSFC    RH2285_Win2012_wsfc1    --\n                            Rh2285_Win2012_wsfc2\n268 HostSet_AIX             Host_AIX_51.10.192.20   --\n270 HostSet_Suse11          Host_Suse11_8.44.75.123 --\n274 Suse11sp4_150           litng138.150            --\n-----------------------------------------------------------\n 32 total                   28\n\"\"\"\nHOST_ID_DATAS = \"\"\"\n  Id Name                      Persona        -WWN/iSCSI_Name- Port  IP_addr\n 175 Host_ESXi6.5_125               Generic        2408244427906812 ---   n/a\n 54 Doradov3_lm               Generic        2418244427906812 ---   n/a\n 57 AIX_wenbin                AIX-legacy     10000000C9E74BCC ---   n/a\n 65 SKY-ESXI60                Generic        2100001B321BE0FF ---   n/a\n 65 SKY-ESXI60                Generic        2101001B323BE0FF ---   n/a\n 67 zouming                   Generic        2012E4A8B6B0A1CC ---   n/a\n 67 zouming                   Generic        2002E4A8B6B0A1CC ---   n/a\n 68 powerpath                 Generic        21000024FF36D406 ---   n/a\n 68 powerpath                 Generic        21000024FF36D407 ---   n/a\n 69 power_v3                  Generic        20809CE37435D845 ---   n/a\n 69 power_v3                  Generic        20909CE37435D845 ---   n/a\n 89 vplex_meta_important      Generic        5000144280292012 0:1:2 n/a\n 89 vplex_meta_important      Generic        5000144280292010 0:1:2 n/a\n 89 vplex_meta_important      Generic        
5000144290292012 1:1:2 n/a\n 89 vplex_meta_important      Generic        500014429029E910 1:1:2 n/a\n 89 vplex_meta_important      Generic        500014429029E912 1:1:2 n/a\n 89 vplex_meta_important      Generic        500014428029E912 1:1:2 n/a\n 89 vplex_meta_important      Generic        500014428029E910 1:1:2 n/a\n 89 vplex_meta_important      Generic        5000144290292010 1:1:2 n/a\n 89 vplex_meta_important      Generic        5000144290292012 0:1:2 n/a\n 89 vplex_meta_important      Generic        5000144290292010 0:1:2 n/a\n 89 vplex_meta_important      Generic        500014429029E912 0:1:2 n/a\n 89 vplex_meta_important      Generic        500014429029E910 0:1:2 n/a\n 89 vplex_meta_important      Generic        5000144280292012 1:1:2 n/a\n 89 vplex_meta_important      Generic        5000144280292010 1:1:2 n/a\n 89 vplex_meta_important      Generic        500014428029E912 0:1:2 n/a\n 89 vplex_meta_important      Generic        500014428029E910 0:1:2 n/a\n 91 Dorado5000_51.45          Generic        200080D4A58EA53A ---   n/a\n 91 Dorado5000_51.45          Generic        201080D4A58EA53A ---   n/a\n 98 AIX6.1_LN                 AIX-legacy     10000000C9781C57 ---   n/a\n 98 AIX6.1_LN                 AIX-legacy     10000000C9781853 ---   n/a\n115 huhuihost                 Generic        2100000E1E1A9B30 ---   n/a\n121 Dorado5000V3_F3           Generic        201880D4A58EA53A ---   n/a\n160 host002                     Generic        21000024FF41DCF8 ---   n/a\n -- --                        --             21000024FF41DCF7 1:0:2 n/a\n -- --                        --             21000024FF41DCF6 1:0:2 n/a\n -- --                        --             21000024FF0CC6CA 0:1:2 n/a\n -- --                        --             21000024FF0CC6CA 1:1:2 n/a\n -- --                        --             21000024FF0CBF47 0:1:2 n/a\n -- --                        --             21000024FF0CBF47 1:1:2 n/a\n\"\"\"\nVOLUME_GROUP_DATAS = \"\"\"\nId Name              Members              Comment\n 91 wcj_2             wcj_2.0              --\n                      wcj_2.1\n                      wcj_2.2\n                      wcj_2.3\n110 HP-Esxi-LUNSet    --                   --\n124 zhangjun          --                   --\n126 wcj_1             wcj_1.1              --\n127 wcj_3             wcj_3.0              --\n                      wcj_3.1\n128 IBM_SVC           --                   --\n129 zyz_3parF200_     zyz_3parF200.0       --\n                      zyz_3parF200.1\n                      zyz_3parF200.2\n                      zyz_3parF200.3\n130 zyz               zyz_2                --\n131 tx                --                   --\n132 tx9               --                   --\n133 wcj_hp_1          --                   --\n136 AIX_YG_WYK_LUN    AIX_YG_WYK_LUN.0     --\n                      AIX_YG_WYK_LUN.1\n                      AIX_YG_WYK_LUN.2\n                      AIX_YG_WYK_LUN.3\n140 st11              --                   --\n146 Solaris_lun_group Solaris_LUN1_13G     --\n                      solaris_LUN_2_33G\n147 wcj_vplex         wcj_vplex.0          --\n-----------------------------------------------------------\n 32 total                   28\n\"\"\"\nVOLUME_ID_DATAS = \"\"\"\n  Id Name        Prov Type CopyOf BsId Rd -Detailed_State-  Adm Snp Usr VSize\n4836 wcj_2.0    tpvv base ---    4836 RW normal        256 512 512  5120\n4798 zyz_2         tpvv base ---    4836 RW normal        256 512 512  5120\n4797 wcj_3.1         tpvv base ---    4836 RW normal        256 512 512  
5120\n666 yytest_vv_001     tpvv base ---    4836 RW normal        256 512 512  5120\n------------------------------------------------------------------------\n 409 total                             51072 158720 3279488 18186240\n\"\"\"\nHOST_DATAS = [\n    {\n        \"total\": 38,\n        \"members\": [\n            {\n                \"id\": 54,\n                \"name\": \"Doradov3_lm\",\n                \"descriptors\": {\n                    \"location\": \"U9-3-B17R_B7\",\n                    \"IPAddr\": \"100.157.61.100\",\n                    \"os\": \"ESXI6.0\",\n                    \"model\": \"RH2288H V3\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"2408244427906812\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2418244427906812\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 57,\n                \"name\": \"AIX_wenbin\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"10000000C9E74BCC\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 5,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 65,\n                \"name\": \"SKY-ESXI60\",\n                \"descriptors\": {\n                    \"location\": \"U9-3-B17R_B7\",\n                    \"IPAddr\": \"100.157.61.100\",\n                    \"os\": \"ESXI6.0\",\n                    \"model\": \"RH2288H V3\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"2100001B321BE0FF\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2101001B323BE0FF\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 67,\n                \"name\": \"zouming\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"2012E4A8B6B0A1CC\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2002E4A8B6B0A1CC\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 68,\n                \"name\": \"powerpath\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"21000024FF36D406\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF36D407\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                
\"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 69,\n                \"name\": \"power_v3\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"20809CE37435D845\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"20909CE37435D845\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 89,\n                \"name\": \"vplex_meta_important\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"5000144280292012\",\n                        \"portPos\": {\n                            \"node\": 0,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"5000144280292010\",\n                        \"portPos\": {\n                            \"node\": 0,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"5000144290292012\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"500014429029E910\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"500014429029E912\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"500014428029E912\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"500014428029E910\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"5000144290292010\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n          
              },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"5000144290292012\",\n                        \"portPos\": {\n                            \"node\": 0,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"5000144290292010\",\n                        \"portPos\": {\n                            \"node\": 0,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"500014429029E912\",\n                        \"portPos\": {\n                            \"node\": 0,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"500014429029E910\",\n                        \"portPos\": {\n                            \"node\": 0,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"5000144280292012\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"5000144280292010\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"500014428029E912\",\n                        \"portPos\": {\n                            \"node\": 0,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"500014428029E910\",\n                        \"portPos\": {\n                            \"node\": 0,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 91,\n                \"name\": \"Dorado5000_51.45\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"200080D4A58EA53A\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"201080D4A58EA53A\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                
\"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 98,\n                \"name\": \"AIX6.1_LN\",\n                \"descriptors\": {\n                    \"os\": \"AIX\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"10000000C9781C57\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"10000000C9781853\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 5,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 115,\n                \"name\": \"huhuihost\",\n                \"descriptors\": {\n                    \"os\": \"SuSE\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"2100000E1E1A9B30\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 121,\n                \"name\": \"Dorado5000V3_F3\",\n                \"descriptors\": {\n                    \"os\": \"Red Hat Enterprise Linux\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"201880D4A58EA53A\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"200380D4A58EA53A\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 122,\n                \"name\": \"DYP_RHEL\",\n                \"descriptors\": {\n                    \"IPAddr\": \"100.157.18.22\",\n                    \"os\": \"Red Hat Enterprise Linux\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"10000090FA76D446\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"10000090FA76D447\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 123,\n                \"name\": \"DYP_Dorado6000\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"2618346AC212FB94\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 124,\n                \"name\": \"tool_rhel6.8\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"21000024FF543687\",\n                      
  \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF543686\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 125,\n                \"name\": \"OceanStor6800\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"2430E0979656725A\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2208E0979656725A\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2218E0979656725A\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2428E0979656725A\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 126,\n                \"name\": \"fyc_test\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"21000024FF41DE7E\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 127,\n                \"name\": \"huhui\",\n                \"descriptors\": {\n                    \"os\": \"SuSE\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"500601610864241E\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 132,\n                \"name\": \"ESX8.44.161.152\",\n                \"descriptors\": {\n                    \"os\": \"ESX 4.x/5.x\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"21000024FF2F3266\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF2F3267\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 8,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 133,\n                \"name\": \"ESX89PT_suse_8.44.190.111\",\n                \"descriptors\": {\n                    \"os\": \"SuSE\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"21000024FF36F1ED\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n            
    \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 134,\n                \"name\": \"SVC\",\n                \"descriptors\": {\n                    \"os\": \"Exanet\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"500507680110EF7C\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"500507680120EF7C\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"500507680120EF3E\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"500507680110EF3E\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 3,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 135,\n                \"name\": \"NSS_8.44.162.50\",\n                \"descriptors\": {\n                    \"os\": \"Red Hat Enterprise Linux\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"21000024FF0DC381\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 137,\n                \"name\": \"D185_8.44.143.201\",\n                \"descriptors\": {\n                    \"os\": \"Red Hat Enterprise Linux\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"29A11603042D0306\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"28D01603042D0306\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2903010203040509\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2802010203040509\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 139,\n                \"name\": \"Dorado3000V6\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"2019CC64A68314D3\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2009CC64A68314D3\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 141,\n                \"name\": \"8.44.143.27T2\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"10000090FA50C4DF\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n    
                    \"wwn\": \"10000090FA50C4DE\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 142,\n                \"name\": \"8.44.143.27T1\",\n                \"FCPaths\": [],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 144,\n                \"name\": \"C61_51.10.58.190\",\n                \"descriptors\": {\n                    \"os\": \"Red Hat Enterprise Linux\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"2210112224901223\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2200112224901223\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2230112224901223\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2220112224901223\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 145,\n                \"name\": \"8.44.43.19\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"21000024FF754606\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF1A99E1\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 146,\n                \"name\": \"ZTY_win2012\",\n                \"descriptors\": {\n                    \"os\": \"Windows 2012\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"21000024FF40272B\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF40272A\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 2,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 147,\n                \"name\": \"DoradoV6_183\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"240B121314151617\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2409121314151617\",\n                        \"hostSpeed\": 
0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 148,\n                \"name\": \"rhev_125\",\n                \"descriptors\": {\n                    \"os\": \"Windows 2012\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"21000024FF4BC1B7\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF4BC1B6\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 2,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 150,\n                \"name\": \"windows2012_68\",\n                \"descriptors\": {\n                    \"os\": \"Windows 2012\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"2101001B32B0667A\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2100001B3290667A\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 2,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 151,\n                \"name\": \"Dorado5000V6_80\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"2001183D5E0F5131\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 0,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"2011183D5E0F5131\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 0,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 152,\n                \"name\": \"windows2012_60\",\n                \"descriptors\": {\n                    \"os\": \"Windows 2012\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"21000024FF53B4BC\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF53B4BD\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 2,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 153,\n                \"name\": \"aix_8.44.134.204\",\n                \"descriptors\": {\n                    \"os\": \"AIX\"\n     
           },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"10000000C975804C\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 0,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"10000000C9765E79\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 0,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 5,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 154,\n                \"name\": \"Dorado5500_V6_109\",\n                \"descriptors\": {\n                    \"IPAddr\": \"8.44.133.82\",\n                    \"os\": \"Windows 2012\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"221818022D189653\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 0,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"220818022D189653\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 0,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 2,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 155,\n                \"name\": \"aix134.205\",\n                \"descriptors\": {\n                    \"IPAddr\": \"8.44.134.205\",\n                    \"os\": \"AIX\"\n                },\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"20000000C9781C81\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"10000000C9781C0C\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 0,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 5,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"id\": 158,\n                \"name\": \"hsv6\",\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"28130A2B304438A8\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"28120A2B304438A8\",\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"28F20A2B304438A8\",\n                        
\"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"28F30A2B304438A8\",\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"persona\": 1,\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            },\n            {\n                \"FCPaths\": [\n                    {\n                        \"wwn\": \"21000024FF41DCF7\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 0,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF41DCF6\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 0,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF0CC6CA\",\n                        \"portPos\": {\n                            \"node\": 0,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF0CC6CA\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF0CBF47\",\n                        \"portPos\": {\n                            \"node\": 0,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    },\n                    {\n                        \"wwn\": \"21000024FF0CBF47\",\n                        \"portPos\": {\n                            \"node\": 1,\n                            \"slot\": 1,\n                            \"cardPort\": 2\n                        },\n                        \"hostSpeed\": 0\n                    }\n                ],\n                \"iSCSIPaths\": [],\n                \"initiatorChapEnabled\": False,\n                \"targetChapEnabled\": False\n            }\n        ]\n    }\n]\nVIEW_DATAS = \"\"\"\n  Lun VVName        HostName       -Host_WWN/iSCSI_Name- Port     Type\n  2 yytest_vv_001 host002        ----------------       0:2:1     host\n  0 set:vvset001  set:hostset111 ----------------       1:2:1 host set\n--------------------------------------------------------------------\n  2 total\n\"\"\"\n\nCONTROLLER_RESULT = [\n    {\n        'name': '1307327-0',\n        'storage_id': '12345',\n        'native_controller_id': '0',\n        'status': 'degraded',\n        'location': None,\n        'soft_version': '3.2.2.204',\n        'cpu_info': '4 * 2327 MHz',\n        'cpu_count': 4,\n        'memory_size': '10737418240'\n    }]\nDISK_RESULT = [\n    {\n        'name': '0:14:0',\n        'storage_id': '12345',\n        'native_disk_id': '0',\n        'serial_number': 'Serial111',\n        'manufacturer': 'MFR111',\n        'model': 
'Model11',\n        'firmware': 'FW_Rev111',\n        'speed': 15000,\n        'capacity': 599684808704,\n        'status': 'degraded',\n        'physical_type': 'fc',\n        'logical_type': None,\n        'health_score': None,\n        'native_disk_group_id': None,\n        'location': '0:14:0'\n    }]\nPORT_RESULT = [\n    {\n        'name': '0:0:1',\n        'storage_id': '12345',\n        'native_port_id': '0:0:1',\n        'location': '0:0:1',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'sas',\n        'logical_type': None,\n        'speed': 8000000000,\n        'max_speed': 6000000000,\n        'native_parent_id': None,\n        'wwn': '20010002AC001C9F',\n        'mac_address': None,\n        'ipv4': None,\n        'ipv4_mask': None,\n        'ipv6': None,\n        'ipv6_mask': None\n    }]\nMETRICS_RESULT = [\n    constants.metric_struct(name='iops',\n                            labels={\n                                'storage_id': '12345',\n                                'resource_type': 'storagePool',\n                                'resource_id': '11',\n                                'type': 'RAW',\n                                'unit': 'IOPS'},\n                            values={1583005800000: 10}\n                            ),\n    constants.metric_struct(name='iops',\n                            labels={\n                                'storage_id': '12345',\n                                'resource_type': 'volume',\n                                'resource_id': '0',\n                                'type': 'RAW',\n                                'unit': 'IOPS'},\n                            values={1626243000000: 2.0}\n                            ),\n    constants.metric_struct(name='iops',\n                            labels={\n                                'storage_id': '12345',\n                                'resource_type': 'port',\n                                'resource_id': '0:0:1',\n                                'type': 'RAW',\n                                'unit': 'IOPS'\n                            },\n                            values={1626243000000: 0.0}\n                            ),\n    constants.metric_struct(name='iops',\n                            labels={\n                                'storage_id': '12345',\n                                'resource_type': 'disk',\n                                'resource_id': '0',\n                                'type': 'RAW',\n                                'unit': 'IOPS'\n                            },\n                            values={1626248100000: 0.5}\n                            ),\n]\nHOST_GROUP_RESULT = [\n    {\n        'name': 'HostSet_VMware',\n        'description': '',\n        'storage_id': '12345',\n        'native_storage_host_group_id': '194'\n    }]\nVOLUME_GROUP_RESULT = [\n    {\n        'name': 'wcj_2',\n        'description': '',\n        'storage_id': '12345',\n        'native_volume_group_id': '91'\n    }]\nPORT_GROUP_RESULT = [\n    {\n        'name': 'port_group_0:2:1',\n        'description': 'port_group_0:2:1',\n        'storage_id': '12345',\n        'native_port_group_id': 'port_group_0:2:1'\n    }]\nHOST_RESULT = [\n    {\n        'name': 'Doradov3_lm',\n        'description': None,\n        'storage_id': '12345',\n        'native_storage_host_id': 54,\n        'os_type': 'VMware ESX',\n        'status': 'normal',\n        'ip_address': '100.157.61.100'\n    }]\nINITIATOR_RESULT = [\n    {\n        
'name': '2408244427906812',\n        'storage_id': '12345',\n        'native_storage_host_initiator_id': '2408244427906812',\n        'wwn': '2408244427906812',\n        'type': 'fc',\n        'status': 'online',\n        'native_storage_host_id': '175'\n    }]\nVIEW_RESULT = [\n    {\n        'native_masking_view_id': '2_0:2:1_host002_yytest_vv_001',\n        'name': '2',\n        'storage_id': '12345',\n        'native_port_group_id': 'port_group_0:2:1',\n        'native_volume_id': '666',\n        'native_storage_host_id': '160'\n    }]\n\n\ndef create_driver():\n    kwargs = ACCESS_INFO\n\n    SSHHandler.login = mock.Mock(\n        return_value={\"result\": \"success\", \"reason\": \"null\"})\n\n    m = mock.MagicMock(status_code=201)\n    with mock.patch.object(Session, 'post', return_value=m):\n        m.raise_for_status.return_value = 201\n        m.json.return_value = {\n            'key': 'deviceid123ABC456'\n        }\n        return Hpe3parStorDriver(**kwargs)\n\n\nclass TestHpe3parStorageDriver(TestCase):\n\n    def test_a_init(self):\n        kwargs = ACCESS_INFO\n        SSHHandler.login = mock.Mock(\n            return_value={\"\"})\n        RestHandler.login = mock.Mock(\n            return_value={\"\"})\n        Hpe3parStorDriver(**kwargs)\n\n    def test_b_initrest(self):\n        m = mock.MagicMock()\n        with mock.patch.object(Session, 'post', return_value=m):\n            m.raise_for_status.return_value = 201\n            m.json.return_value = {\n                'key': '1&2F28CA9FC1EA0B8EAB80E9D8FD'\n            }\n            kwargs = ACCESS_INFO\n            rc = RestClient(**kwargs)\n            RestHandler(rc)\n\n    def test_d_get_storage(self):\n        driver = create_driver()\n        expected = {\n            'name': 'hp3parf200',\n            'vendor': 'HPE',\n            'model': 'InServ F200',\n            'status': 'abnormal',\n            'serial_number': '1307327',\n            'firmware_version': '3.1.2.484',\n            'location': None,\n            'total_capacity': 7793486594048,\n            'raw_capacity': 9594956939264,\n            'used_capacity': 6087847706624,\n            'free_capacity': 1705638887424\n        }\n\n        ret = {\n            \"id\": 7327,\n            \"name\": \"hp3parf200\",\n            \"systemVersion\": \"3.1.2.484\",\n            \"IPv4Addr\": \"100.157.92.213\",\n            \"model\": \"InServ F200\",\n            \"serialNumber\": \"1307327\",\n            \"totalNodes\": 2,\n            \"masterNode\": 0,\n            \"onlineNodes\": [\n                0,\n                1\n            ],\n            \"clusterNodes\": [\n                0,\n                1\n            ],\n            \"chunkletSizeMiB\": 256,\n            \"totalCapacityMiB\": 9150464,\n            \"allocatedCapacityMiB\": 5805824,\n            \"freeCapacityMiB\": 1626624,\n            \"failedCapacityMiB\": 1718016,\n            \"timeZone\": \"Asia/Shanghai\"\n        }\n\n        RestHandler.get_capacity = mock.Mock(\n            return_value={\n                \"allCapacity\": {\n                    \"totalMiB\": 9150464,\n                    \"allocated\": {\n                        \"system\": {\n                            \"totalSystemMiB\": 1232384,\n                            \"internalMiB\": 303104,\n                            \"spareMiB\": 929280,\n                            \"spareUsedMiB\": 307456,\n                            \"spareUnusedMiB\": 621824\n                        }\n                    }\n             
   }\n            }\n        )\n        health_state = 'PDs that are degraded'\n        SSHHandler.get_health_state = mock.Mock(return_value=health_state)\n        m = mock.MagicMock(status_code=200)\n        with mock.patch.object(RestHandler, 'call', return_value=m):\n            m.raise_for_status.return_value = 200\n            m.json.return_value = ret\n\n            storage = driver.get_storage(context)\n            self.assertDictEqual(storage, expected)\n\n    def test_e_list_storage_pools(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'test',\n                'storage_id': '12345',\n                'native_storage_pool_id': '0',\n                'description': 'Hpe 3par CPG:test',\n                'status': 'normal',\n                'storage_type': 'block',\n                'total_capacity': 2003870679040,\n                'subscribed_capacity': 2917892358144,\n                'used_capacity': 1448343502848,\n                'free_capacity': 555527176192\n            }, {\n                'name': 'cxd',\n                'storage_id': '12345',\n                'native_storage_pool_id': '1',\n                'description': 'Hpe 3par CPG:cxd',\n                'status': 'normal',\n                'storage_type': 'block',\n                'total_capacity': 1744025157632,\n                'subscribed_capacity': 2200095948800,\n                'used_capacity': 1696512081920,\n                'free_capacity': 47513075712\n            }\n        ]\n\n        ret = [\n            {\n                \"total\": 2,\n                \"members\": [\n                    {\n                        \"id\": 0,\n                        \"uuid\": \"aa43f218-d3dd-4626-948f-8a160b0eac1d\",\n                        \"name\": \"test\",\n                        \"numFPVVs\": 21,\n                        \"numTPVVs\": 25,\n                        \"UsrUsage\": {\n                            \"totalMiB\": 1381504,\n                            \"rawTotalMiB\": 1842004,\n                            \"usedMiB\": 1376128,\n                            \"rawUsedMiB\": 712703\n                        },\n                        \"SAUsage\": {\n                            \"totalMiB\": 140800,\n                            \"rawTotalMiB\": 422400,\n                            \"usedMiB\": 5120,\n                            \"rawUsedMiB\": 15360\n                        },\n                        \"SDUsage\": {\n                            \"totalMiB\": 388736,\n                            \"rawTotalMiB\": 518315,\n                            \"usedMiB\": 0,\n                            \"rawUsedMiB\": 0\n                        },\n                        \"SAGrowth\": {\n                            \"incrementMiB\": 8192,\n                            \"LDLayout\": {\n                                \"HA\": 3,\n                                \"diskPatterns\": [\n                                    {\n                                        \"diskType\": 1\n                                    }\n                                ]\n                            }\n                        },\n                        \"SDGrowth\": {\n                            \"incrementMiB\": 32768,\n                            \"LDLayout\": {\n                                \"RAIDType\": 3,\n                                \"HA\": 3,\n                                \"setSize\": 4,\n                                \"chunkletPosPref\": 1,\n                                
\"diskPatterns\": [\n                                    {\n                                        \"diskType\": 1\n                                    }\n                                ]\n                            }\n                        },\n                        \"state\": 1,\n                        \"failedStates\": [],\n                        \"degradedStates\": [],\n                        \"additionalStates\": []\n                    },\n                    {\n                        \"id\": 1,\n                        \"uuid\": \"c392910e-7648-4972-b594-47dd3d28f3ec\",\n                        \"name\": \"cxd\",\n                        \"numFPVVs\": 14,\n                        \"numTPVVs\": 319,\n                        \"UsrUsage\": {\n                            \"totalMiB\": 1418752,\n                            \"rawTotalMiB\": 1702500,\n                            \"usedMiB\": 1417984,\n                            \"rawUsedMiB\": 568934\n                        },\n                        \"SAUsage\": {\n                            \"totalMiB\": 56832,\n                            \"rawTotalMiB\": 170496,\n                            \"usedMiB\": 42752,\n                            \"rawUsedMiB\": 128256\n                        },\n                        \"SDUsage\": {\n                            \"totalMiB\": 187648,\n                            \"rawTotalMiB\": 225179,\n                            \"usedMiB\": 157184,\n                            \"rawUsedMiB\": 188620\n                        },\n                        \"SAGrowth\": {\n                            \"incrementMiB\": 8192,\n                            \"LDLayout\": {\n                                \"HA\": 3,\n                                \"diskPatterns\": [\n                                    {\n                                        \"diskType\": 1\n                                    }\n                                ]\n                            }\n                        },\n                        \"SDGrowth\": {\n                            \"incrementMiB\": 32768,\n                            \"LDLayout\": {\n                                \"RAIDType\": 3,\n                                \"HA\": 3,\n                                \"setSize\": 6,\n                                \"chunkletPosPref\": 1,\n                                \"diskPatterns\": [\n                                    {\n                                        \"diskType\": 1\n                                    }\n                                ]\n                            }\n                        },\n                        \"state\": 1,\n                        \"failedStates\": [],\n                        \"degradedStates\": [],\n                        \"additionalStates\": []\n                    }\n                ]\n            }\n        ]\n\n        with mock.patch.object(RestHandler, 'get_resinfo_call',\n                               side_effect=ret):\n            pools = driver.list_storage_pools(context)\n            self.assertDictEqual(pools[0], expected[0])\n            self.assertDictEqual(pools[1], expected[1])\n\n        with mock.patch.object(RestHandler, 'get_all_pools',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_storage_pools(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n    def 
test_f_list_volumes(self):\n        driver = create_driver()\n        expected = [{\n            'name': 'admin',\n            'storage_id': '12345',\n            'description': None,\n            'status': 'normal',\n            'native_volume_id': '0',\n            'native_storage_pool_id': '',\n            'wwn': '50002AC000001C9F',\n            'type': 'thick',\n            'total_capacity': 10737418240,\n            'used_capacity': 10737418240,\n            'free_capacity': 0,\n            'compressed': True,\n            'deduplicated': True\n        }]\n        ret = [{\n            \"members\": [{\n                \"id\": 0,\n                \"name\": \"admin\",\n                \"provisioningType\": 1,\n                \"copyType\": 1,\n                \"baseId\": 0,\n                \"readOnly\": False,\n                \"state\": 1,\n                \"userSpace\": {\n                    \"reservedMiB\": 10240,\n                    \"rawReservedMiB\": 20480,\n                    \"usedMiB\": 10240,\n                    \"freeMiB\": 0\n                },\n                \"sizeMiB\": 10240,\n                \"wwn\": \"50002AC000001C9F\"\n            }]\n        }]\n        pool_ret = {\n            \"members\": [{\n                \"id\": 0,\n                \"uuid\": \"aa43f218-d3dd-4626-948f-8a160b0eac1d\",\n                \"name\": \"test\"\n            }]\n        }\n        RestHandler.get_all_pools = mock.Mock(return_value=pool_ret)\n        with mock.patch.object(RestHandler, 'get_resinfo_call',\n                               side_effect=ret):\n            volumes = driver.list_volumes(context)\n            self.assertDictEqual(volumes[0], expected[0])\n\n    def test_h_parse_alert(self):\n        \"\"\" Success flow with all necessary parameters\"\"\"\n        driver = create_driver()\n        alert = {\n            'sysUpTime': '1399844806',\n            'snmpTrapOID': 'alertNotify',\n            '1.3.6.1.4.1.12925.1.7.1.5.1': 'test_trap',\n            '1.3.6.1.4.1.12925.1.7.1.6.1': 'This is a test trap',\n            'nodeID': '0',\n            '1.3.6.1.4.1.12925.1.7.1.2.1': '6',\n            '1.3.6.1.4.1.12925.1.7.1.3.1': 'test time',\n            '1.3.6.1.4.1.12925.1.7.1.7.1': '89',\n            '1.3.6.1.4.1.12925.1.7.1.8.1': '2555934',\n            '1.3.6.1.4.1.12925.1.7.1.9.1': '5',\n            'serialNumber': '1307327',\n            'transport_address': '100.118.18.100',\n            'storage_id': '1c094309-70f2-4da3-ac47-e87cc1492ad5'\n        }\n\n        expected_alert_model = {\n            'alert_id': '0x027001e',\n            'alert_name': 'CPG growth non admin limit',\n            'severity': 'NotSpecified',\n            'category': 'Recovery',\n            'type': 'EquipmentAlarm',\n            'sequence_number': '89',\n            'description': 'This is a test trap',\n            'resource_type': 'Storage',\n            'location': 'test_trap',\n            'occur_time': '',\n            'clear_category': 'Automatic'\n        }\n        context = {}\n        alert_model = driver.parse_alert(context, alert)\n\n        # Verify that all other fields are matching\n        self.assertDictEqual(expected_alert_model, alert_model)\n\n    def test_list_alert(self):\n        \"\"\" Success flow with all necessary parameters\"\"\"\n        driver = create_driver()\n        alert = \"\"\"\n        Id : 1\n        State : New\n        MessageCode : 0x2200de\n        Time : 2015-07-17 20:14:29 PDT\n        Severity : Degraded\n        Type : Component state 
change\n        Message : Node 0, Power Supply 1, Battery 0 Degraded\n        Component: 100.118.18.100\n\n        \"\"\"\n\n        expected_alert = [{\n            'alert_id': '0x2200de',\n            'alert_name': 'Component state change',\n            'severity': 'Warning',\n            'category': 'Fault',\n            'type': 'EquipmentAlarm',\n            'sequence_number': '1',\n            'occur_time': 1437135269000,\n            'description': 'Node 0, Power Supply 1, Battery 0 Degraded',\n            'resource_type': 'Storage',\n            'location': '100.118.18.100'\n        }]\n        SSHHandler.get_all_alerts = mock.Mock(return_value=alert)\n        alert_list = driver.list_alerts(context, None)\n        expected_alert[0]['occur_time'] = alert_list[0]['occur_time']\n        self.assertDictEqual(alert_list[0], expected_alert[0])\n\n    @mock.patch.object(AlertHandler, 'clear_alert')\n    def test_clear_alert(self, mock_clear_alert):\n        driver = create_driver()\n        alert_id = '230584300921369'\n        driver.clear_alert(context, alert_id)\n        self.assertEqual(mock_clear_alert.call_count, 1)\n\n    def test_get_controllers(self):\n        driver = create_driver()\n        SSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[NODE_DATAS, NODE_CPU_DATAS, NODE_VERSION])\n        controllers = driver.list_controllers(context)\n        self.assertDictEqual(controllers[0], CONTROLLER_RESULT[0])\n\n    def test_get_disks(self):\n        driver = create_driver()\n        SSHPool.do_exec = mock.Mock(side_effect=[DISK_DATAS, DISK_I_DATAS])\n        disks = driver.list_disks(context)\n        self.assertDictEqual(disks[0], DISK_RESULT[0])\n\n    def test_get_ports(self):\n        driver = create_driver()\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[PORT_DATAS, PORT_I_DATAS, PORT_PER_DATAS,\n                         PORT_ISCSI_DATAS, PORT_RCIP_DATAS, PORT_C_DATAS,\n                         PORT_RCIP_DATAS, PORT_RCIP_DATAS])\n        ports = driver.list_ports(context)\n        self.assertDictEqual(ports[0], PORT_RESULT[0])\n\n    @mock.patch.object(RestHandler, 'get_pool_metrics')\n    @mock.patch.object(SSHPool, 'do_exec')\n    def test_get_perf_metrics(self, mock_exec, mock_pool):\n        driver = create_driver()\n        resource_metrics = {\n            'storagePool': [\n                'iops', 'readIops', 'writeIops',\n                'throughput', 'readThroughput', 'writeThroughput',\n                'responseTime'\n            ],\n            'volume': [\n                'iops', 'readIops', 'writeIops',\n                'throughput', 'readThroughput', 'writeThroughput',\n                'responseTime',\n                'ioSize', 'readIoSize', 'writeIoSize',\n            ],\n            'port': [\n                'iops', 'readIops', 'writeIops',\n                'throughput', 'readThroughput', 'writeThroughput',\n                'responseTime'\n            ],\n            'disk': [\n                'iops', 'readIops', 'writeIops',\n                'throughput', 'readThroughput', 'writeThroughput',\n                'responseTime'\n            ],\n            'filesystem': [\n                'iops', 'readIops', 'writeIops',\n                'throughput', 'readThroughput', 'writeThroughput',\n                'readResponseTime', 'writeResponseTime',\n                'readIoSize', 'writeIoSize'\n            ]\n        }\n        start_time = 1628472280000\n        end_time = 
1628472900000\n        RestHandler.get_all_pools = mock.Mock(return_value=POOL_DATAS)\n        mock_pool.return_value = POOL_METRICS_DATAS\n        mock_exec.side_effect = [VOLUME_METRICS_DATAS, PORT_METRICS_DATAS,\n                                 DISK_METRICS_DATAS]\n        metrics = driver.collect_perf_metrics(context, '12345',\n                                              resource_metrics, start_time,\n                                              end_time)\n        self.assertEqual(metrics[0], METRICS_RESULT[0])\n        self.assertEqual(metrics[14], METRICS_RESULT[1])\n        self.assertEqual(metrics[34], METRICS_RESULT[2])\n        self.assertEqual(metrics[48], METRICS_RESULT[3])\n\n    def test_get_capabilities(self):\n        driver = create_driver()\n        cap = driver.get_capabilities(context)\n        self.assertIsNotNone(cap.get('resource_metrics'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('storagePool'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('volume'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('port'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('disk'))\n\n    def test_get_storage_host_groups(self):\n        driver = create_driver()\n        SSHPool.do_exec = mock.Mock(side_effect=[HOST_GROUP_DATAS,\n                                                 HOST_ID_DATAS])\n        host_groups = driver.list_storage_host_groups(context)\n        self.assertDictEqual(host_groups.get('storage_host_groups')[0],\n                             HOST_GROUP_RESULT[0])\n\n    def test_get_volume_groups(self):\n        driver = create_driver()\n        SSHPool.do_exec = mock.Mock(side_effect=[VOLUME_GROUP_DATAS,\n                                                 VOLUME_ID_DATAS])\n        volume_groups = driver.list_volume_groups(context)\n        self.assertDictEqual(volume_groups.get('volume_groups')[0],\n                             VOLUME_GROUP_RESULT[0])\n\n    def test_storage_hosts(self):\n        driver = create_driver()\n        with mock.patch.object(RestHandler, 'get_resinfo_call',\n                               side_effect=HOST_DATAS):\n            storage_hosts = driver.list_storage_hosts(context)\n            self.assertDictEqual(storage_hosts[0], HOST_RESULT[0])\n\n    def test_get_storage_host_initiators(self):\n        driver = create_driver()\n        SSHPool.do_exec = mock.Mock(side_effect=[HOST_ID_DATAS])\n        initiators = driver.list_storage_host_initiators(context)\n        self.assertDictEqual(initiators[0], INITIATOR_RESULT[0])\n\n    def test_get_masking_views(self):\n        driver = create_driver()\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[VIEW_DATAS, HOST_ID_DATAS, HOST_GROUP_DATAS,\n                         VOLUME_ID_DATAS, VOLUME_GROUP_DATAS])\n        views = driver.list_masking_views(context)\n        self.assertDictEqual(views[0], VIEW_RESULT[0])\n\n    def test_get_port_groups(self):\n        driver = create_driver()\n        SSHPool.do_exec = mock.Mock(side_effect=[VIEW_DATAS])\n        port_groups = driver.list_port_groups(context)\n        self.assertDictEqual(port_groups.get('port_groups')[0],\n                             PORT_GROUP_RESULT[0])\n"
  },
  {
    "path": "delfin/tests/unit/drivers/hpe/hpe_msa/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/hpe/hpe_msa/test_constans.py",
    "content": "LIST_CONTROLLERS = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show controllers\">\n<COMP G=\"0\" P=\"1\"/>\n<OBJECT basetype=\"controllers\" name=\"controllers\">\n    <PROPERTY name=\"durable-id\" >controller_a</PROPERTY>\n    <PROPERTY name=\"controller-id\">A</PROPERTY>\n    <PROPERTY name=\"serial-number\">7CE539M591</PROPERTY>\n    <PROPERTY name=\"cache-memory-size\">4096</PROPERTY>\n    <PROPERTY name=\"system-memory-size\">6144</PROPERTY>\n    <PROPERTY name=\"sc-fw\">GLS210R04-01</PROPERTY>\n    <PROPERTY name=\"sc-cpu-type\">Gladden</PROPERTY>\n    <PROPERTY name=\"cpu_count\">1</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n    <PROPERTY name=\"position\">Top</PROPERTY>\n    </OBJECT>\n<COMP G=\"0\" P=\"13\"/>\n<OBJECT basetype=\"controllers\" name=\"controllers\">\n    <PROPERTY name=\"durable-id\">controller_b</PROPERTY>\n    <PROPERTY name=\"controller-id\">B</PROPERTY>\n    <PROPERTY name=\"serial-number\">7CE539M591</PROPERTY>\n    <PROPERTY name=\"cache-memory-size\">4096</PROPERTY>\n    <PROPERTY name=\"system-memory-size\">6144</PROPERTY>\n    <PROPERTY name=\"sc-fw\">GLS210R04-01</PROPERTY>\n    <PROPERTY name=\"sc-cpu-type\">Gladden</PROPERTY>\n    <PROPERTY name=\"cpu_count\">1</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n    <PROPERTY name=\"position\">Bottom</PROPERTY>\n    </OBJECT>\n</RESPONSE>\n\"\"\"\n\nLIST_SYSTEM = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show system\">\n<COMP G=\"0\" P=\"1\"/>\n<OBJECT basetype=\"system\" >\n    <PROPERTY name=\"system-name\">msa2040</PROPERTY>\n    <PROPERTY name=\"midplane-serial-number\">00C0FF26DCB0</PROPERTY>\n    <PROPERTY name=\"system-location\">Uninitialized Location</PROPERTY>\n    <PROPERTY name=\"vendor-name\">HP</PROPERTY>\n    <PROPERTY name=\"product-id\">MSA 2040 SAN</PROPERTY>\n    <PROPERTY name=\"product-brand\" >MSA Storage</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n    </OBJECT>\n</RESPONSE>\n\"\"\"\n\nLIST_VISION = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show version\">\n<COMP G=\"0\" P=\"1\"/>\n<OBJECT basetype=\"versions\">\n    <PROPERTY name=\"bundle-version\">GL210R004</PROPERTY>\n</OBJECT>\n</RESPONSE>\n\"\"\"\n\nLIST_PORTS = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show ports\">\n<COMP G=\"0\" P=\"1\"/>\n <OBJECT basetype=\"port\" name=\"ports\" oid=\"1\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">hostport_A1</PROPERTY>\n    <PROPERTY name=\"port\">A1</PROPERTY>\n     <PROPERTY name=\"configured-speed\">8Gb</PROPERTY>\n    <PROPERTY name=\"port-type\">FC</PROPERTY>\n    <PROPERTY name=\"target-id\">207000c0ff26dcb0</PROPERTY>\n    <PROPERTY name=\"health\">N/A</PROPERTY>\n  </OBJECT>\n<COMP G=\"1\" P=\"2\"/>\n<OBJECT basetype=\"fc-port\" name=\"port-details\" oid=\"2\" format=\"rows\">\n    <PROPERTY name=\"sfp-supported-speeds\">4G,8G</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"3\"/>\n <OBJECT basetype=\"port\" name=\"ports\" oid=\"3\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">hostport_A2</PROPERTY>\n    <PROPERTY name=\"port\">A2</PROPERTY>\n    <PROPERTY name=\"target-id\">217000c0ff26dcb0</PROPERTY>\n    <PROPERTY name=\"port-type\">FC</PROPERTY>\n     <PROPERTY name=\"configured-speed\">8Gb</PROPERTY>\n    <PROPERTY name=\"health\">N/A</PROPERTY>\n  
</OBJECT>\n<COMP G=\"3\" P=\"4\"/>\n<OBJECT basetype=\"fc-port\" name=\"port-details\" oid=\"4\" format=\"rows\">\n    <PROPERTY name=\"sfp-supported-speeds\">4G,8G</PROPERTY>\n</OBJECT>\n<COMP G=\"0\" P=\"5\"/>\n <OBJECT basetype=\"port\" name=\"ports\" oid=\"5\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">hostport_A3</PROPERTY>\n    <PROPERTY name=\"port\">A3</PROPERTY>\n    <PROPERTY name=\"port-type\">iSCSI</PROPERTY>\n    <PROPERTY name=\"health\">N/A</PROPERTY>\n</OBJECT>\n<COMP G=\"5\" P=\"6\"/>\n<OBJECT basetype=\"iscsi-port\" name=\"port-details\" oid=\"6\" format=\"pairs\">\n    <PROPERTY name=\"ip-address\">0.0.0.0</PROPERTY>\n    <PROPERTY name=\"mac-address\">00:C0:FF:35:BD:64</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"7\"/>\n <OBJECT basetype=\"port\" name=\"ports\" oid=\"7\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >hostport_A4</PROPERTY>\n    <PROPERTY name=\"port\">A4</PROPERTY>\n    <PROPERTY name=\"port-type\">iSCSI</PROPERTY>\n    <PROPERTY name=\"configured-speed\">Auto</PROPERTY>\n    <PROPERTY name=\"health\">N/A</PROPERTY>\n  </OBJECT>\n<COMP G=\"7\" P=\"8\"/>\n<OBJECT basetype=\"iscsi-port\" name=\"port-details\" oid=\"8\" format=\"pairs\">\n    <PROPERTY name=\"ip-address\">0.0.0.0</PROPERTY>\n    <PROPERTY name=\"mac-address\">00:C0:FF:35:BD:65</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"9\"/>\n<OBJECT basetype=\"port\" name=\"ports\" oid=\"9\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">hostport_B1</PROPERTY>\n    <PROPERTY name=\"port\">B1</PROPERTY>\n    <PROPERTY name=\"target-id\">247000c0ff26dcb0</PROPERTY>\n    <PROPERTY name=\"port-type\">FC</PROPERTY>\n    <PROPERTY name=\"configured-speed\">8Gb</PROPERTY>\n    <PROPERTY name=\"health\">N/A</PROPERTY>\n  </OBJECT>\n<COMP G=\"9\" P=\"10\"/>\n<OBJECT basetype=\"fc-port\" name=\"port-details\" oid=\"10\" format=\"rows\">\n    <PROPERTY name=\"sfp-supported-speeds\">4G,8G</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"11\"/>\n<OBJECT basetype=\"port\" name=\"ports\" oid=\"11\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">hostport_B2</PROPERTY>\n    <PROPERTY name=\"port\">B2</PROPERTY>\n    <PROPERTY name=\"port-type\">FC</PROPERTY>\n    <PROPERTY name=\"target-id\">257000c0ff26dcb0</PROPERTY>\n    <PROPERTY name=\"configured-speed\">8Gb</PROPERTY>\n    <PROPERTY name=\"health\">N/A</PROPERTY>\n  </OBJECT>\n<COMP G=\"11\" P=\"12\"/>\n <OBJECT basetype=\"fc-port\" name=\"port-details\" oid=\"12\" format=\"rows\">\n    <PROPERTY name=\"sfp-supported-speeds\">4G,8G</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"13\"/>\n<OBJECT basetype=\"port\" name=\"ports\" oid=\"13\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">hostport_B3</PROPERTY>\n    <PROPERTY name=\"port\">B3</PROPERTY>\n    <PROPERTY name=\"port-type\">iSCSI</PROPERTY>\n    <PROPERTY name=\"configured-speed\">Auto</PROPERTY>\n    <PROPERTY name=\"health\">N/A</PROPERTY>\n  </OBJECT>\n<COMP G=\"13\" P=\"14\"/>\n<OBJECT basetype=\"iscsi-port\" name=\"port-details\" oid=\"14\" format=\"pairs\">\n    <PROPERTY name=\"ip-address\">0.0.0.0</PROPERTY>\n    <PROPERTY name=\"mac-address\">00:C0:FF:35:BA:BC</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"15\"/>\n <OBJECT basetype=\"port\" name=\"ports\" oid=\"15\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">hostport_B4</PROPERTY>\n    <PROPERTY name=\"port\">B4</PROPERTY>\n    <PROPERTY name=\"port-type\">iSCSI</PROPERTY>\n    <PROPERTY name=\"configured-speed\">Auto</PROPERTY>\n    <PROPERTY name=\"health\">N/A</PROPERTY>\n  </OBJECT>\n<COMP G=\"15\" P=\"16\"/>\n<OBJECT 
basetype=\"iscsi-port\" name=\"port-details\" oid=\"16\" format=\"pairs\">\n    <PROPERTY name=\"ip-address\">0.0.0.0</PROPERTY>\n    <PROPERTY name=\"mac-address\">00:C0:FF:35:BA:BD</PROPERTY>\n  </OBJECT>\n</RESPONSE>\n\"\"\"\n\nLIST_POOLS = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show pools\">\n<COMP G=\"0\" P=\"1\"/>\n <OBJECT basetype=\"pools\" name=\"pools\" oid=\"1\" format=\"rows\">\n    <PROPERTY name=\"name\">A</PROPERTY>\n    <PROPERTY name=\"serial-number\">00c0ff26c4ea0000d980546101000000</PROPERTY>\n    <PROPERTY name=\"total-size\">1196.8GB</PROPERTY>\n    <PROPERTY name=\"total-avail\">1196.8GB</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n  </OBJECT>\n</RESPONSE>\n\"\"\"\n\nLIST_VOLUMES = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show volumes\">\n<COMP G=\"0\" P=\"1\"/>\n<OBJECT basetype=\"volumes\" name=\"volume\"  format=\"rows\">\n    <PROPERTY name=\"durable-id\">V1</PROPERTY>\n    <PROPERTY name=\"volume-name\">Vol0001</PROPERTY>\n    <PROPERTY name=\"size\">99.9GB</PROPERTY>\n    <PROPERTY name=\"allocated-size\">0B</PROPERTY>\n    <PROPERTY name=\"total-size\">99.9GB</PROPERTY>\n    <PROPERTY name=\"blocks\">195305472</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n    <PROPERTY name=\"wwn\">600C0FF00026C4EAFA80546101000000</PROPERTY>\n    <PROPERTY name=\"volume-type\">base</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"2\"/>\n <OBJECT basetype=\"volumes\" name=\"volume\"  format=\"rows\">\n    <PROPERTY name=\"durable-id\">V2</PROPERTY>\n    <PROPERTY name=\"volume-name\">Vol0002</PROPERTY>\n    <PROPERTY name=\"allocated-size\">0B</PROPERTY>\n    <PROPERTY name=\"total-size\">99.9GB</PROPERTY>\n    <PROPERTY name=\"blocks\">195305472</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n    <PROPERTY name=\"wwn\">600C0FF00026C4EA0A81546101000000</PROPERTY>\n    <PROPERTY name=\"volume-type\">base</PROPERTY>\n  </OBJECT>\n</RESPONSE>\n\"\"\"\n\nLIST_DISKS = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show disks\">\n<COMP G=\"0\" P=\"1\"/>\n<OBJECT basetype=\"drives\" name=\"drive\" oid=\"1\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">disk_01.01</PROPERTY>\n    <PROPERTY name=\"location\" >1.1</PROPERTY>\n    <PROPERTY name=\"port\">0</PROPERTY>\n    <PROPERTY name=\"serial-number\">6SL9CD560000N51404EF</PROPERTY>\n    <PROPERTY name=\"vendor\">SEAGATE</PROPERTY>\n    <PROPERTY name=\"model\">ST3600057SS</PROPERTY>\n    <PROPERTY name=\"description\">SAS</PROPERTY>\n    <PROPERTY name=\"type\">SAS</PROPERTY>\n    <PROPERTY name=\"rpm\" >15</PROPERTY>\n    <PROPERTY name=\"size\">600.1GB</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n    <PROPERTY name=\"disk-group\">dgA01</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"2\"/>\n <OBJECT basetype=\"drives\" name=\"drive\" oid=\"2\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">disk_01.02</PROPERTY>\n    <PROPERTY name=\"location\">1.2</PROPERTY>\n    <PROPERTY name=\"serial-number\">6SL7X4RE0000B42601SF</PROPERTY>\n    <PROPERTY name=\"vendor\">SEAGATE</PROPERTY>\n    <PROPERTY name=\"model\">ST3600057SS</PROPERTY>\n    <PROPERTY name=\"description\">SAS</PROPERTY>\n    <PROPERTY name=\"type\">SAS</PROPERTY>\n    <PROPERTY name=\"rpm\">15</PROPERTY>\n    <PROPERTY name=\"size\">600.1GB</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n    <PROPERTY 
name=\"disk-group\">dgA01</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"3\"/>\n <OBJECT basetype=\"drives\" name=\"drive\" oid=\"3\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">disk_01.03</PROPERTY>\n    <PROPERTY name=\"location\">1.3</PROPERTY>\n    <PROPERTY name=\"serial-number\">6SL9QR5T0000N52120SK</PROPERTY>\n    <PROPERTY name=\"vendor\">SEAGATE</PROPERTY>\n     <PROPERTY name=\"description\">SAS</PROPERTY>\n    <PROPERTY name=\"model\">ST3600057SS</PROPERTY>\n    <PROPERTY name=\"rpm\">15</PROPERTY>\n    <PROPERTY name=\"size\">600.1GB</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n    <PROPERTY name=\"disk-group\">dgA01</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"4\"/>\n<OBJECT basetype=\"drives\" name=\"drive\" oid=\"4\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">disk_01.04</PROPERTY>\n    <PROPERTY name=\"port\">0</PROPERTY>\n    <PROPERTY name=\"location\">1.4</PROPERTY>\n     <PROPERTY name=\"description\">SAS</PROPERTY>\n    <PROPERTY name=\"serial-number\">3SL0WT7G00009051YBTF</PROPERTY>\n    <PROPERTY name=\"vendor\">SEAGATE</PROPERTY>\n    <PROPERTY name=\"model\">ST3600057SS</PROPERTY>\n    <PROPERTY name=\"rpm\" >15</PROPERTY>\n    <PROPERTY name=\"size\">600.1GB</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n    <PROPERTY name=\"disk-group\">dgA01</PROPERTY>\n  </OBJECT>\n</RESPONSE>\n\"\"\"\nLIST_ERROR = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show events error\">\n<COMP G=\"0\" P=\"13\"/>\n <OBJECT basetype=\"events\" name=\"event\" oid=\"1\" format=\"packed\">\n    <PROPERTY name=\"time-stamp\">2021-11-12 08:16:20</PROPERTY>\n    <PROPERTY name=\"time-stamp-numeric\" >1636704980</PROPERTY>\n    <PROPERTY name=\"event-code\">557</PROPERTY>\n    <PROPERTY name=\"event-id\" >A891</PROPERTY>\n    <PROPERTY name=\"model\">MSA 2040 SAN</PROPERTY>\n    <PROPERTY name=\"serial-number\">00C0FF26C236</PROPERTY>\n    <PROPERTY name=\"controller\" >A</PROPERTY>\n    <PROPERTY name=\"controller-numeric\">1</PROPERTY>\n    <PROPERTY name=\"severity\">ERROR</PROPERTY>\n    <PROPERTY name=\"severity-numeric\">2</PROPERTY>\n    <PROPERTY name=\"message\" >An Enclosure Management Processor(EMP)</PROPERTY>\n     <PROPERTY name=\"additional-information\">Management</PROPERTY>\n     <PROPERTY name=\"recommended-action\">Management</PROPERTY>\n  </OBJECT>\n</RESPONSE>\n\"\"\"\n\nLIST_HOST_INITIATORS = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show host-groups\">\n<COMP G=\"2\" P=\"3\"/>\n <OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"3\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">I2</PROPERTY>\n    <PROPERTY name=\"nickname\">FC-port1</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\">No</PROPERTY>\n    <PROPERTY name=\"profile\">HP-UX</PROPERTY>\n    <PROPERTY name=\"host-bus-type\">FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" key=\"true\">21000024ff3dfed1</PROPERTY>\n    <PROPERTY name=\"host-id\">NOHOST</PROPERTY>\n    <PROPERTY name=\"host-key\" >HU</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\">0</PROPERTY>\n  </OBJECT>\n<COMP G=\"5\" P=\"6\"/>\n <OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"6\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">I1</PROPERTY>\n    <PROPERTY name=\"nickname\">FC-port2</PROPERTY>\n    <PROPERTY 
name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\">Yes</PROPERTY>\n    <PROPERTY name=\"profile\">HP-UX</PROPERTY>\n    <PROPERTY name=\"host-bus-type\">FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" key=\"true\">10000090fa13870e</PROPERTY>\n    <PROPERTY name=\"host-id\">00c0ff26c2360000e2399f6101010000</PROPERTY>\n    <PROPERTY name=\"host-key\" >H1</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\">0</PROPERTY>\n  </OBJECT>\n<COMP G=\"5\" P=\"7\"/>\n<OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"7\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">I0</PROPERTY>\n    <PROPERTY name=\"nickname\">FC-port3</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\">Yes</PROPERTY>\n    <PROPERTY name=\"profile\">HP-UX</PROPERTY>\n    <PROPERTY name=\"host-bus-type\">FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" key=\"true\">10000090fa13870f</PROPERTY>\n    <PROPERTY name=\"host-id\">00c0ff26c2360000e2399f6101010000</PROPERTY>\n    <PROPERTY name=\"host-key\" >H1</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\">0</PROPERTY>\n  </OBJECT>\n<COMP G=\"9\" P=\"10\"/>\n<OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"10\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">I6</PROPERTY>\n    <PROPERTY name=\"nickname\">rac01_01</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\">Yes</PROPERTY>\n    <PROPERTY name=\"profile\">Standard</PROPERTY>\n    <PROPERTY name=\"host-bus-type\">FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" key=\"true\">500143801875548e</PROPERTY>\n    <PROPERTY name=\"host-id\">00c0ff26c4ea0000057f245b01010000</PROPERTY>\n    <PROPERTY name=\"host-key\" >H4</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\">0</PROPERTY>\n  </OBJECT>\n<COMP G=\"9\" P=\"11\"/>\n<OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"11\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">I5</PROPERTY>\n    <PROPERTY name=\"nickname\">rac01_02</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\">Yes</PROPERTY>\n    <PROPERTY name=\"profile\">Standard</PROPERTY>\n    <PROPERTY name=\"host-bus-type\">FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" key=\"true\">5001438012097ed6</PROPERTY>\n    <PROPERTY name=\"host-id\">00c0ff26c4ea0000057f245b01010000</PROPERTY>\n    <PROPERTY name=\"host-key\" >H4</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\">0</PROPERTY>\n  </OBJECT>\n<COMP G=\"12\" P=\"13\"/>\n<OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"13\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">I3</PROPERTY>\n    <PROPERTY name=\"nickname\">rac02_01</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\">Yes</PROPERTY>\n    <PROPERTY name=\"profile\">Standard</PROPERTY>\n    <PROPERTY name=\"host-bus-type\">FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" key=\"true\">50014380029ceb58</PROPERTY>\n    <PROPERTY name=\"host-id\">00c0ff26c4ea0000f77f245b01010000</PROPERTY>\n    <PROPERTY name=\"host-key\" >H3</PROPERTY>\n    <PROPERTY 
name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\">0</PROPERTY>\n  </OBJECT>\n<COMP G=\"12\" P=\"14\"/>\n <OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"14\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">I4</PROPERTY>\n    <PROPERTY name=\"nickname\">rac02_02</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\">No</PROPERTY>\n    <PROPERTY name=\"profile\">Standard</PROPERTY>\n    <PROPERTY name=\"host-bus-type\">FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" key=\"true\">500143801209031c</PROPERTY>\n    <PROPERTY name=\"host-id\">00c0ff26c4ea0000f77f245b01010000</PROPERTY>\n    <PROPERTY name=\"host-key\" >H3</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\">0</PROPERTY>\n  </OBJECT>\n<COMP G=\"15\" P=\"16\"/>\n<OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"16\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">I2</PROPERTY>\n    <PROPERTY name=\"nickname\">FC-port1</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\">No</PROPERTY>\n    <PROPERTY name=\"profile\">HP-UX</PROPERTY>\n    <PROPERTY name=\"host-bus-type\">FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" key=\"true\">21000024ff3dfed1</PROPERTY>\n    <PROPERTY name=\"host-id\">NOHOST</PROPERTY>\n    <PROPERTY name=\"host-key\" >HU</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\">0</PROPERTY>\n  </OBJECT>\n</RESPONSE>\n\n\"\"\"\n\nLIST_HOST_GROUPS = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show host-groups\">\n<COMP G=\"0\" P=\"1\"/>\n <OBJECT basetype=\"host-group\" name=\"host-group\" oid=\"1\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >HGU</PROPERTY>\n    <PROPERTY name=\"name\">-ungrouped-</PROPERTY>\n    <PROPERTY name=\"serial-number\" >UNGROUPEDHOSTS</PROPERTY>\n    <PROPERTY name=\"member-count\" >0</PROPERTY>\n  </OBJECT>\n<COMP G=\"1\" P=\"2\"/>  <OBJECT basetype=\"host\" name=\"host\" oid=\"2\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">HU</PROPERTY>\n    <PROPERTY name=\"name\">-nohost-</PROPERTY>\n    <PROPERTY name=\"serial-number\">NOHOST</PROPERTY>\n    <PROPERTY name=\"member-count\" >0</PROPERTY>\n    <PROPERTY name=\"host-group\">UNGROUPEDHOSTS</PROPERTY>\n    <PROPERTY name=\"group-key\" >HGU</PROPERTY>\n  </OBJECT>\n<COMP G=\"2\" P=\"3\"/>\n<OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"3\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >I2</PROPERTY>\n    <PROPERTY name=\"nickname\" >FC-port1</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\" >No</PROPERTY>\n    <PROPERTY name=\"profile\" >HP-UX</PROPERTY>\n    <PROPERTY name=\"host-bus-type\" >FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" >21000024ff3dfed1</PROPERTY>\n    <PROPERTY name=\"host-id\" >NOHOST</PROPERTY>\n    <PROPERTY name=\"host-key\" >HU</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\" >0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\" >0</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"4\"/>\n<OBJECT basetype=\"host-group\" name=\"host-group\" oid=\"4\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >HG0</PROPERTY>\n    <PROPERTY name=\"name\">HostGroup1</PROPERTY>\n    <PROPERTY name=\"serial-number\" 
>00c0ff26c2360000223a9f6101010000</PROPERTY>\n    <PROPERTY name=\"member-count\">1</PROPERTY>\n  </OBJECT>\n<COMP G=\"4\" P=\"5\"/>  <OBJECT basetype=\"host\" name=\"host\" oid=\"5\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >H1</PROPERTY>\n    <PROPERTY name=\"name\">Host1</PROPERTY>\n    <PROPERTY name=\"serial-number\" >00c0ff26c2360000e2399f6101010000</PROPERTY>\n    <PROPERTY name=\"member-count\">2</PROPERTY>\n    <PROPERTY name=\"host-group\">00c0ff26c2360000223a9f6101010000</PROPERTY>\n    <PROPERTY name=\"group-key\">HG0</PROPERTY>\n  </OBJECT>\n<COMP G=\"5\" P=\"6\"/>\n <OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"6\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >I1</PROPERTY>\n    <PROPERTY name=\"nickname\" >FC-port2</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\" >Yes</PROPERTY>\n    <PROPERTY name=\"profile\" >HP-UX</PROPERTY>\n    <PROPERTY name=\"host-bus-type\" >FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" >10000090fa13870e</PROPERTY>\n    <PROPERTY name=\"host-id\" >00c0ff26c2360000e2399f6101010000</PROPERTY>\n    <PROPERTY name=\"host-key\" >H1</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\" >0</PROPERTY>\n  </OBJECT>\n<COMP G=\"5\" P=\"7\"/>\n<OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"7\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >I0</PROPERTY>\n    <PROPERTY name=\"nickname\" >FC-port3</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\" >Yes</PROPERTY>\n    <PROPERTY name=\"profile\" >HP-UX</PROPERTY>\n    <PROPERTY name=\"host-bus-type\" >FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" >10000090fa13870f</PROPERTY>\n    <PROPERTY name=\"host-id\" >00c0ff26c2360000e2399f6101010000</PROPERTY>\n    <PROPERTY name=\"host-key\" >H1</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\" >0</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"8\"/>\n<OBJECT basetype=\"host-group\" name=\"host-group\" oid=\"8\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >HG2</PROPERTY>\n    <PROPERTY name=\"name\" >rac</PROPERTY>\n    <PROPERTY name=\"serial-number\">00c0ff26c4ea00008c81245b01010000</PROPERTY>\n    <PROPERTY name=\"member-count\" >2</PROPERTY>\n  </OBJECT>\n<COMP G=\"8\" P=\"9\"/>  <OBJECT basetype=\"host\" name=\"host\" oid=\"9\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >H4</PROPERTY>\n    <PROPERTY name=\"name\" >rac01</PROPERTY>\n    <PROPERTY name=\"serial-number\">00c0ff26c4ea0000057f245b01010000</PROPERTY>\n    <PROPERTY name=\"member-count\" >2</PROPERTY>\n    <PROPERTY name=\"host-group\" >00c0ff26c4ea00008c81245b01010000</PROPERTY>\n    <PROPERTY name=\"group-key\">HG2</PROPERTY>\n  </OBJECT>\n<COMP G=\"9\" P=\"10\"/>\n <OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"10\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >I6</PROPERTY>\n    <PROPERTY name=\"nickname\" >rac01_01</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\" >Yes</PROPERTY>\n    <PROPERTY name=\"profile\" >Standard</PROPERTY>\n    <PROPERTY name=\"host-bus-type\" >FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" >500143801875548e</PROPERTY>\n    <PROPERTY name=\"host-id\" >00c0ff26c4ea0000057f245b01010000</PROPERTY>\n    <PROPERTY 
name=\"host-key\" >H4</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\" >0</PROPERTY>\n  </OBJECT>\n<COMP G=\"9\" P=\"11\"/>\n <OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"11\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >I5</PROPERTY>\n    <PROPERTY name=\"nickname\" >rac01_02</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\" >Yes</PROPERTY>\n    <PROPERTY name=\"profile\" >Standard</PROPERTY>\n    <PROPERTY name=\"host-bus-type\" >FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" >5001438012097ed6</PROPERTY>\n    <PROPERTY name=\"host-id\" >00c0ff26c4ea0000057f245b01010000</PROPERTY>\n    <PROPERTY name=\"host-key\" >H4</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\" >0</PROPERTY>\n  </OBJECT>\n<COMP G=\"8\" P=\"12\"/>\n<OBJECT basetype=\"host\" name=\"host\" oid=\"12\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >H3</PROPERTY>\n    <PROPERTY name=\"name\" >rac02</PROPERTY>\n    <PROPERTY name=\"serial-number\">00c0ff26c4ea0000f77f245b01010000</PROPERTY>\n    <PROPERTY name=\"member-count\" >2</PROPERTY>\n    <PROPERTY name=\"host-group\">00c0ff26c4ea00008c81245b01010000</PROPERTY>\n    <PROPERTY name=\"group-key\">HG2</PROPERTY>\n  </OBJECT>\n<COMP G=\"12\" P=\"13\"/>\n<OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"13\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >I3</PROPERTY>\n    <PROPERTY name=\"nickname\" >rac02_01</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\" >Yes</PROPERTY>\n    <PROPERTY name=\"profile\" >Standard</PROPERTY>\n    <PROPERTY name=\"host-bus-type\" >FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" >50014380029ceb58</PROPERTY>\n    <PROPERTY name=\"host-id\" >00c0ff26c4ea0000f77f245b01010000</PROPERTY>\n    <PROPERTY name=\"host-key\" >H3</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\" >0</PROPERTY>\n  </OBJECT>\n<COMP G=\"12\" P=\"14\"/>\n<OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"14\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >I4</PROPERTY>\n    <PROPERTY name=\"nickname\" >rac02_02</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\" >No</PROPERTY>\n    <PROPERTY name=\"profile\" >Standard</PROPERTY>\n    <PROPERTY name=\"host-bus-type\" >FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" >500143801209031c</PROPERTY>\n    <PROPERTY name=\"host-id\" >00c0ff26c4ea0000f77f245b01010000</PROPERTY>\n    <PROPERTY name=\"host-key\" >H3</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\" >0</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"15\"/>\n <OBJECT basetype=\"host\" name=\"host\" oid=\"15\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >HU</PROPERTY>\n    <PROPERTY name=\"name\" >-nohost-</PROPERTY>\n    <PROPERTY name=\"serial-number\">NOHOST</PROPERTY>\n    <PROPERTY name=\"member-count\" >0</PROPERTY>\n    <PROPERTY name=\"host-group\" >UNGROUPEDHOSTS</PROPERTY>\n    <PROPERTY name=\"group-key\">HGU</PROPERTY>\n  </OBJECT>\n<COMP G=\"15\" P=\"16\"/>\n<OBJECT basetype=\"initiator\" name=\"initiator\" oid=\"16\" format=\"rows\">\n    <PROPERTY name=\"durable-id\" >I2</PROPERTY>\n    <PROPERTY 
name=\"nickname\" >FC-port1</PROPERTY>\n    <PROPERTY name=\"discovered\">No</PROPERTY>\n    <PROPERTY name=\"mapped\" >No</PROPERTY>\n    <PROPERTY name=\"profile\" >HP-UX</PROPERTY>\n    <PROPERTY name=\"host-bus-type\" >FC</PROPERTY>\n    <PROPERTY name=\"host-bus-type-numeric\">6</PROPERTY>\n    <PROPERTY name=\"id\" >21000024ff3dfed1</PROPERTY>\n    <PROPERTY name=\"host-id\" >NOHOST</PROPERTY>\n    <PROPERTY name=\"host-key\" >HU</PROPERTY>\n    <PROPERTY name=\"host-port-bits-a\">0</PROPERTY>\n    <PROPERTY name=\"host-port-bits-b\" >0</PROPERTY>\n  </OBJECT>\n</RESPONSE>\n\"\"\"\nLIST_HOST = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show host-groups\">\n<COMP G=\"1\" P=\"2\"/>  <OBJECT basetype=\"host\" name=\"host\" oid=\"2\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">HU</PROPERTY>\n    <PROPERTY name=\"name\">-nohost-</PROPERTY>\n    <PROPERTY name=\"serial-number\">NOHOST</PROPERTY>\n    <PROPERTY name=\"member-count\">0</PROPERTY>\n    <PROPERTY name=\"host-group\">UNGROUPEDHOSTS</PROPERTY>\n    <PROPERTY name=\"group-key\">HGU</PROPERTY>\n  </OBJECT>\n<COMP G=\"4\" P=\"5\"/>  <OBJECT basetype=\"host\" name=\"host\" oid=\"5\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">H1</PROPERTY>\n    <PROPERTY name=\"name\">Host1</PROPERTY>\n    <PROPERTY name=\"serial-number\">00c0ff26c2360000e2399f6101010000</PROPERTY>\n    <PROPERTY name=\"member-count\">2</PROPERTY>\n    <PROPERTY name=\"host-group\">00c0ff26c2360000223a9f6101010000</PROPERTY>\n    <PROPERTY name=\"group-key\">HG0</PROPERTY>\n  </OBJECT>\n<COMP G=\"8\" P=\"9\"/>  <OBJECT basetype=\"host\" name=\"host\" oid=\"9\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">H4</PROPERTY>\n    <PROPERTY name=\"name\">rac01</PROPERTY>\n    <PROPERTY name=\"serial-number\">00c0ff26c4ea0000057f245b01010000</PROPERTY>\n    <PROPERTY name=\"member-count\">2</PROPERTY>\n    <PROPERTY name=\"host-group\">00c0ff26c4ea00008c81245b01010000</PROPERTY>\n    <PROPERTY name=\"group-key\">HG2</PROPERTY>\n  </OBJECT>\n<COMP G=\"8\" P=\"12\"/>\n<OBJECT basetype=\"host\" name=\"host\" oid=\"12\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">H3</PROPERTY>\n    <PROPERTY name=\"name\">rac02</PROPERTY>\n    <PROPERTY name=\"serial-number\">00c0ff26c4ea0000f77f245b01010000</PROPERTY>\n    <PROPERTY name=\"member-count\">2</PROPERTY>\n    <PROPERTY name=\"host-group\">00c0ff26c4ea00008c81245b01010000</PROPERTY>\n    <PROPERTY name=\"group-key\">HG2</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"15\"/><OBJECT basetype=\"host\" name=\"host\" oid=\"15\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">HU</PROPERTY>\n    <PROPERTY name=\"name\">-nohost-</PROPERTY>\n    <PROPERTY name=\"serial-number\">NOHOST</PROPERTY>\n    <PROPERTY name=\"member-count\">0</PROPERTY>\n    <PROPERTY name=\"host-group\">UNGROUPEDHOSTS</PROPERTY>\n    <PROPERTY name=\"group-key\">HGU</PROPERTY>\n  </OBJECT>\n</RESPONSE>\n\"\"\"\nLIST_VOLUME_GROUPS = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show volume-groups\">\n<COMP G=\"0\" P=\"4\"/>\n<OBJECT basetype=\"volume-groups\" name=\"volume-groups\" oid=\"4\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">VG6</PROPERTY>\n    <PROPERTY name=\"group-name\" >VGroup1</PROPERTY>\n    <PROPERTY name=\"serial-number\">00c0ff26c4ea0000ab2b9f6101000000</PROPERTY>\n    <PROPERTY name=\"type\" >Volume</PROPERTY>\n    <PROPERTY name=\"type-numeric\">3672</PROPERTY>\n    <PROPERTY 
name=\"member-count\">2</PROPERTY>\n  </OBJECT>\n<COMP G=\"4\" P=\"5\"/>\n <OBJECT basetype=\"volumes\" name=\"volume\" oid=\"5\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">V0</PROPERTY>\n    <PROPERTY name=\"virtual-disk-name\" >A</PROPERTY>\n    <PROPERTY name=\"storage-pool-name\">A</PROPERTY>\n    <PROPERTY name=\"volume-name\">Vol0001</PROPERTY>\n    <PROPERTY name=\"size\" >100.9GB</PROPERTY>\n    <PROPERTY name=\"size-numeric\" >197255168</PROPERTY>\n    <PROPERTY name=\"total-size\">100.9GB</PROPERTY>\n    <PROPERTY name=\"total-size-numeric\" >197255168</PROPERTY>\n    <PROPERTY name=\"allocated-size\">0B</PROPERTY>\n    <PROPERTY name=\"allocated-size-numeric\">0</PROPERTY>\n    <PROPERTY name=\"storage-type\" >Virtual</PROPERTY>\n    <PROPERTY name=\"storage-type-numeric\" >1</PROPERTY>\n    <PROPERTY name=\"preferred-owner\">A</PROPERTY>\n    <PROPERTY name=\"preferred-owner-numeric\">1</PROPERTY>\n    <PROPERTY name=\"owner\" >A</PROPERTY>\n    <PROPERTY name=\"owner-numeric\">1</PROPERTY>\n    <PROPERTY name=\"serial-number\" >00c0ff26c4ea0000fa80546101000000</PROPERTY>\n    <PROPERTY name=\"write-policy\" >write-back</PROPERTY>\n    <PROPERTY name=\"write-policy-numeric\">1</PROPERTY>\n    <PROPERTY name=\"cache-optimization\" >standard</PROPERTY>\n    <PROPERTY name=\"cache-optimization-numeric\" >0</PROPERTY>\n    <PROPERTY name=\"read-ahead-size\" >Adaptive</PROPERTY>\n    <PROPERTY name=\"read-ahead-size-numeric\" >-1</PROPERTY>\n    <PROPERTY name=\"volume-type\" >base</PROPERTY>\n    <PROPERTY name=\"volume-type-numeric\">15</PROPERTY>\n    <PROPERTY name=\"volume-class\" >standard</PROPERTY>\n    <PROPERTY name=\"volume-class-numeric\" >0</PROPERTY>\n    <PROPERTY name=\"profile-preference\" >Standard</PROPERTY>\n    <PROPERTY name=\"profile-preference-numeric\">0</PROPERTY>\n    <PROPERTY name=\"snapshot\" >No</PROPERTY>\n    <PROPERTY name=\"volume-qualifier\">N/A</PROPERTY>\n    <PROPERTY name=\"volume-qualifier-numeric\" >0</PROPERTY>\n    <PROPERTY name=\"blocks\" >197255168</PROPERTY>\n    <PROPERTY name=\"capabilities\">dmse</PROPERTY>\n    <PROPERTY name=\"volume-parent\"></PROPERTY>\n    <PROPERTY name=\"snap-pool\"></PROPERTY>\n    <PROPERTY name=\"replication-set\" ></PROPERTY>\n    <PROPERTY name=\"attributes\" ></PROPERTY>\n    <PROPERTY name=\"wwn\" >600C0FF00026C4EAFA80546101000000</PROPERTY>\n    <PROPERTY name=\"progress\">0%</PROPERTY>\n    <PROPERTY name=\"progress-numeric\">0</PROPERTY>\n    <PROPERTY name=\"container-name\">A</PROPERTY>\n    <PROPERTY name=\"allowed-storage-tiers-numeric\" >7</PROPERTY>\n    <PROPERTY name=\"threshold-percent-of-pool\" >10.00 %</PROPERTY>\n    <PROPERTY name=\"reserved-size-in-pages\" >0</PROPERTY>\n    <PROPERTY name=\"allocate-reserved-pages-first\">Enabled</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n    <PROPERTY name=\"health-numeric\">0</PROPERTY>\n    <PROPERTY name=\"health-reason\"></PROPERTY>\n    <PROPERTY name=\"health-recommendation\" ></PROPERTY>\n    <PROPERTY name=\"volume-group\">00c0ff26c4ea0000ab2b9f6101000000</PROPERTY>\n    <PROPERTY name=\"group-key\" >VG6</PROPERTY>\n  </OBJECT>\n<COMP G=\"4\" P=\"6\"/>\n<OBJECT basetype=\"volumes\" name=\"volume\" oid=\"6\" format=\"rows\">\n    <PROPERTY name=\"durable-id\">V1</PROPERTY>\n    <PROPERTY name=\"virtual-disk-name\" >A</PROPERTY>\n    <PROPERTY name=\"storage-pool-name\">A</PROPERTY>\n    <PROPERTY name=\"volume-name\">Vol0002</PROPERTY>\n    <PROPERTY name=\"size\" >99.9GB</PROPERTY>\n    <PROPERTY name=\"size-numeric\" 
>195305472</PROPERTY>\n    <PROPERTY name=\"total-size\">99.9GB</PROPERTY>\n    <PROPERTY name=\"total-size-numeric\" >195305472</PROPERTY>\n    <PROPERTY name=\"allocated-size\">0B</PROPERTY>\n    <PROPERTY name=\"allocated-size-numeric\">0</PROPERTY>\n    <PROPERTY name=\"storage-type\" >Virtual</PROPERTY>\n    <PROPERTY name=\"storage-type-numeric\" >1</PROPERTY>\n    <PROPERTY name=\"preferred-owner\">A</PROPERTY>\n    <PROPERTY name=\"preferred-owner-numeric\">1</PROPERTY>\n    <PROPERTY name=\"owner\" >A</PROPERTY>\n    <PROPERTY name=\"owner-numeric\">1</PROPERTY>\n    <PROPERTY name=\"serial-number\" >00c0ff26c4ea00000a81546101000000</PROPERTY>\n    <PROPERTY name=\"write-policy\" >write-back</PROPERTY>\n    <PROPERTY name=\"write-policy-numeric\">1</PROPERTY>\n    <PROPERTY name=\"cache-optimization\" >standard</PROPERTY>\n    <PROPERTY name=\"cache-optimization-numeric\" >0</PROPERTY>\n    <PROPERTY name=\"read-ahead-size\" >Adaptive</PROPERTY>\n    <PROPERTY name=\"read-ahead-size-numeric\" >-1</PROPERTY>\n    <PROPERTY name=\"volume-type\" >base</PROPERTY>\n    <PROPERTY name=\"volume-type-numeric\">15</PROPERTY>\n    <PROPERTY name=\"volume-class\" >standard</PROPERTY>\n    <PROPERTY name=\"volume-class-numeric\" >0</PROPERTY>\n    <PROPERTY name=\"profile-preference\" >Standard</PROPERTY>\n    <PROPERTY name=\"profile-preference-numeric\">0</PROPERTY>\n    <PROPERTY name=\"snapshot\" >No</PROPERTY>\n    <PROPERTY name=\"volume-qualifier\">N/A</PROPERTY>\n    <PROPERTY name=\"volume-qualifier-numeric\" >0</PROPERTY>\n    <PROPERTY name=\"blocks\" >195305472</PROPERTY>\n    <PROPERTY name=\"capabilities\">dmse</PROPERTY>\n    <PROPERTY name=\"volume-parent\"></PROPERTY>\n    <PROPERTY name=\"snap-pool\"></PROPERTY>\n    <PROPERTY name=\"replication-set\" ></PROPERTY>\n    <PROPERTY name=\"attributes\" ></PROPERTY>\n    <PROPERTY name=\"wwn\" >600C0FF00026C4EA0A81546101000000</PROPERTY>\n    <PROPERTY name=\"progress\">0%</PROPERTY>\n    <PROPERTY name=\"progress-numeric\">0</PROPERTY>\n    <PROPERTY name=\"container-name\">A</PROPERTY>\n    <PROPERTY name=\"allowed-storage-tiers-numeric\" >7</PROPERTY>\n    <PROPERTY name=\"threshold-percent-of-pool\" >10.00 %</PROPERTY>\n    <PROPERTY name=\"reserved-size-in-pages\" >0</PROPERTY>\n    <PROPERTY name=\"allocate-reserved-pages-first\">Enabled</PROPERTY>\n    <PROPERTY name=\"health\">OK</PROPERTY>\n    <PROPERTY name=\"health-numeric\">0</PROPERTY>\n    <PROPERTY name=\"health-reason\"></PROPERTY>\n    <PROPERTY name=\"health-recommendation\" ></PROPERTY>\n    <PROPERTY name=\"volume-group\">00c0ff26c4ea0000ab2b9f6101000000</PROPERTY>\n    <PROPERTY name=\"group-key\" >VG6</PROPERTY>\n  </OBJECT>\n</RESPONSE>\n\"\"\"\n\nLIST_MAPS_ALL = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<RESPONSE VERSION=\"L100\" REQUEST=\"show maps\">\n<COMP G=\"0\" P=\"1\"/>\n <OBJECT basetype=\"volume-group-view\">\n    <PROPERTY name=\"durable-id\">VG5</PROPERTY>\n    <PROPERTY name=\"serial-number\">00c0ff26c4ea0000e22b9f6101000000</PROPERTY>\n    <PROPERTY name=\"group-name\">VGroup2.*</PROPERTY>\n  </OBJECT>\n<COMP G=\"1\" P=\"2\"/>\n<OBJECT basetype=\"volume-group-view-mappings\" >\n    <PROPERTY name=\"durable-id\" >VG5_I3</PROPERTY>\n    <PROPERTY name=\"parent-id\">VG5</PROPERTY>\n    <PROPERTY name=\"mapped-id\">I3</PROPERTY>\n    <PROPERTY name=\"ports\">1,2</PROPERTY>\n    <PROPERTY name=\"access\">read-write</PROPERTY>\n    <PROPERTY name=\"access-numeric\">3</PROPERTY>\n    <PROPERTY 
name=\"initiator-id\">50014380029ceb58</PROPERTY>\n    <PROPERTY name=\"nickname\">rac02_01</PROPERTY>\n    <PROPERTY name=\"host-profile\">Standard</PROPERTY>\n  </OBJECT>\n<COMP G=\"2\" P=\"3\"/>\n<OBJECT basetype=\"volume-group-view-mappings-luns\">\n    <PROPERTY name=\"volume-name\">Vol0003</PROPERTY>\n    <PROPERTY name=\"volume-serial\">00c0ff26c4ea000082537a6101000000</PROPERTY>\n    <PROPERTY name=\"lun\">0</PROPERTY>\n  </OBJECT>\n<COMP G=\"2\" P=\"4\"/>\n<OBJECT basetype=\"volume-group-view-mappings-luns\">\n    <PROPERTY name=\"volume-name\" >Vol0004</PROPERTY>\n    <PROPERTY name=\"volume-serial\" >00c0ff26c4ea000085537a6101000000</PROPERTY>\n    <PROPERTY name=\"lun\">2</PROPERTY>\n  </OBJECT>\n<COMP G=\"1\" P=\"5\"/>\n <OBJECT basetype=\"volume-view\">\n    <PROPERTY name=\"durable-id\">V3</PROPERTY>\n    <PROPERTY name=\"volume-serial\" >00c0ff26c4ea000085537a6101000000</PROPERTY>\n    <PROPERTY name=\"volume-name\">Vol0004</PROPERTY>\n  </OBJECT>\n<COMP G=\"5\" P=\"6\"/>\n <OBJECT basetype=\"volume-view-mappings\" >\n    <PROPERTY name=\"durable-id\" >V3_I0</PROPERTY>\n    <PROPERTY name=\"parent-id\">V3</PROPERTY>\n    <PROPERTY name=\"mapped-id\" >I0</PROPERTY>\n    <PROPERTY name=\"ports\">3,4</PROPERTY>\n    <PROPERTY name=\"lun\"  >0</PROPERTY>\n    <PROPERTY name=\"access\">read-write</PROPERTY>\n    <PROPERTY name=\"access-numeric\">3</PROPERTY>\n    <PROPERTY name=\"identifier\" >10000090fa13870f</PROPERTY>\n    <PROPERTY name=\"nickname\" >FC-port3</PROPERTY>\n    <PROPERTY name=\"host-profile\">HPUX</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"7\"/>\n  <OBJECT basetype=\"volume-view\">\n    <PROPERTY name=\"durable-id\">V0</PROPERTY>\n    <PROPERTY name=\"volume-serial\">00c0ff26c4ea0000fa80546101000000</PROPERTY>\n    <PROPERTY name=\"volume-name\" >Vol0001</PROPERTY>\n  </OBJECT>\n<COMP G=\"7\" P=\"8\"/>\n<OBJECT basetype=\"volume-view-mappings\">\n    <PROPERTY name=\"durable-id\" key=\"true\">V0_I1</PROPERTY>\n    <PROPERTY name=\"parent-id\">V0</PROPERTY>\n    <PROPERTY name=\"mapped-id\">I1</PROPERTY>\n    <PROPERTY name=\"ports\">1,2</PROPERTY>\n    <PROPERTY name=\"lun\">0</PROPERTY>\n    <PROPERTY name=\"access\">read-write</PROPERTY>\n    <PROPERTY name=\"access-numeric\">3</PROPERTY>\n    <PROPERTY name=\"identifier\">10000090fa13870e</PROPERTY>\n    <PROPERTY name=\"nickname\">FC-port2</PROPERTY>\n    <PROPERTY name=\"host-profile\">HPUX</PROPERTY>\n  </OBJECT>\n<COMP G=\"0\" P=\"9\"/>\n<OBJECT basetype=\"volume-view\">\n    <PROPERTY name=\"durable-id\" >V1</PROPERTY>\n    <PROPERTY name=\"volume-serial\">00c0ff26c4ea00000a81546101000000</PROPERTY>\n    <PROPERTY name=\"volume-name\">Vol0002</PROPERTY>\n  </OBJECT>\n<COMP G=\"9\" P=\"10\"/>\n<OBJECT basetype=\"volume-view-mappings\">\n    <PROPERTY name=\"durable-id\">V1_H4</PROPERTY>\n    <PROPERTY name=\"parent-id\" >V1</PROPERTY>\n    <PROPERTY name=\"mapped-id\">H4</PROPERTY>\n    <PROPERTY name=\"ports\">1,2</PROPERTY>\n    <PROPERTY name=\"lun\">0</PROPERTY>\n    <PROPERTY name=\"access\">read-write</PROPERTY>\n    <PROPERTY name=\"access-numeric\">3</PROPERTY>\n    <PROPERTY name=\"identifier\">00c0ff26c4ea0000057f245b01010000</PROPERTY>\n    <PROPERTY name=\"nickname\">rac01.*</PROPERTY>\n    <PROPERTY name=\"host-profile\">Standard</PROPERTY>\n  </OBJECT>\n</RESPONSE>\n\"\"\"\nerror_result = [\n    {\n        'alert_id': 'A891',\n        'alert_name': '557',\n        'category': 'Fault',\n        'description': 'Management',\n        'location': 'An Enclosure Management Processor(EMP)',\n      
  'match_key': 'd0317252aed04fd8b68e79d7eab08277',\n        'occur_time': 1636704980000,\n        'resource_type': '557',\n        'sequence_number': 'A891',\n        'severity': 'ERROR',\n        'type': 'EquipmentAlarm'\n    }\n]\n\nvolume_result = [\n    {\n        'name': 'Vol0001',\n        'storage_id': 'kkk',\n        'description': 'Vol0001',\n        'status': 'normal',\n        'native_volume_id': 'V1',\n        'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000',\n        'wwn': '600C0FF00026C4EAFA80546101000000',\n        'type': 'base',\n        'total_capacity': 107266808217,\n        'free_capacit': 107266808217,\n        'used_capacity': 0,\n        'blocks': 195305472,\n        'compressed': True,\n        'deduplicated': True\n    }, {\n        'name': 'Vol0002',\n        'storage_id': 'kkk',\n        'description': 'Vol0002',\n        'status': 'normal',\n        'native_volume_id': 'V2',\n        'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000',\n        'wwn': '600C0FF00026C4EA0A81546101000000',\n        'type': 'base',\n        'total_capacity': 107266808217,\n        'free_capacit': 107266808217,\n        'used_capacity': 0,\n        'blocks': 195305472,\n        'compressed': True,\n        'deduplicated': True\n    }\n]\n\npools_result = [\n    {\n        'name': 'A',\n        'storage_id': 'kkk',\n        'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000',\n        'status': 'normal',\n        'storage_type': 'block',\n        'total_capacity': 1285054214963,\n        'subscribed_capacity': 390610944,\n        'used_capacity': 214533616434,\n        'free_capacity': 1070520598529\n    }\n]\n\nports_result = [\n    {\n        'native_port_id': 'hostport_A1',\n        'name': 'A1', 'type': 'fc',\n        'connection_status': 'disconnected',\n        'health_status': 'abnormal',\n        'location': 'A1_FC',\n        'storage_id': 'kkk',\n        'speed': 8589934592.0,\n        'max_speed': 8589934592.0,\n        'mac_address': None,\n        'ipv4': None,\n        'wwn': '207000c0ff26dcb0'\n    }, {\n        'native_port_id': 'hostport_A2',\n        'name': 'A2',\n        'type': 'fc',\n        'connection_status': 'disconnected',\n        'health_status': 'abnormal',\n        'location': 'A2_FC',\n        'storage_id': 'kkk',\n        'speed': 8589934592.0,\n        'max_speed': 8589934592.0,\n        'mac_address': None,\n        'ipv4': None,\n        'wwn': '217000c0ff26dcb0'\n    }, {\n        'native_port_id': 'hostport_A3',\n        'name': 'A3',\n        'type': 'eth',\n        'connection_status': 'disconnected',\n        'health_status': 'abnormal',\n        'location': 'A3_ISCSI',\n        'storage_id': 'kkk',\n        'speed': 0,\n        'max_speed': 0,\n        'mac_address': '00:C0:FF:35:BD:64',\n        'ipv4': '0.0.0.0',\n        'wwn': None\n    }, {\n        'native_port_id': 'hostport_A4',\n        'name': 'A4',\n        'type': 'eth',\n        'connection_status': 'disconnected',\n        'health_status': 'abnormal',\n        'location': 'A4_ISCSI',\n        'storage_id': 'kkk',\n        'speed': 0,\n        'max_speed': 0,\n        'mac_address': '00:C0:FF:35:BD:65',\n        'ipv4': '0.0.0.0',\n        'wwn': None\n    }, {\n        'native_port_id': 'hostport_B1',\n        'name': 'B1',\n        'type': 'fc',\n        'connection_status': 'disconnected',\n        'health_status': 'abnormal',\n        'location': 'B1_FC',\n        'storage_id': 'kkk',\n        'speed': 8589934592.0,\n        'max_speed': 
8589934592.0,\n        'mac_address': None,\n        'ipv4': None,\n        'wwn': '247000c0ff26dcb0'\n    }, {\n        'native_port_id': 'hostport_B2',\n        'name': 'B2',\n        'type': 'fc',\n        'connection_status': 'disconnected',\n        'health_status': 'abnormal',\n        'location': 'B2_FC',\n        'storage_id': 'kkk',\n        'speed': 8589934592.0,\n        'max_speed': 8589934592.0,\n        'mac_address': None,\n        'ipv4': None,\n        'wwn': '257000c0ff26dcb0'\n    }, {\n        'native_port_id': 'hostport_B3',\n        'name': 'B3',\n        'type': 'eth',\n        'connection_status': 'disconnected',\n        'health_status': 'abnormal',\n        'location': 'B3_ISCSI', 'storage_id': 'kkk',\n        'speed': 0,\n        'max_speed': 0,\n        'mac_address': '00:C0:FF:35:BA:BC',\n        'ipv4': '0.0.0.0',\n        'wwn': None\n    }, {\n        'native_port_id': 'hostport_B4',\n        'name': 'B4',\n        'type': 'eth',\n        'connection_status': 'disconnected',\n        'health_status': 'abnormal',\n        'location': 'B4_ISCSI',\n        'storage_id': 'kkk',\n        'speed': 0,\n        'max_speed': 0,\n        'mac_address': '00:C0:FF:35:BA:BD',\n        'ipv4': '0.0.0.0',\n        'wwn': None\n    }]\n\ndisks_result = [\n    {\n        'native_disk_id': '1.1',\n        'name': '1.1',\n        'physical_type': 'sas',\n        'status': 'normal',\n        'storage_id': 'kkk',\n        'native_disk_group_id': 'dgA01',\n        'serial_number': '6SL9CD560000N51404EF',\n        'manufacturer': 'SEAGATE',\n        'model': 'ST3600057SS',\n        'speed': 15000,\n        'capacity': 644352468582,\n        'health_score': 'normal'\n    }, {\n        'native_disk_id': '1.2',\n        'name': '1.2',\n        'physical_type': 'sas',\n        'status': 'normal',\n        'storage_id': 'kkk',\n        'native_disk_group_id': 'dgA01',\n        'serial_number': '6SL7X4RE0000B42601SF',\n        'manufacturer': 'SEAGATE',\n        'model': 'ST3600057SS',\n        'speed': 15000,\n        'capacity': 644352468582,\n        'health_score': 'normal'\n    }, {\n        'native_disk_id': '1.3',\n        'name': '1.3',\n        'physical_type': 'sas',\n        'status': 'normal',\n        'storage_id': 'kkk',\n        'native_disk_group_id': 'dgA01',\n        'serial_number': '6SL9QR5T0000N52120SK',\n        'manufacturer': 'SEAGATE',\n        'model': 'ST3600057SS',\n        'speed': 15000, 'capacity': 644352468582,\n        'health_score': 'normal'\n    }, {\n        'native_disk_id': '1.4',\n        'name': '1.4',\n        'physical_type': 'sas',\n        'status': 'normal',\n        'storage_id': 'kkk',\n        'native_disk_group_id': 'dgA01',\n        'serial_number': '3SL0WT7G00009051YBTF',\n        'manufacturer': 'SEAGATE',\n        'model': 'ST3600057SS',\n        'speed': 15000,\n        'capacity': 644352468582,\n        'health_score': 'normal'\n    }\n]\n\nsystem_info = {\n    'name': 'msa2040',\n    'vendor': 'HPE',\n    'model': 'MSA 2040 SAN',\n    'status': 'normal',\n    'serial_number': '00C0FF26DCB0',\n    'firmware_version': 'GL210R004',\n    'location': 'Uninitialized Location',\n    'raw_capacity': 2577409874328,\n    'total_capacity': 1285054214963,\n    'used_capacity': 214533616434,\n    'free_capacity': 1070520598529\n}\n\ncontroller_result = [\n    {\n        'native_controller_id': 'A',\n        'name': 'controller_a',\n        'storage_id': 'kkk',\n        'status': 'normal',\n        'location': 'Top',\n        'soft_version': 
'GLS210R04-01',\n        'cpu_info': 'Gladden',\n        'cpu_count': 1,\n        'memory_size': 6442450944\n    },\n    {\n        'native_controller_id': 'B',\n        'name': 'controller_b',\n        'storage_id': 'kkk',\n        'status': 'normal',\n        'location': 'Bottom',\n        'soft_version': 'GLS210R04-01',\n        'cpu_info': 'Gladden',\n        'cpu_count': 1,\n        'memory_size': 6442450944\n    }\n]\n\nlist_storage_host_initiators = [\n    {\n        'name': 'FC-port1',\n        'type': 'fc',\n        'alias': 'I2',\n        'storage_id': 'kkk',\n        'native_storage_host_initiator_id': 'I2',\n        'wwn': '21000024ff3dfed1',\n        'status': 'online',\n        'native_storage_host_id': 'NOHOST'\n    },\n    {\n        'name': 'FC-port2',\n        'type': 'fc',\n        'alias': 'I1',\n        'storage_id': 'kkk',\n        'native_storage_host_initiator_id': 'I1',\n        'wwn': '10000090fa13870e',\n        'status': 'online',\n        'native_storage_host_id': '00c0ff26c2360000e2399f6101010000'\n    },\n    {\n        'name': 'FC-port3',\n        'type': 'fc',\n        'alias': 'I0',\n        'storage_id': 'kkk',\n        'native_storage_host_initiator_id': 'I0',\n        'wwn': '10000090fa13870f',\n        'status': 'online',\n        'native_storage_host_id': '00c0ff26c2360000e2399f6101010000'\n    },\n    {\n        'name': 'rac01_01',\n        'type': 'fc',\n        'alias': 'I6',\n        'storage_id': 'kkk',\n        'native_storage_host_initiator_id': 'I6',\n        'wwn': '500143801875548e',\n        'status': 'online',\n        'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000'\n    },\n    {\n        'name': 'rac01_02',\n        'type': 'fc',\n        'alias': 'I5',\n        'storage_id': 'kkk',\n        'native_storage_host_initiator_id': 'I5',\n        'wwn': '5001438012097ed6',\n        'status': 'online',\n        'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000'\n    },\n    {\n        'name': 'rac02_01',\n        'type': 'fc',\n        'alias': 'I3',\n        'storage_id': 'kkk',\n        'native_storage_host_initiator_id': 'I3',\n        'wwn': '50014380029ceb58',\n        'status': 'online',\n        'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000'\n    },\n    {\n        'name': 'rac02_02',\n        'type': 'fc',\n        'alias': 'I4',\n        'storage_id': 'kkk',\n        'native_storage_host_initiator_id': 'I4',\n        'wwn': '500143801209031c',\n        'status': 'online',\n        'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000'\n    },\n    {\n        'name': 'FC-port1',\n        'type': 'fc',\n        'alias': 'I2',\n        'storage_id': 'kkk',\n        'native_storage_host_initiator_id': 'I2',\n        'wwn': '21000024ff3dfed1',\n        'status': 'online',\n        'native_storage_host_id': 'NOHOST'\n    }\n]\n\nlist_storage_hosts = [\n    {\n        'name': 'Host1',\n        'description': 'H1',\n        'storage_id': 'kkk',\n        'native_storage_host_id': '00c0ff26c2360000e2399f6101010000',\n        'os_type': 'HP-UX',\n        'status': 'normal'\n    },\n    {\n        'name': 'rac01',\n        'description': 'H4',\n        'storage_id': 'kkk',\n        'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000',\n        'os_type': 'HP-UX',\n        'status': 'normal'\n    },\n    {\n        'name': 'rac02',\n        'description': 'H3',\n        'storage_id': 'kkk',\n        'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000',\n        'os_type': 'HP-UX',\n        
'status': 'normal'\n    }\n]\n\n\nlist_storage_host_groups = {\n    'storage_host_groups': [\n        {\n            'name': 'HostGroup1',\n            'description': 'HG0',\n            'storage_id': 'kkk',\n            'native_storage_host_group_id': '00c0ff26c2360000223a9f6101010000',\n            'storage_hosts': '00c0ff26c2360000e2399f6101010000'\n        }, {\n            'name': 'rac',\n            'description': 'HG2',\n            'storage_id': 'kkk',\n            'native_storage_host_group_id': '00c0ff26c4ea00008c81245b01010000',\n            'storage_hosts': '00c0ff26c4ea0000057f245b01010000,'\n                             '00c0ff26c4ea0000f77f245b01010000'\n        }\n    ],\n    'storage_host_grp_host_rels': [\n        {'storage_id': 'kkk',\n         'native_storage_host_group_id': '00c0ff26c2360000223a9f6101010000',\n         'native_storage_host_id': '00c0ff26c2360000e2399f6101010000'\n         },\n        {\n            'storage_id': 'kkk',\n            'native_storage_host_group_id': '00c0ff26c4ea00008c81245b01010000',\n            'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000'\n        },\n        {\n            'storage_id': 'kkk',\n            'native_storage_host_group_id': '00c0ff26c4ea00008c81245b01010000',\n            'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000'\n        }\n    ]\n}\n\nlist_volume_groups = {\n    'volume_groups':\n        [\n            {\n                'name': 'VGroup1',\n                'description': 'VG6',\n                'storage_id': 'kkk',\n                'native_volume_group_id': 'VG6',\n                'volumes': 'V0,V1'\n            }\n        ],\n    'vol_grp_vol_rels':\n        [\n            {\n                'storage_id': 'kkk',\n                'native_volume_group_id': 'VG6',\n                'native_volume_id': 'V0'\n            },\n            {\n                'storage_id': 'kkk',\n                'native_volume_group_id': 'VG6',\n                'native_volume_id': 'V1'\n            }\n        ]\n}\n\nlist_masking_views = [\n    {\n        'name': 'FC-port3',\n        'description': 'FC-port3',\n        'storage_id': 'kkk',\n        'native_masking_view_id': 'V3_I0V3',\n        'native_port_group_id': 'port_group_A3B3A4B4',\n        'native_volume_id': 'V3',\n        'native_storage_host_id': '00c0ff26c2360000e2399f6101010000'\n    },\n    {\n        'name': 'FC-port2',\n        'description': 'FC-port2',\n        'storage_id': 'kkk',\n        'native_masking_view_id': 'V0_I1V0',\n        'native_port_group_id': 'port_group_A1B1A2B2',\n        'native_volume_id': 'V0',\n        'native_storage_host_id': '00c0ff26c2360000e2399f6101010000'\n    },\n    {\n        'name': 'rac01.*',\n        'description': 'rac01.*',\n        'storage_id': 'kkk',\n        'native_masking_view_id': 'V1_H4V1',\n        'native_port_group_id': 'port_group_A1B1A2B2',\n        'native_volume_id': 'V1',\n        'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000'\n    },\n    {\n        'name': 'rac02_01',\n        'description': 'rac02_01',\n        'storage_id': 'kkk',\n        'native_masking_view_id': 'VG5_I3VG5',\n        'native_port_group_id': 'port_group_A1B1A2B2',\n        'native_volume_group_id': 'VG5',\n        'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000'\n    }\n]\n"
  },
  {
    "path": "delfin/tests/unit/drivers/hpe/hpe_msa/test_hpe_msastor.py",
    "content": "import sys\nimport paramiko\n\nfrom delfin import context\nfrom unittest import TestCase, mock\nfrom delfin.tests.unit.drivers.hpe.hpe_msa import test_constans\nfrom delfin.drivers.utils.ssh_client import SSHPool\nfrom delfin.drivers.hpe.hpe_msa.ssh_handler import SSHHandler\nfrom delfin.drivers.hpe.hpe_msa.hpe_msastor import HpeMsaStorDriver\n\nsys.modules['delfin.cryptor'] = mock.Mock()\n\nACCESS_INFO = {\n    \"storage_id\": \"kkk\",\n    \"ssh\": {\n        \"host\": \"110.143.132.231\",\n        \"port\": 22,\n        \"username\": \"user\",\n        \"password\": \"pass\",\n        \"pub_key\": \"ddddddddddddddddddddddddd\"\n    }\n}\n\n\nclass TestHpeMsaStorageDriver(TestCase):\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_ports(self, mock_ssh_get, mock_control):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_control.side_effect = [test_constans.LIST_PORTS]\n        ports = HpeMsaStorDriver(**ACCESS_INFO).list_ports(context)\n        self.assertEqual(ports, test_constans.ports_result)\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_disks(self, mock_ssh_get, mock_control):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_control.side_effect = [test_constans.LIST_DISKS]\n        disks = HpeMsaStorDriver(**ACCESS_INFO).list_disks(context)\n        self.assertEqual(disks, test_constans.disks_result)\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_controllers(self, mock_ssh_get, mock_control):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_control.side_effect = [test_constans.LIST_CONTROLLERS]\n        controller = HpeMsaStorDriver(**ACCESS_INFO).\\\n            list_controllers(context)\n        self.assertEqual(controller, test_constans.controller_result)\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_volumes(self, mock_ssh_get, mock_control):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_control.side_effect = [test_constans.LIST_VOLUMES,\n                                    test_constans.LIST_POOLS]\n        volumes = HpeMsaStorDriver(**ACCESS_INFO).list_volumes(context)\n        self.assertEqual(volumes, test_constans.volume_result)\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    @mock.patch.object(SSHHandler, 'list_storage_pools')\n    @mock.patch.object(SSHHandler, 'list_storage_disks')\n    @mock.patch.object(SSHHandler, 'list_storage_volume')\n    def test_list_storage(self, mock_system, mock_ssh_get,\n                          mock_pools, mock_disks, mock_volume):\n        mock_volume.side_effect = [test_constans.LIST_SYSTEM,\n                                   test_constans.LIST_VISION]\n        mock_disks.return_value = {paramiko.SSHClient()}\n        mock_pools.side_effect = [test_constans.pools_result]\n        mock_ssh_get.side_effect = [test_constans.disks_result]\n        mock_system.side_effect = [test_constans.volume_result]\n        system = HpeMsaStorDriver(**ACCESS_INFO).get_storage(context)\n        self.assertEqual(system, test_constans.system_info)\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    @mock.patch.object(SSHHandler, 'list_storage_volume')\n    def test_list_storage_pools(self, mock_ssh_get, mock_control,\n                  
              mock_volume):\n        mock_ssh_get.return_value = test_constans.volume_result\n        mock_control.side_effect = {paramiko.SSHClient()}\n        mock_volume.side_effect = [test_constans.LIST_POOLS]\n        pools = HpeMsaStorDriver(**ACCESS_INFO).list_storage_pools(context)\n        self.assertEqual(pools, test_constans.pools_result)\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_alerts(self, mock_ssh_get, mock_control):\n        query_para = None\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_control.side_effect = [test_constans.LIST_ERROR]\n        alerts = HpeMsaStorDriver(**ACCESS_INFO).list_alerts(query_para)\n        self.assertEqual(alerts, test_constans.error_result)\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_storage_host_initiators(self, mock_ssh_get, mock_control):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_control.side_effect = [test_constans.LIST_HOST_INITIATORS]\n        list_storage_host_initiators = HpeMsaStorDriver(**ACCESS_INFO)\\\n            .list_storage_host_initiators(context)\n        self.assertEqual(list_storage_host_initiators[0], test_constans\n                         .list_storage_host_initiators[0])\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_storage_hosts(self, mock_ssh_get, mock_control):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_control.side_effect = [test_constans.LIST_HOST]\n        list_storage_hosts = HpeMsaStorDriver(**ACCESS_INFO) \\\n            .list_storage_hosts(context)\n        self.assertEqual(list_storage_hosts, test_constans\n                         .list_storage_hosts)\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_storage_host_groups(self, mock_ssh_get, mock_control):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_control.side_effect = [test_constans.LIST_HOST_GROUPS]\n        list_storage_host_groups = HpeMsaStorDriver(**ACCESS_INFO) \\\n            .list_storage_host_groups(context)\n        self.assertEqual(list_storage_host_groups, test_constans\n                         .list_storage_host_groups)\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_volume_groups(self, mock_ssh_get, mock_control):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_control.side_effect = [test_constans.LIST_VOLUME_GROUPS]\n        list_volume_groups = HpeMsaStorDriver(**ACCESS_INFO) \\\n            .list_volume_groups(context)\n        self.assertEqual(list_volume_groups, test_constans\n                         .list_volume_groups)\n\n    @mock.patch.object(SSHPool, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    @mock.patch.object(SSHHandler, 'list_storage_ports')\n    @mock.patch.object(SSHHandler, 'list_storage_hosts')\n    @mock.patch.object(SSHHandler, 'list_storage_host_initiators')\n    def test_list_masking_view(self, mock_ssh_get, mock_control,\n                               mock_port, mock_hosts, mock_initiators):\n        mock_ssh_get.side_effect = [test_constans.list_storage_host_initiators]\n        mock_control.side_effect = [test_constans.list_storage_hosts]\n        mock_port.side_effect = [test_constans.ports_result]\n        mock_hosts.return_value = {paramiko.SSHClient()}\n        
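# NOTE: stacked mock.patch decorators bind bottom-up, so mock_ssh_get here\n        # patches list_storage_host_initiators and mock_initiators patches\n        # do_exec; in this and several earlier tests the parameter names do\n        # not follow the decorator order.\n        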
mock_initiators.return_value = test_constans.LIST_MAPS_ALL\n        list_masking_views = HpeMsaStorDriver(**ACCESS_INFO) \\\n            .list_masking_views(context)\n        self.assertEqual(list_masking_views, test_constans\n                         .list_masking_views)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/huawei/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/huawei/oceanstor/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/huawei/oceanstor/test_alert_handler.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom oslo_utils import importutils\n\nfrom delfin import exception\nfrom delfin.common import constants\n\n\nclass AlertHandlerTestCase(unittest.TestCase):\n    ALERT_HANDLER_CLASS = 'delfin.drivers.huawei.oceanstor.alert_handler' \\\n                          '.AlertHandler'\n\n    def _get_alert_handler(self):\n        alert_handler_class = importutils.import_class(\n            self.ALERT_HANDLER_CLASS)\n        alert_handler = alert_handler_class()\n        return alert_handler\n\n    def _get_fake_alert_info(self):\n        alert_info = {\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.2.0': 'location=location1',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.4.0': 'Trap Test Alarm',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.5.0': '2',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.6.0': '1',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.7.0': '4294967294',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.9.0': '4294967295',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.10.0': 'This is just for'\n                                                   ' testing.Please '\n                                                   'ignore it',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.11.0': '1',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.3.0': 'Sample advice',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.1.0': 'Array',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.8.0': '2020-6-25,1:42:26.0'\n        }\n\n        return alert_info\n\n    def _get_fake_incomplete_alert_info(self):\n        # hwIsmReportingAlarmFaultCategory is missing here\n        alert_info = {\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.2.0': 'location=location1',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.4.0': 'Trap Test Alarm',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.5.0': '2',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.6.0': '1',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.7.0': '4294967294',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.9.0': '4294967295',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.10.0': 'This is just '\n                                                   'for testing.'\n                                                   'Please '\n                                                   'ignore it',\n            '1.3.6.1.4.1.2011.2.91.10.3.1.1.8': '2020-6-25,1:42:26.0'\n        }\n\n        return alert_info\n\n    def _get_fake_queried_alert(self):\n        alert_info = [{\n            'eventID': 1234,\n            'name': 'sample-event',\n            'level': 2,\n            'eventType': 0,\n            'sequence': '1234',\n            'startTime': 13200000,\n            'description': 'This is just for  testing.Please ignore it',\n            'suggestion': 'Sample advice',\n            'location': 'location1'\n        }]\n\n        return alert_info\n\n    def test_parse_alert_with_all_necessary_info(self):\n        \"\"\" Success flow with all necessary parameters\"\"\"\n        
alert_handler_inst = self._get_alert_handler()\n        alert = self._get_fake_alert_info()\n\n        expected_alert_model = {\n            'alert_id': alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.7.0'],\n            'alert_name': alert[\n                '1.3.6.1.4.1.2011.2.91.10.3.1.1.4.0'],\n            'severity': constants.Severity.CRITICAL,\n            'category': constants.Category.FAULT,\n            'type': constants.EventType.EQUIPMENT_ALARM,\n            'sequence_number': alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.9.0'],\n            'description': alert[\n                '1.3.6.1.4.1.2011.2.91.10.3.1.1.10.0'],\n            'recovery_advice': alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.3.0'],\n            'resource_type': constants.DEFAULT_RESOURCE_TYPE,\n            'location': 'Node code='\n                        + alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.1.0']\n                        + ',' + alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.2.0']\n        }\n        context = {}\n        alert_model = alert_handler_inst.parse_alert(context, alert)\n        # Copy occur_time from the parsed model so that the complete\n        # model can be validated\n        expected_alert_model['occur_time'] = alert_model['occur_time']\n\n        # Verify that all other fields match\n        self.assertDictEqual(expected_alert_model, alert_model)\n\n    def test_parse_alert_without_mandatory_info(self):\n        \"\"\" Error flow with some mandatory parameters missing\"\"\"\n        alert_handler_inst = self._get_alert_handler()\n        context = {}\n        alert = self._get_fake_incomplete_alert_info()\n        self.assertRaisesRegex(exception.InvalidInput,\n                               \"Mandatory information \"\n                               \"hwIsmReportingAlarmNodeCode missing in alert \"\n                               \"message.\",\n                               alert_handler_inst.parse_alert, context, alert)\n\n    def test_parse_queried_alerts_inside_range(self):\n        \"\"\" Success flow with queried alerts inside the time range\"\"\"\n        alert_handler_inst = self._get_alert_handler()\n        alert = self._get_fake_queried_alert()\n\n        expected_alert_model = [{\n            'alert_id': alert[0]['eventID'],\n            'alert_name': alert[0]['name'],\n            'severity': constants.Severity.INFORMATIONAL,\n            'category': constants.Category.EVENT,\n            'type': constants.EventType.NOT_SPECIFIED,\n            'sequence_number': alert[0]['sequence'],\n            'description': alert[0]['description'],\n            'recovery_advice': alert[0]['suggestion'],\n            'resource_type': constants.DEFAULT_RESOURCE_TYPE,\n            'location': alert[0]['location'],\n            'occur_time': alert[0]['startTime'] * 1000\n        }]\n\n        # With both valid begin_time and end_time\n        query_para = {'begin_time': 13100000, 'end_time': 13300000}\n        alert_model = alert_handler_inst.parse_queried_alerts(alert,\n                                                              query_para)\n        # Verify that all fields match\n        self.assertDictEqual(expected_alert_model[0], alert_model[0])\n\n        # With only valid begin_time\n        query_para = {'begin_time': 13100000}\n        alert_model = alert_handler_inst.parse_queried_alerts(alert,\n                                                              query_para)\n        # Verify that all fields match\n        self.assertDictEqual(expected_alert_model[0], alert_model[0])\n\n        # With only valid end_time\n        
query_para = {'end_time': 13300000}\n        alert_model = alert_handler_inst.parse_queried_alerts(alert,\n                                                              query_para)\n        # Verify that all fields match\n        self.assertDictEqual(expected_alert_model[0], alert_model[0])\n\n    def test_parse_queried_alerts_outside_range(self):\n        \"\"\" Queried alerts outside the time range are skipped\"\"\"\n        alert_handler_inst = self._get_alert_handler()\n        alert = self._get_fake_queried_alert()\n\n        query_para = {'begin_time': 13300000, 'end_time': 13400000}\n        alert_model = alert_handler_inst.parse_queried_alerts(alert,\n                                                              query_para)\n\n        # Verify that an alert outside the begin and end time range\n        # is skipped\n        self.assertEqual(len(alert_model), 0)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/huawei/oceanstor/test_oceanstor.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom unittest import TestCase, mock\nfrom delfin import exception\nfrom delfin import context\nfrom delfin.common import config # noqa\nfrom delfin.drivers.huawei.oceanstor.oceanstor import OceanStorDriver, consts\nfrom delfin.drivers.huawei.oceanstor.rest_client import RestClient\nfrom requests import Session\n\n\nclass Request:\n    def __init__(self):\n        self.environ = {'delfin.context': context.RequestContext()}\n        pass\n\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"dell_emc\",\n    \"model\": \"vmax\",\n    \"rest\": {\n        \"host\": \"10.0.0.1\",\n        \"port\": \"8443\",\n        \"username\": \"user\",\n        \"password\": \"cGFzc3dvcmQ=\",\n    },\n    \"extra_attributes\": {\n        \"array_id\": \"00112233\"\n    }\n}\n\n\ndef create_driver():\n    kwargs = ACCESS_INFO\n\n    m = mock.MagicMock()\n    with mock.patch.object(Session, 'post', return_value=m):\n        m.raise_for_status.return_value = None\n        m.json.return_value = {\n            'data': {\n                'deviceid': '123ABC456',\n                'iBaseToken': 'FFFF0000',\n                'accountstate': 1\n            },\n            'error': {\n                'code': 0,\n                'description': '0'\n            }\n        }\n        return OceanStorDriver(**kwargs)\n\n\nclass TestOceanStorStorageDriver(TestCase):\n\n    def test_init(self):\n        driver = create_driver()\n        self.assertEqual(driver.storage_id, \"12345\")\n        self.assertEqual(driver.sector_size, consts.SECTORS_SIZE)\n        self.assertEqual(driver.client.device_id, '123ABC456')\n\n        m = mock.MagicMock()\n        with mock.patch.object(Session, 'post', return_value=m):\n            m.raise_for_status.return_value = None\n            m.json.return_value = {\n                'data': {\n                    'deviceid': '123ABC456',\n                    'iBaseToken': 'FFFF0000',\n                    'accountstate': 1\n                },\n                'error': {\n                    'code': 123,\n                    'description': '0'\n                }\n            }\n            kwargs = ACCESS_INFO\n            with self.assertRaises(Exception) as exc:\n                OceanStorDriver(**kwargs)\n            self.assertIn('The credentials are invalid', str(exc.exception))\n\n    def test_get_storage(self):\n        driver = create_driver()\n        expected = {\n            'name': 'OceanStor',\n            'vendor': 'Huawei',\n            'description': 'Huawei OceanStor Storage',\n            'model': 'OceanStor_1',\n            'status': 'normal',\n            'serial_number': '012345',\n            'firmware_version': '1000',\n            'location': 'Location1',\n            'total_capacity': 51200,\n            'used_capacity': 38400,\n            'free_capacity': 20480,\n            'raw_capacity': 76800\n        }\n\n        ret = [\n            # Storage 
1\n            {\n                'data': {\n                    'RUNNINGSTATUS': '1',\n                    'SECTORSIZE': '512',\n                    'TOTALCAPACITY': '100',\n                    'USEDCAPACITY': '75',\n                    'MEMBERDISKSCAPACITY': '150',\n                    'userFreeCapacity': '40',\n                    'NAME': 'OceanStor_1',\n                    'ID': '012345',\n                    'LOCATION': 'Location1'\n                },\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'SOFTVER': '1000',\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            storage = driver.get_storage(context)\n            self.assertDictEqual(storage, expected)\n\n    def test_list_storage_pools(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'OceanStor_1',\n                'storage_id': '12345',\n                'native_storage_pool_id': '012345',\n                'description': 'Huawei OceanStor Pool',\n                'status': 'normal',\n                'storage_type': 'block',\n                'total_capacity': 51200,\n                'used_capacity': 38400,\n                'free_capacity': 20480\n            },\n            {\n                'name': 'OceanStor_1',\n                'storage_id': '12345',\n                'native_storage_pool_id': '012345',\n                'description': 'Huawei OceanStor Pool',\n                'status': 'offline',\n                'storage_type': 'file',\n                'total_capacity': 51200,\n                'used_capacity': 38400,\n                'free_capacity': 20480\n            }\n\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        'RUNNINGSTATUS': '27',\n                        'USAGETYPE': '1',\n                        'USERTOTALCAPACITY': '100',\n                        'USERCONSUMEDCAPACITY': '75',\n                        'USERFREECAPACITY': '40',\n                        'NAME': 'OceanStor_1',\n                        'ID': '012345',\n                        'LOCATION': 'Location1'\n                    },\n                    {\n                        'RUNNINGSTATUS': '28',\n                        'USAGETYPE': '2',\n                        'USERTOTALCAPACITY': '100',\n                        'USERCONSUMEDCAPACITY': '75',\n                        'USERFREECAPACITY': '40',\n                        'NAME': 'OceanStor_1',\n                        'ID': '012345',\n                        'LOCATION': 'Location1'\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'SOFTVER': '1000',\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            pools = driver.list_storage_pools(context)\n            self.assertDictEqual(pools[0], expected[0])\n            self.assertDictEqual(pools[1], 
expected[1])\n\n        with mock.patch.object(RestClient, 'get_all_pools',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_storage_pools(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n    def test_list_volumes(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'Volume_1',\n                'storage_id': '12345',\n                'description': 'Huawei OceanStor volume',\n                'status': 'available',\n                'native_volume_id': '0001',\n                'native_storage_pool_id': '012345',\n                'wwn': 'wwn12345',\n                'type': 'thin',\n                'total_capacity': 51200,\n                'used_capacity': 38400,\n                'free_capacity': None,\n                'compressed': False,\n                'deduplicated': False\n            },\n            {\n                'name': 'Volume_1',\n                'storage_id': '12345',\n                'description': 'Huawei OceanStor volume',\n                'status': 'error',\n                'native_volume_id': '0001',\n                'native_storage_pool_id': '012345',\n                'wwn': 'wwn12345',\n                'type': 'thick',\n                'total_capacity': 51200,\n                'used_capacity': 38400,\n                'free_capacity': None,\n                'compressed': True,\n                'deduplicated': True\n            }\n\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        'RUNNINGSTATUS': '27',\n                        'USAGETYPE': '1',\n                        'CAPACITY': '100',\n                        'ALLOCCAPACITY': '75',\n                        'WWN': 'wwn12345',\n                        'NAME': 'Volume_1',\n                        'ID': '0001',\n                        'LOCATION': 'Location1',\n                        'PARENTNAME': 'OceanStor_1',\n                        'ENABLECOMPRESSION': 'false',\n                        'ENABLEDEDUP': 'false',\n                        'ALLOCTYPE': '1',\n                        'SECTORSIZE': '512',\n\n                    },\n                    {\n                        'RUNNINGSTATUS': '28',\n                        'USAGETYPE': '1',\n                        'CAPACITY': '100',\n                        'ALLOCCAPACITY': '75',\n                        'WWN': 'wwn12345',\n                        'NAME': 'Volume_1',\n                        'ID': '0001',\n                        'LOCATION': 'Location1',\n                        'PARENTNAME': 'OceanStor_1',\n                        'ENABLECOMPRESSION': 'true',\n                        'ENABLEDEDUP': 'true',\n                        'ALLOCTYPE': '0',\n                        'SECTORSIZE': '512',\n\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'NAME': 'OceanStor_1',\n                    'ID': '012345'\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'SOFTVER': '1000',\n                }],\n                'error': {\n       
             'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            volumes = driver.list_volumes(context)\n            self.assertDictEqual(volumes[0], expected[0])\n            self.assertDictEqual(volumes[1], expected[1])\n\n        with mock.patch.object(RestClient, 'get_all_volumes',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_volumes(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n    def test_list_ports(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'TEST_FC_PORT',\n                'storage_id': '12345',\n                'connection_status': 'disconnected',\n                'health_status': 'unknown',\n                'location': 'Location1',\n                'logical_type': 'service',\n                'max_speed': '16000',\n                'native_port_id': '012345',\n                'native_parent_id': '0B.0',\n                'wwn': 'WWN_123000',\n                'type': 'fc',\n                'speed': None,\n                'mac_address': None,\n                'ipv4': None,\n                'ipv4_mask': None,\n                'ipv6': None,\n                'ipv6_mask': None,\n            },\n            {\n                'name': 'TEST_FCOE_PORT',\n                'storage_id': '12345',\n                'connection_status': 'disconnected',\n                'health_status': 'unknown',\n                'location': 'Location2',\n                'logical_type': 'service',\n                'max_speed': '12000',\n                'native_port_id': '22222',\n                'native_parent_id': '0B.2',\n                'wwn': '2210',\n                'type': 'fcoe',\n                'speed': None,\n                'mac_address': None,\n                'ipv4': None,\n                'ipv4_mask': None,\n                'ipv6': None,\n                'ipv6_mask': None,\n            },\n            {\n                'name': 'TEST_ETH_PORT',\n                'storage_id': '12345',\n                'connection_status': 'disconnected',\n                'health_status': 'unknown',\n                'location': 'Location3',\n                'logical_type': 'service',\n                'max_speed': '1000',\n                'native_port_id': '11111',\n                'native_parent_id': '0B.0',\n                'wwn': None,\n                'type': 'eth',\n                'speed': '-1',\n                'mac_address': 'MAC_1:ff:00',\n                'ipv4': None,\n                'ipv4_mask': None,\n                'ipv6': None,\n                'ipv6_mask': None,\n            },\n            {\n                'name': 'TEST_PCIE_PORT',\n                'storage_id': '12345',\n                'connection_status': 'disconnected',\n                'health_status': 'unknown',\n                'location': 'Location4',\n                'logical_type': 'other',\n                'max_speed': '8000',\n                'native_port_id': '33333',\n                'native_parent_id': '1090',\n                'wwn': None,\n                'type': 'other',\n                'speed': '5000',\n                'mac_address': None,\n                'ipv4': None,\n                'ipv4_mask': None,\n                'ipv6': None,\n                'ipv6_mask': 
None,\n            },\n            {\n                'name': 'TEST_BOND_PORT',\n                'storage_id': '12345',\n                'connection_status': 'connected',\n                'health_status': 'unknown',\n                'location': 'Location5',\n                'logical_type': 'other',\n                'max_speed': None,\n                'native_port_id': '44444',\n                'native_parent_id': None,\n                'wwn': None,\n                'type': 'other',\n                'speed': None,\n                'mac_address': None,\n                'ipv4': None,\n                'ipv4_mask': None,\n                'ipv6': None,\n                'ipv6_mask': None,\n            },\n            {\n                'name': 'TEST_SAS_PORT',\n                'storage_id': '12345',\n                'connection_status': 'unknown',\n                'health_status': 'unknown',\n                'location': 'Location6',\n                'logical_type': 'other',\n                'max_speed': '12000',\n                'native_port_id': '55555',\n                'native_parent_id': '0A',\n                'wwn': None,\n                'type': 'sas',\n                'speed': '12000',\n                'mac_address': None,\n                'ipv4': None,\n                'ipv4_mask': None,\n                'ipv6': None,\n                'ipv6_mask': None,\n            }\n\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        'TYPE': '212',\n                        'NAME': 'TEST_FC_PORT',\n                        'RUNNINGSTATUS': '11',\n                        'HEALTHSTATUS': '1',\n                        'ID': '012345',\n                        'LOCATION': 'Location1',\n                        'MAXSPEED': '16000',\n                        'MAXSUPPORTSPEED': '16000',\n                        'LOGICTYPE': '0',\n                        'RUNSPEED': '-1',\n                        'PARENTID': '0B.0',\n                        'WWN': 'WWN_123000',\n                    },\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'TYPE': '252',\n                    'NAME': 'TEST_FCOE_PORT',\n                    'RUNNINGSTATUS': '11',\n                    'HEALTHSTATUS': '1',\n                    'ID': '22222',\n                    'LOCATION': 'Location2',\n                    'MAXSPEED': '12000',\n                    'LOGICTYPE': '0',\n                    'RUNSPEED': '-1',\n                    'PARENTID': '0B.2',\n                    'WWN': '2210',\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'TYPE': '213',\n                    'NAME': 'TEST_ETH_PORT',\n                    'RUNNINGSTATUS': '11',\n                    'HEALTHSTATUS': '1',\n                    'ID': '11111',\n                    'LOCATION': 'Location3',\n                    'SPEED': '-1',\n                    'maxSpeed': '1000',\n                    'LOGICTYPE': '0',\n                    'RUNSPEED': '-1',\n                    'PARENTID': '0B.0',\n                    'MACADDRESS': 'MAC_1:ff:00',\n                    'IP4ADDR': '',\n                    'IP4MASK': '',\n                    'IP6ADDR': '',\n                    'IP6MASK': '',\n      
          }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'TYPE': '233',\n                    'NAME': 'TEST_PCIE_PORT',\n                    'RUNNINGSTATUS': '11',\n                    'HEALTHSTATUS': '1',\n                    'ID': '33333',\n                    'LOCATION': 'Location4',\n                    'PCIESPEED': '5000',\n                    'MAXSPEED': '8000',\n                    'PARENTID': '1090',\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'TYPE': '235',\n                    'NAME': 'TEST_BOND_PORT',\n                    'RUNNINGSTATUS': '10',\n                    'HEALTHSTATUS': '1',\n                    'ID': '44444',\n                    'LOCATION': 'Location5',\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'TYPE': '214',\n                    'NAME': 'TEST_SAS_PORT',\n                    'RUNNINGSTATUS': '0',\n                    'HEALTHSTATUS': '0',\n                    'ID': '55555',\n                    'LOCATION': 'Location6',\n                    'RUNSPEED': '12000',\n                    'MAXSPEED': '12000',\n                    'PARENTID': '0A',\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'TYPE': '210',\n                    'ID': '012345',\n                    'NAME': 'Name100',\n                    'RUNNINGSTATUS': '27',\n                    'HEALTHSTATUS': '0',\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            ports = driver.list_ports(context)\n\n            self.assertDictEqual(ports[0], expected[0])\n            self.assertDictEqual(ports[1], expected[1])\n            self.assertDictEqual(ports[2], expected[2])\n            self.assertDictEqual(ports[3], expected[3])\n            self.assertDictEqual(ports[4], expected[4])\n            self.assertDictEqual(ports[5], expected[5])\n\n        with mock.patch.object(RestClient, 'get_all_ports',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_ports(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_ports',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_ports(context)\n            self.assertIn('', str(exc.exception))\n\n    def test_list_controllers(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'Controller-1',\n                'storage_id': '12345',\n                'native_controller_id': '0A',\n                'status': 'normal',\n                'location': 'Location1',\n         
       'soft_version': 'Ver123',\n                'cpu_info': 'Intel Xeon',\n                'memory_size': '100000',\n            },\n            {\n                'name': 'Controller-2',\n                'storage_id': '12345',\n                'native_controller_id': '0B',\n                'status': 'offline',\n                'location': 'Location2',\n                'soft_version': 'VerABC',\n                'cpu_info': 'ARM64',\n                'memory_size': '500000',\n            },\n            {\n                'name': 'Controller-3',\n                'storage_id': '12345',\n                'native_controller_id': '0B',\n                'status': 'unknown',\n                'location': 'Location3',\n                'soft_version': 'VerABC',\n                'cpu_info': 'ARM64',\n                'memory_size': '500000',\n            }\n\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        'RUNNINGSTATUS': '27',\n                        'NAME': 'Controller-1',\n                        'SOFTVER': 'Ver123',\n                        'CPUINFO': 'Intel Xeon',\n                        'MEMORYSIZE': '100000',\n                        'ID': '0A',\n                        'LOCATION': 'Location1'\n                    },\n                    {\n                        'RUNNINGSTATUS': '28',\n                        'NAME': 'Controller-2',\n                        'SOFTVER': 'VerABC',\n                        'CPUINFO': 'ARM64',\n                        'MEMORYSIZE': '500000',\n                        'ID': '0B',\n                        'LOCATION': 'Location2'\n                    },\n                    {\n                        'RUNNINGSTATUS': '0',\n                        'NAME': 'Controller-3',\n                        'SOFTVER': 'VerABC',\n                        'CPUINFO': 'ARM64',\n                        'MEMORYSIZE': '500000',\n                        'ID': '0B',\n                        'LOCATION': 'Location3'\n                    },\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'SOFTVER': '1000',\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            controller = driver.list_controllers(context)\n            self.assertDictEqual(controller[0], expected[0])\n            self.assertDictEqual(controller[1], expected[1])\n            self.assertDictEqual(controller[2], expected[2])\n\n        with mock.patch.object(RestClient, 'get_all_controllers',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_controllers(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_controllers',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_controllers(context)\n            self.assertIn('', str(exc.exception))\n\n    def test_list_disks(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'ST200:1234',\n       
         'storage_id': '12345',\n                'native_disk_id': '0A',\n                'serial_number': '1234',\n                'manufacturer': 'Seagate',\n                'model': 'ST200',\n                'firmware': '0003',\n                'speed': 7200,\n                'capacity': 1000000,\n                'status': 'normal',\n                'physical_type': 'unknown',\n                'logical_type': 'free',\n                'health_score': '255',\n                'native_disk_group_id': None,\n                'location': 'Location1',\n            },\n            {\n                'name': 'WD00:1111',\n                'storage_id': '12345',\n                'native_disk_id': '0B',\n                'serial_number': '1111',\n                'manufacturer': 'WesternDigital',\n                'model': 'WD00',\n                'firmware': '123',\n                'speed': 10000,\n                'capacity': 5000000,\n                'status': 'offline',\n                'physical_type': 'ssd',\n                'logical_type': 'free',\n                'health_score': '255',\n                'native_disk_group_id': None,\n                'location': 'Location2',\n            },\n            {\n                'name': 'ST200:1234',\n                'storage_id': '12345',\n                'native_disk_id': '0A',\n                'serial_number': '1234',\n                'manufacturer': 'Seagate',\n                'model': 'ST200',\n                'firmware': '0003',\n                'speed': 7200,\n                'capacity': 1000000,\n                'status': 'abnormal',\n                'physical_type': 'unknown',\n                'logical_type': 'free',\n                'health_score': '255',\n                'native_disk_group_id': None,\n                'location': 'Location1',\n            }\n\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        'RUNNINGSTATUS': '27',\n                        'DISKTYPE': '4',\n                        'LOGICTYPE': '1',\n                        'HEALTHMARK': '255',\n                        'MODEL': 'ST200',\n                        'SERIALNUMBER': '1234',\n                        'MANUFACTURER': 'Seagate',\n                        'FIRMWAREVER': '0003',\n                        'SPEEDRPM': '7200',\n                        'SECTORS': '10000',\n                        'SECTORSIZE': '100',\n                        'ID': '0A',\n                        'LOCATION': 'Location1'\n                    },\n                    {\n                        'RUNNINGSTATUS': '28',\n                        'DISKTYPE': '3',\n                        'LOGICTYPE': '1',\n                        'HEALTHMARK': '255',\n                        'MODEL': 'WD00',\n                        'SERIALNUMBER': '1111',\n                        'MANUFACTURER': 'WesternDigital',\n                        'FIRMWAREVER': '123',\n                        'SPEEDRPM': '10000',\n                        'SECTORS': '50000',\n                        'SECTORSIZE': '100',\n                        'ID': '0B',\n                        'LOCATION': 'Location2'\n                    },\n                    {\n                        'RUNNINGSTATUS': '0',\n                        'DISKTYPE': '4',\n                        'LOGICTYPE': '1',\n                        'HEALTHMARK': '255',\n                        'MODEL': 'ST200',\n                        'SERIALNUMBER': '1234',\n                        'MANUFACTURER': 'Seagate',\n                     
   'FIRMWAREVER': '0003',\n                        'SPEEDRPM': '7200',\n                        'SECTORS': '10000',\n                        'SECTORSIZE': '100',\n                        'ID': '0A',\n                        'LOCATION': 'Location1'\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'SOFTVER': '1000',\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            disk = driver.list_disks(context)\n            self.assertDictEqual(disk[0], expected[0])\n            self.assertDictEqual(disk[1], expected[1])\n            self.assertDictEqual(disk[2], expected[2])\n\n        with mock.patch.object(RestClient, 'get_all_disks',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_disks(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_disks',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_disks(context)\n            self.assertIn('', str(exc.exception))\n\n    def test_list_filesystems(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'fs1',\n                'storage_id': '12345',\n                'native_filesystem_id': '123',\n                'native_pool_id': '123',\n                'compressed': True,\n                'deduplicated': True,\n                'worm': 'non_worm',\n                'status': 'normal',\n                'type': 'thin',\n                'total_capacity': 81920,\n                'used_capacity': 8192,\n                'free_capacity': 8192,\n            },\n            {\n                'name': 'fs2',\n                'storage_id': '12345',\n                'native_filesystem_id': '123',\n                'native_pool_id': '123',\n                'compressed': False,\n                'deduplicated': False,\n                'worm': 'compliance',\n                'status': 'normal',\n                'type': 'thin',\n                'total_capacity': 81920,\n                'used_capacity': 81920,\n                'free_capacity': 8192,\n            },\n            {\n                'name': 'fs3',\n                'storage_id': '12345',\n                'native_filesystem_id': '123',\n                'native_pool_id': '123',\n                'compressed': True,\n                'deduplicated': True,\n                'worm': 'audit_log',\n                'status': 'normal',\n                'type': 'thin',\n                'total_capacity': 81920,\n                'used_capacity': 8192,\n                'free_capacity': 8192,\n            }\n\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        'HEALTHSTATUS': '1',\n                        'ALLOCTYPE': '1',\n                        'SECTORSIZE': '8192',\n                        'CAPACITY': '10',\n                        'ALLOCCAPACITY': '1',\n                        'AVAILABLECAPCITY': '1',\n             
           'ENABLECOMPRESSION': 'true',\n                        'ENABLEDEDUP': 'true',\n                        'NAME': 'fs1',\n                        'ID': '123',\n                        'PARENTTYPE': 216,\n                        'PARENTID': '123',\n                        'WORMTYPE': '0'\n                    },\n                    {\n                        'HEALTHSTATUS': '1',\n                        'ALLOCTYPE': '1',\n                        'SECTORSIZE': '8192',\n                        'CAPACITY': '10',\n                        'ALLOCCAPACITY': '10',\n                        'AVAILABLECAPCITY': '1',\n                        'ENABLECOMPRESSION': 'false',\n                        'ENABLEDEDUP': 'false',\n                        'NAME': 'fs2',\n                        'ID': '123',\n                        'PARENTTYPE': 216,\n                        'PARENTID': '123',\n                        'WORMTYPE': '1'\n                    },\n                    {\n                        'HEALTHSTATUS': '1',\n                        'ALLOCTYPE': '1',\n                        'SECTORSIZE': '8192',\n                        'CAPACITY': '10',\n                        'ALLOCCAPACITY': '1',\n                        'AVAILABLECAPCITY': '1',\n                        'ENABLECOMPRESSION': 'true',\n                        'ENABLEDEDUP': 'true',\n                        'NAME': 'fs3',\n                        'ID': '123',\n                        'PARENTTYPE': 216,\n                        'PARENTID': '123',\n                        'WORMTYPE': '2'\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'SOFTVER': '1000',\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            fs = driver.list_filesystems(context)\n            self.assertDictEqual(fs[0], expected[0])\n            self.assertDictEqual(fs[1], expected[1])\n            self.assertDictEqual(fs[2], expected[2])\n\n        with mock.patch.object(RestClient, 'get_all_filesystems',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_filesystems(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_filesystems',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_filesystems(context)\n            self.assertIn('', str(exc.exception))\n\n    def test_list_qtrees(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'qtree1',\n                'storage_id': '12345',\n                'native_qtree_id': '123',\n                'native_filesystem_id': '123',\n                'security_mode': 'mixed',\n            }\n\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        'NAME': 'qtree1',\n                        'ID': '123',\n                        'securityStyle': '0',\n                        'PARENTTYPE': 40,\n                        'PARENTID': '123',\n                    },\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        with mock.patch.object(RestClient, 'get_all_filesystems',\n                               side_effect=[[{\"ID\": \"1\"}]]):\n            with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n                qtree = driver.list_qtrees(context)\n                self.assertDictEqual(qtree[0], expected[0])\n\n        with mock.patch.object(RestClient, 'get_all_filesystems',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_qtrees(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_filesystems',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_qtrees(context)\n            self.assertIn('', str(exc.exception))\n\n    def test_list_shares(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'CIFS',\n                'storage_id': '12345',\n                'native_share_id': '111',\n                'native_filesystem_id': 'FS111',\n                'path': '/filesystem0001/',\n                'protocol': 'cifs'\n            },\n            {\n                'name': 'NFS',\n                'storage_id': '12345',\n                'native_share_id': '222',\n                'native_filesystem_id': 'FS222',\n                'path': '/filesystem0002/',\n                'protocol': 'nfs'\n            },\n            {\n                'name': 'FTP',\n                'storage_id': '12345',\n                'native_share_id': '333',\n                'native_filesystem_id': 'FS333',\n                'path': '/filesystem0003/',\n                'protocol': 'ftp'\n            }\n\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        'subType': '0',\n                        'NAME': 'CIFS',\n                        'SHAREPATH': '/filesystem0001/',\n  
                      'ID': '111',\n                        'FSID': 'FS111'\n                    },\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'type': '16401',\n                    'NAME': 'NFS',\n                    'SHAREPATH': '/filesystem0002/',\n                    'ID': '222',\n                    'FSID': 'FS222'\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [{\n                    'ACCESSNAME': 'test',\n                    'NAME': 'FTP',\n                    'SHAREPATH': '/filesystem0003/',\n                    'ID': '333',\n                    'FSID': 'FS333'\n                }],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            share = driver.list_shares(context)\n            self.assertDictEqual(share[0], expected[0])\n            self.assertDictEqual(share[1], expected[1])\n            self.assertDictEqual(share[2], expected[2])\n\n        with mock.patch.object(RestClient, 'get_all_shares',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_shares(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_shares',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_shares(context)\n            self.assertIn('', str(exc.exception))\n\n    def test_list_storage_host_initiators(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': '12',\n                'description': 'FC Initiator',\n                'alias': '1212121212121212',\n                'storage_id': '12345',\n                'native_storage_host_initiator_id': '1212121212121212',\n                'wwn': '1212121212121212',\n                'status': 'online',\n                'native_storage_host_id': '0'\n            }\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        \"HEALTHSTATUS\": \"1\",\n                        \"ID\": \"1212121212121212\",\n                        \"ISFREE\": \"true\",\n                        \"MULTIPATHTYPE\": \"1\",\n                        \"NAME\": \"12\",\n                        \"OPERATIONSYSTEM\": \"1\",\n                        \"PARENTID\": \"0\",\n                        \"PARENTTYPE\": 0,\n                        \"PARENTNAME\": \"Host001\",\n                        \"RUNNINGSTATUS\": \"27\",\n                        \"TYPE\": 223,\n                        \"FAILOVERMODE\": \"3\",\n                        \"SPECIALMODETYPE\": \"2\",\n                        \"PATHTYPE\": \"1\"\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [\n                    {\n                        \"HEALTHSTATUS\": \"1\",\n      
                  \"ID\": \"111111111111111111\",\n                        \"ISFREE\": \"false\",\n                        \"MULTIPATHTYPE\": \"1\",\n                        \"OPERATIONSYSTEM\": \"255\",\n                        \"PARENTID\": \"0\",\n                        \"PARENTNAME\": \"Host001\",\n                        \"PARENTTYPE\": 21,\n                        \"RUNNINGSTATUS\": \"28\",\n                        \"TYPE\": 222,\n                        \"USECHAP\": \"false\",\n                        \"FAILOVERMODE\": \"3\",\n                        \"SPECIALMODETYPE\": \"2\",\n                        \"PATHTYPE\": \"1\"\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [\n                    {\n                        \"HEALTHSTATUS\": \"1\",\n                        \"ID\": \"1111111111111119\",\n                        \"ISFREE\": \"true\",\n                        \"MULTIPATHTYPE\": \"1\",\n                        \"NAME\": \"\",\n                        \"OPERATIONSYSTEM\": \"1\",\n                        \"RUNNINGSTATUS\": \"28\",\n                        \"TYPE\": 16499,\n                        \"FAILOVERMODE\": \"3\",\n                        \"SPECIALMODETYPE\": \"2\",\n                        \"PATHTYPE\": \"1\"\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            initators = driver.list_storage_host_initiators(context)\n            self.assertDictEqual(initators[0], expected[0])\n\n        with mock.patch.object(RestClient, 'get_all_initiators',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_storage_host_initiators(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_initiators',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_storage_host_initiators(context)\n            self.assertIn('', str(exc.exception))\n\n    def test_list_storage_hosts(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'Host001',\n                'description': '',\n                'storage_id': '12345',\n                'native_storage_host_id': '0',\n                'os_type': 'Linux',\n                'status': 'normal',\n                'ip_address': ''\n            }\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        \"DESCRIPTION\": \"\",\n                        \"HEALTHSTATUS\": \"1\",\n                        \"ID\": \"0\",\n                        \"INITIATORNUM\": \"0\",\n                        \"IP\": \"\",\n                        \"ISADD2HOSTGROUP\": \"true\",\n                        \"LOCATION\": \"\",\n                        \"MODEL\": \"\",\n                        \"NAME\": \"Host001\",\n                        \"NETWORKNAME\": \"\",\n                        \"OPERATIONSYSTEM\": \"0\",\n                        \"RUNNINGSTATUS\": \"1\",\n                        
\"TYPE\": 21,\n                        \"vstoreId\": \"4\",\n                        \"vstoreName\": \"vStore004\"\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            hosts = driver.list_storage_hosts(context)\n            self.assertDictEqual(hosts[0], expected[0])\n\n        with mock.patch.object(RestClient, 'get_all_hosts',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_storage_hosts(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_hosts',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_storage_hosts(context)\n            self.assertIn('', str(exc.exception))\n\n    def test_list_storage_host_groups(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'hostgroup1',\n                'description': '',\n                'storage_id': '12345',\n                'native_storage_host_group_id': '0',\n                'storage_hosts': '123'\n            }\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        \"DESCRIPTION\": \"\",\n                        \"ID\": \"0\",\n                        \"ISADD2MAPPINGVIEW\": \"false\",\n                        \"NAME\": \"hostgroup1\",\n                        \"TYPE\": 14,\n                        \"vstoreId\": \"4\",\n                        \"vstoreName\": \"vStore004\"\n                    },\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [\n                    {\n                        \"ID\": \"123\",\n                    },\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            hg = driver.list_storage_host_groups(context)\n            self.assertDictEqual(hg[0], expected[0])\n\n        with mock.patch.object(RestClient, 'get_all_host_groups',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_storage_host_groups(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_host_groups',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_storage_host_groups(context)\n            self.assertIn('', str(exc.exception))\n\n    def test_list_port_groups(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'PortGroup001',\n                'description': '',\n                'storage_id': '12345',\n                'native_port_group_id': '0',\n                'ports': '123,124,125',\n            }\n        ]\n\n        
ret = [\n            {\n                'data': [\n                    {\n                        \"DESCRIPTION\": \"\",\n                        \"ID\": \"0\",\n                        \"NAME\": \"PortGroup001\",\n                        \"TYPE\": 257\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [\n                    {\n                        \"ID\": \"123\",\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [\n                    {\n                        \"ID\": \"124\",\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [\n                    {\n                        \"ID\": \"125\",\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n        ]\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            port_groups = driver.list_port_groups(context)\n            self.assertDictEqual(port_groups[0], expected[0])\n\n        with mock.patch.object(RestClient, 'get_all_port_groups',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_port_groups(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_port_groups',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_port_groups(context)\n            self.assertIn('', str(exc.exception))\n\n    def test_list_volume_groups(self):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'LUNGroup001',\n                'description': '',\n                'storage_id': '12345',\n                'native_volume_group_id': '0',\n                'volumes': '123'\n            }\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        \"APPTYPE\": \"0\",\n                        \"CAPCITY\": \"2097152\",\n                        \"CONFIGDATA\": \"\",\n                        \"DESCRIPTION\": \"\",\n                        \"GROUPTYPE\": \"0\",\n                        \"ID\": \"0\",\n                        \"ISADD2MAPPINGVIEW\": \"false\",\n                        \"NAME\": \"LUNGroup001\",\n                        \"TYPE\": 256,\n                        \"vstoreId\": \"4\",\n                        \"vstoreName\": \"vStore004\"\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n            {\n                'data': [\n                    {\n                        \"ID\": \"123\",\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            },\n        ]\n  
      with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            volume_groups = driver.list_volume_groups(context)\n            self.assertDictEqual(volume_groups[0], expected[0])\n\n        with mock.patch.object(RestClient, 'get_all_volume_groups',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_volume_groups(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_volume_groups',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_volume_groups(context)\n            self.assertIn('', str(exc.exception))\n\n    @mock.patch.object(RestClient, 'get_all_associate_mapping_views')\n    @mock.patch.object(RestClient, 'get_all_port_groups')\n    @mock.patch.object(RestClient, 'get_all_volume_groups')\n    @mock.patch.object(RestClient, 'get_all_host_groups')\n    def test_list_masking_views(self, mock_hg, mock_vg,\n                                mock_pg, mock_associate):\n        driver = create_driver()\n        expected = [\n            {\n                'name': 'MappingView001',\n                'description': '',\n                'storage_id': '12345',\n                'native_masking_view_id': '1',\n            }\n        ]\n\n        ret = [\n            {\n                'data': [\n                    {\n                        \"DESCRIPTION\": \"\",\n                        \"ENABLEINBANDCOMMAND\": \"true\",\n                        \"ID\": \"1\",\n                        \"INBANDLUNWWN\": \"\",\n                        \"NAME\": \"MappingView001\",\n                        \"TYPE\": 245,\n                        \"vstoreId\": \"4\",\n                        \"vstoreName\": \"vStore004\"\n                    }\n                ],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        mock_hg.return_value = []\n        mock_vg.return_value = []\n        mock_pg.return_value = []\n        mock_associate.return_value = []\n\n        with mock.patch.object(RestClient, 'do_call', side_effect=ret):\n            view = driver.list_masking_views(context)\n            self.assertDictEqual(view[0], expected[0])\n\n        with mock.patch.object(RestClient, 'get_all_mapping_views',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.list_masking_views(context)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n\n        with mock.patch.object(RestClient, 'get_all_mapping_views',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.list_masking_views(context)\n            self.assertIn('', str(exc.exception))\n\n    @mock.patch.object(RestClient, 'get_disk_metrics')\n    @mock.patch.object(RestClient, 'get_port_metrics')\n    @mock.patch.object(RestClient, 'get_controller_metrics')\n    @mock.patch.object(RestClient, 'get_volume_metrics')\n    @mock.patch.object(RestClient, 'get_pool_metrics')\n    @mock.patch.object(RestClient, 'enable_metrics_collection')\n    @mock.patch.object(RestClient, 'disable_metrics_collection')\n    def 
test_collect_perf_metrics(self, mock_di, mock_en,\n                                  mock_pool, mock_volume, mock_controller,\n                                  mock_port, mock_disk):\n        driver = create_driver()\n\n        ret = [\n            {\n                'data': [{}],\n                'error': {\n                    'code': 0,\n                    'description': '0'\n                }\n            }\n        ]\n        mock_di.return_value = None\n        mock_en.return_value = None\n\n        mock_pool.return_value = [{}]\n        mock_volume.return_value = [{}]\n        mock_controller.return_value = [{}]\n        mock_port.return_value = [{}]\n        mock_disk.return_value = [{}]\n        with mock.patch.object(RestClient,\n                               'do_call', side_effect=ret):\n            storage_id = 123\n            resource_metrics = {\n                'storagePool': {'iops': 'iops description'},\n                'volume': {'iops': 'iops description'},\n                'port': {'iops': 'iops description'},\n                'disk': {'iops': 'iops description'},\n            }\n            start, end = 0, 1\n            driver.collect_perf_metrics(\n                context, storage_id, resource_metrics, start, end)\n            mock_en.assert_called()\n            mock_di.assert_called()\n        mock_pool.assert_called()\n        mock_volume.assert_called()\n        mock_controller.assert_not_called()\n        mock_port.assert_called()\n        mock_disk.assert_called()\n\n        with mock.patch.object(RestClient, 'get_disk_metrics',\n                               side_effect=exception.DelfinException):\n            with self.assertRaises(Exception) as exc:\n                driver.collect_perf_metrics(context, 0,\n                                            {'disk': {'iops': 'iops'}},\n                                            0, 0)\n            self.assertIn('An unknown exception occurred',\n                          str(exc.exception))\n        with mock.patch.object(RestClient, 'get_disk_metrics',\n                               side_effect=TypeError):\n            with self.assertRaises(Exception) as exc:\n                driver.collect_perf_metrics(context, 0,\n                                            {'disk': {'iops': 'iops'}},\n                                            0, 0)\n            self.assertIn('', str(exc.exception))\n\n    def test_get_capabilities(self):\n        driver = create_driver()\n        cap = driver.get_capabilities(context)\n        self.assertIsNotNone(cap.get('resource_metrics'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('storagePool'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('volume'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('controller'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('port'))\n        self.assertIsNotNone(cap.get('resource_metrics').get('disk'))\n"
  },
  {
    "path": "delfin/tests/unit/drivers/huawei/oceanstor/test_rest_client.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import TestCase, mock\nfrom unittest.mock import call\n\nfrom requests.sessions import Session\n\nfrom delfin import exception\nfrom delfin.common import config # noqa\nfrom delfin.drivers.huawei.oceanstor.rest_client import RestClient\n\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"huawei\",\n    \"model\": \"oceanstor\",\n    \"rest\": {\n        \"host\": \"10.0.0.1\",\n        \"port\": 1234,\n        \"username\": \"user\",\n        \"password\": \"cGFzc3dvcmQ=\"\n    },\n    \"extra_attributes\": {\n        \"array_id\": \"00112233\"\n    }\n}\n\nRESP = {\n    \"error\": {\n        \"code\": 0\n    },\n    \"data\": {\n        \"data\": \"dummy\",\n        \"deviceid\": \"0123456\",\n        \"iBaseToken\": \"112233\",\n        \"accountstate\": \"GREEN\"\n    }\n}\n\n\nclass TestOceanStorRestClient(TestCase):\n\n    def _mock_response(\n            self,\n            status=200,\n            content=\"CONTENT\",\n            json_data=None,\n            raise_for_status=None):\n\n        mock_resp = mock.Mock()\n        mock_resp.raise_for_status = mock.Mock()\n        if raise_for_status:\n            mock_resp.raise_for_status.side_effect = raise_for_status\n        mock_resp.status_code = status\n        mock_resp.content = content\n        if json_data:\n            mock_resp.json = mock.Mock(\n                return_value=json_data\n            )\n        return mock_resp\n\n    # @mock.patch.object(RestClient, 'login')\n    @mock.patch.object(Session, 'post')\n    def test_init(self, mock_rest):\n        mock_resp = self._mock_response(json_data=RESP)\n        mock_rest.return_value = mock_resp\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        self.assertEqual(rest_client.rest_host, \"10.0.0.1\")\n        self.assertEqual(rest_client.rest_port, 1234)\n        self.assertEqual(rest_client.session.headers['iBaseToken'], '112233')\n\n    @mock.patch.object(RestClient, 'login')\n    def test_reset_connection(self, mock_login):\n        mock_login.return_value = None\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        self.assertEqual(rest_client.rest_host, \"10.0.0.1\")\n        self.assertEqual(rest_client.rest_port, 1234)\n\n        mock_login.side_effect = exception.StorageBackendException\n        with self.assertRaises(Exception) as exc:\n            RestClient(**kwargs)\n        self.assertIn('The credentials are invalid',\n                      str(exc.exception))\n\n    @mock.patch.object(RestClient, 'call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_storage(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        data = rest_client.get_storage()\n        self.assertEqual(data['data'], 'dummy')\n\n        
mock_call.return_value = {\n            \"error\": {\n                \"code\": 0\n            }\n        }\n        with self.assertRaises(Exception) as exc:\n            rest_client.get_storage()\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n        mock_call.return_value['error']['code'] = 1\n        with self.assertRaises(Exception) as exc:\n            rest_client.get_storage()\n        self.assertIn('Exception from Storage Backend',\n                      str(exc.exception))\n\n    @mock.patch.object(RestClient, 'call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_controller(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        data = rest_client.get_all_controllers()\n        self.assertEqual(data['data'], 'dummy')\n        mock_call.assert_called_with(\"/controller\",\n                                     log_filter_flag=True, method='GET')\n\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_all_pools(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        data = rest_client.get_all_pools()\n        self.assertEqual(data['data']['data'], 'dummy')\n        mock_call.assert_called_with(\"/storagepool\", None,\n                                     'GET', log_filter_flag=True)\n\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_all_hosts(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        data = rest_client.get_all_hosts()\n        self.assertEqual(data['data']['data'], 'dummy')\n        mock_call.assert_called_with(\"/host\", None,\n                                     'GET', log_filter_flag=True)\n\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_all_host_groups(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        data = rest_client.get_all_host_groups()\n        self.assertEqual(data['data']['data'], 'dummy')\n        mock_call.assert_called_with(\"/hostgroup\", None,\n                                     'GET', log_filter_flag=True)\n\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_all_port_groups(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        data = rest_client.get_all_port_groups()\n        self.assertEqual(data['data']['data'], 'dummy')\n        mock_call.assert_called_with(\"/portgroup\", None,\n                                     'GET', log_filter_flag=True)\n\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_all_volume_groups(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = 
RestClient(**kwargs)\n        data = rest_client.get_all_volume_groups()\n        self.assertEqual(data['data']['data'], 'dummy')\n        mock_call.assert_called_with(\"/lungroup\", None,\n                                     'GET', log_filter_flag=True)\n\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_all_volumes(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        data = rest_client.get_all_volumes()\n        self.assertEqual(data['data']['data'], 'dummy')\n        mock_call.assert_called_with(\"/lun\", None,\n                                     'GET', log_filter_flag=True)\n\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_all_initiators(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.side_effect = [\"\", \"\", \"\"]\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        rest_client.get_all_initiators()\n        call1 = call(\"/fc_initiator\", None, 'GET', log_filter_flag=True)\n        call2 = call(\"/iscsi_initiator\", None, 'GET', log_filter_flag=True)\n        call3 = call(\"/ib_initiator\", None, 'GET', log_filter_flag=True)\n\n        calls = [call1, call2, call3]\n        mock_call.assert_has_calls(calls)\n\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_all_mapping_views(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        data = rest_client.get_all_mapping_views()\n        self.assertEqual(data['data']['data'], 'dummy')\n        mock_call.assert_called_with(\"/mappingview\", None,\n                                     'GET', log_filter_flag=True)\n\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_volumes(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        data = rest_client.get_all_volumes()\n        self.assertEqual(data['data']['data'], 'dummy')\n        mock_call.assert_called_with(\"/lun\", None, 'GET',\n                                     log_filter_flag=True)\n\n    @mock.patch.object(RestClient, 'call')\n    @mock.patch.object(RestClient, 'login')\n    def test_enable_metrics_collection(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        data = rest_client.enable_metrics_collection()\n        self.assertEqual(data['data'], 'dummy')\n        mock_call.assert_called_with(\"/performance_statistic_switch\",\n                                     {'CMO_PERFORMANCE_SWITCH': '1'},\n                                     log_filter_flag=True, method='PUT')\n\n    @mock.patch.object(RestClient, 'call')\n    @mock.patch.object(RestClient, 'login')\n    def test_disable_metrics_collection(self, mock_login, mock_call):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        data = 
rest_client.disable_metrics_collection()\n        self.assertEqual(data['data'], 'dummy')\n        mock_call.assert_called_with(\"/performance_statistic_switch\",\n                                     {'CMO_PERFORMANCE_SWITCH': '0'},\n                                     log_filter_flag=True, method='PUT')\n\n    @mock.patch.object(RestClient, 'disable_metrics_collection')\n    @mock.patch.object(RestClient, 'enable_metrics_collection')\n    @mock.patch.object(RestClient, 'call')\n    @mock.patch.object(RestClient, 'login')\n    def test_configure_metrics_collection(self, mock_login, mock_call,\n                                          mock_en, mock_di):\n        mock_login.return_value = None\n        mock_call.return_value = RESP\n        mock_en.return_value = None\n        mock_di.return_value = None\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        rest_client.configure_metrics_collection()\n        data = {\n            \"CMO_STATISTIC_ARCHIVE_SWITCH\": 1,\n            \"CMO_STATISTIC_ARCHIVE_TIME\": 300,\n            \"CMO_STATISTIC_AUTO_STOP\": 0,\n            \"CMO_STATISTIC_INTERVAL\": 60,\n            \"CMO_STATISTIC_MAX_TIME\": 0\n        }\n        mock_call.assert_called_with(\"/performance_statistic_strategy\",\n                                     data,\n                                     log_filter_flag=True, method='PUT')\n\n    @mock.patch.object(RestClient, 'get_all_pools')\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_pool_metrics(self, mock_login, mock_call,\n                              mock_pools):\n        mock_login.return_value = None\n        mock_call.return_value = [{'CMO_STATISTIC_DATA_LIST': '12,25',\n                                   'CMO_STATISTIC_TIMESTAMP': 0}]\n        mock_pools.return_value = [\n            {'ID': '123', 'TYPE': '100', 'NAME': 'pool'}\n        ]\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        metrics = rest_client.get_pool_metrics('', {'iops': {'unit': 'IOPS'}})\n        mock_call.assert_called_with(\n            \"/performace_statistic/cur_statistic_data\",\n            None, 'GET', log_filter_flag=True,\n            params='CMO_STATISTIC_UUID=100:123&CMO_STATISTIC_DATA_ID_LIST=22&'\n                   'timeConversion=0&'\n        )\n        expected_label = {\n            'storage_id': '',\n            'resource_type': 'pool',\n            'resource_id': '123',\n            'type': 'RAW',\n            'unit': 'IOPS',\n            'resource_name': 'pool'\n        }\n        self.assertEqual(metrics[0].name, 'iops')\n        self.assertDictEqual(metrics[0].labels, expected_label)\n        self.assertListEqual(list(metrics[0].values.values()), [12])\n\n    @mock.patch.object(RestClient, 'get_all_volumes')\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_volume_metrics(self, mock_login, mock_call,\n                                mock_volumes):\n        mock_login.return_value = None\n        mock_call.return_value = [{'CMO_STATISTIC_DATA_LIST': '12,25',\n                                   'CMO_STATISTIC_TIMESTAMP': 0}]\n        mock_volumes.return_value = [\n            {'ID': '123', 'TYPE': '100', 'NAME': 'volume'}\n        ]\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        metrics = rest_client.get_volume_metrics(\n            '', {'iops': {'unit': 'IOPS'}})\n        
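# The metrics query UUID takes the form '<TYPE>:<ID>' ('100:123' here),\n        # derived from the single volume returned by the mocked get_all_volumes.\n        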
mock_call.assert_called_with(\n            \"/performace_statistic/cur_statistic_data\",\n            None, 'GET', log_filter_flag=True,\n            params='CMO_STATISTIC_UUID=100:123&CMO_STATISTIC_DATA_ID_LIST=22&'\n                   'timeConversion=0&'\n        )\n        expected_label = {\n            'storage_id': '',\n            'resource_type': 'volume',\n            'resource_id': '123',\n            'type': 'RAW',\n            'unit': 'IOPS',\n            'resource_name': 'volume'\n        }\n        self.assertEqual(metrics[0].name, 'iops')\n        self.assertDictEqual(metrics[0].labels, expected_label)\n        self.assertListEqual(list(metrics[0].values.values()), [12])\n\n    @mock.patch.object(RestClient, 'get_all_controllers')\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_controller_metrics(self, mock_login, mock_call,\n                                    mock_controllers):\n        mock_login.return_value = None\n        mock_call.return_value = [{'CMO_STATISTIC_DATA_LIST': '12,25',\n                                   'CMO_STATISTIC_TIMESTAMP': 0}]\n        mock_controllers.return_value = [\n            {'ID': '123', 'TYPE': '100', 'NAME': 'controller'}\n        ]\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        metrics = rest_client.get_controller_metrics(\n            '', {'iops': {'unit': 'IOPS'}})\n        mock_call.assert_called_with(\n            \"/performace_statistic/cur_statistic_data\",\n            None, 'GET', log_filter_flag=True,\n            params='CMO_STATISTIC_UUID=100:123&CMO_STATISTIC_DATA_ID_LIST=22&'\n                   'timeConversion=0&'\n        )\n        expected_label = {\n            'storage_id': '',\n            'resource_type': 'controller',\n            'resource_id': '123',\n            'type': 'RAW',\n            'unit': 'IOPS',\n            'resource_name': 'controller'\n        }\n        self.assertEqual(metrics[0].name, 'iops')\n        self.assertDictEqual(metrics[0].labels, expected_label)\n        self.assertListEqual(list(metrics[0].values.values()), [12])\n\n    @mock.patch.object(RestClient, 'get_all_ports')\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_port_metrics(self, mock_login, mock_call,\n                              mock_ports):\n        mock_login.return_value = None\n        mock_call.return_value = [{'CMO_STATISTIC_DATA_LIST': '12,25',\n                                   'CMO_STATISTIC_TIMESTAMP': 0}]\n        mock_ports.return_value = [\n            {'ID': '123', 'TYPE': '100', 'NAME': 'port'}\n        ]\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        metrics = rest_client.get_port_metrics('', {'iops': {'unit': 'IOPS'}})\n        mock_call.assert_called_with(\n            \"/performace_statistic/cur_statistic_data\",\n            None, 'GET', log_filter_flag=True,\n            params='CMO_STATISTIC_UUID=100:123&CMO_STATISTIC_DATA_ID_LIST=22&'\n                   'timeConversion=0&'\n        )\n        expected_label = {\n            'storage_id': '',\n            'resource_type': 'port',\n            'resource_id': '123',\n            'type': 'RAW',\n            'unit': 'IOPS',\n            'resource_name': 'port'\n        }\n        self.assertEqual(metrics[0].name, 'iops')\n        self.assertDictEqual(metrics[0].labels, expected_label)\n        self.assertListEqual(list(metrics[0].values.values()), 
[12])\n\n    @mock.patch.object(RestClient, 'get_all_disks')\n    @mock.patch.object(RestClient, 'paginated_call')\n    @mock.patch.object(RestClient, 'login')\n    def test_get_disk_metrics(self, mock_login, mock_call,\n                              mock_disks):\n        mock_login.return_value = None\n        mock_call.return_value = [{'CMO_STATISTIC_DATA_LIST': '12,25',\n                                   'CMO_STATISTIC_TIMESTAMP': 0}]\n        mock_disks.return_value = [\n            {'ID': '123', 'TYPE': '100', 'MODEL': 'disk', 'SERIALNUMBER': '0'}\n        ]\n        kwargs = ACCESS_INFO\n        rest_client = RestClient(**kwargs)\n        metrics = rest_client.get_disk_metrics('', {'iops': {'unit': 'IOPS'}})\n        mock_call.assert_called_with(\n            \"/performace_statistic/cur_statistic_data\",\n            None, 'GET', log_filter_flag=True,\n            params='CMO_STATISTIC_UUID=100:123&CMO_STATISTIC_DATA_ID_LIST=22&'\n                   'timeConversion=0&'\n        )\n        expected_label = {\n            'storage_id': '',\n            'resource_type': 'disk',\n            'resource_id': '123',\n            'resource_name': 'disk:0',\n            'type': 'RAW',\n            'unit': 'IOPS',\n        }\n        self.assertEqual(metrics[0].name, 'iops')\n        self.assertDictEqual(metrics[0].labels, expected_label)\n        self.assertListEqual(list(metrics[0].values.values()), [12])\n"
  },
  {
    "path": "delfin/tests/unit/drivers/ibm/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/ibm/ibm_ds8k/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/ibm/ibm_ds8k/test_ibm_ds8k.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nfrom unittest import TestCase, mock\n\nsys.modules['delfin.cryptor'] = mock.Mock()\n\nfrom delfin import context\nfrom delfin.drivers.ibm.ds8k.rest_handler import RestHandler\nfrom delfin.drivers.ibm.ds8k.ds8k import DS8KDriver\n\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"rest\": {\n        \"host\": \"110.143.132.231\",\n        \"port\": \"8443\",\n        \"username\": \"username\",\n        \"password\": \"cGFzc3dvcmQ=\"\n    },\n    \"ssh\": {\n        \"host\": \"110.143.132.231\",\n        \"port\": \"22\",\n        \"username\": \"username\",\n        \"password\": \"password\",\n        \"host_key\": \"weqewrerwerwerwe\"\n    },\n    \"vendor\": \"IBM\",\n    \"model\": \"DS8000\",\n    \"extra_attributes\": {\n        \"array_id\": \"00112233\"\n    }\n}\nGET_STORAGE = {\n    \"data\": {\n        \"systems\": [\n            {\n                \"id\": \"2107-75BXG71\",\n                \"name\": \"TDCUOB_DS8870\",\n                \"state\": \"online\",\n                \"release\": \"7.5.1\",\n                \"bundle\": \"87.51.103.5120\",\n                \"MTM\": \"2423-961\",\n                \"sn\": \"75BXG71\",\n                \"wwnn\": \"5005076304FFD7EF\",\n                \"cap\": \"1655709892608\",\n                \"capalloc\": \"1073741824000\",\n                \"capavail\": \"581968068608\",\n                \"capraw\": \"2516582400000\"\n            }\n        ]\n    }\n}\nGET_ALL_POOLS = {\n    \"data\": {\n        \"pools\": [\n            {\n                \"id\": \"P0\",\n                \"link\": {\n                    \"rel\": \"self\",\n                    \"href\": \"https:/192.168.1.170:8452/api/v1/pools/P0\"\n                },\n                \"name\": \"test_pool\",\n                \"node\": \"0\",\n                \"stgtype\": \"fb\",\n                \"cap\": \"1655709892608\",\n                \"capalloc\": \"1073741824000\",\n                \"capavail\": \"581968068608\",\n                \"overprovisioned\": \"0.6\",\n                \"easytier\": \"managed\",\n                \"tieralloc\": [\n                    {\n                        \"tier\": \"ENT\",\n                        \"cap\": \"1655709892608\",\n                        \"allocated\": \"1073741824000\",\n                        \"assigned\": \"0\"\n                    }\n                ],\n                \"threshold\": \"15\",\n                \"real_capacity_allocated_on_ese\": \"0\",\n                \"virtual_capacity_allocated_on_ese\": \"0\",\n                \"eserep\": {},\n                \"tserep\": {},\n                \"volumes\": {\n                    \"link\": {\n                        \"rel\": \"self\"\n                    }\n                }\n            }\n        ]\n    }\n}\nGET_ALL_LUNS = {\n    \"data\": {\n        \"volumes\":\n        [\n            {\n                \"link\": {\n                    \"rel\": \"self\",\n   
                 \"href\": \"https://{hmc}:443/api/v1/volumes/0000\"\n                },\n                \"id\": \"0000\",\n                \"name\": \"mytest\",\n                \"state\": \"normal\",\n                \"cap\": \"322122547200\",\n                \"stgtype\": \"fb\",\n                \"VOLSER\": \"\",\n                \"lss\": {\n                    \"id\": \"00\",\n                    \"link\": {\n                        \"rel\": \"self\",\n                        \"href\":\n                        \"https://{hmc}:443/api/lss/00\"\n                    }\n                },\n                \"allocmethod\": \"legacy\",\n                \"tp\": \"none\",\n                \"capalloc\": \"134217728\",\n                \"MTM\": \"2107-900\",\n                \"datatype\": \"FB 512\",\n                \"tieralloc\":\n                [\n                    {\n                        \"tier\": \"ENT\",\n                        \"allocated\": \"34502\"\n                    }\n                ],\n                \"pool\": {\n                    \"id\": \"P2\",\n                    \"link\": {\n                        \"rel\": \"self\",\n                        \"href\":\n                        \"https://{hmc}:443/api/v1/pools/P2\"\n                    }\n                }\n            }\n        ]\n    }\n}\nGET_ALL_LUNS_NULL = {\n    \"data\": {\n        \"volumes\":\n        []\n    }\n}\nGET_ALL_ALERTS = {\n    \"data\": {\n        \"events\":\n        [\n            {\n                \"id\": \"SEfe\",\n                \"type\": \"HostPortStateChanged\",\n                \"severity\": \"error\",\n                \"time\": \"2014-04-20T13:00:23-0700\",\n                \"resource_id\": \"1152922127280127616\",\n                \"formatted_parameter\":\n                    [\"10000090FA383E80\", \"Logged Off\",\n                     \"Logged In\", \"NISCSIHostPortID: \"\"IBM.2107-75BXG71/12\"],\n                \"description\": \"Host port 10000090FA383E80 state logged in.\"\n            }\n        ]\n    }\n}\nGET_ALL_PORTS = {\n    \"data\": {\n        \"ioports\":\n        [\n            {\n                \"id\": \"I0000\",\n                \"link\": {\n                    \"rel\": \"self\",\n                    \"href\": \"https:/192.168.1.170:8452/api/v1/ioports/I0000\"\n                },\n                \"state\": \"online\",\n                \"protocol\": \"FC-AL\",\n                \"wwpn\": \"50050763040017EF\",\n                \"type\": \"Fibre Channel-SW\",\n                \"speed\": \"8 Gb/s\",\n                \"loc\": \"U1400.1B1.RJ55380-P1-C1-T0\"\n            },\n            {\n                \"id\": \"I0005\",\n                \"link\": {\n                    \"rel\": \"self\",\n                    \"href\": \"https:/192.168.1.170:8452/api/v1/ioports/I0005\"\n                },\n                \"state\": \"online\",\n                \"protocol\": \"SCSI-FCP\",\n                \"wwpn\": \"50050763044057EF\",\n                \"type\": \"Fibre Channel-SW\",\n                \"speed\": \"8 Gb/s\",\n                \"loc\": \"U1400.1B1.RJ55380-P1-C1-T5\"\n            }\n        ]\n    }\n}\nGET_ALL_CONTROLLERS = {\n    'data': {\n        'nodes': [\n            {\n                'id': '00',\n                'state': 'online'\n            }, {\n                'id': '01',\n                'state': 'online'\n            }\n        ]\n    }\n}\nTOKEN_RESULT = {\n    \"server\": {\n        \"status\": \"ok\",\n        \"code\": \"200\",\n        \"message\": 
\"Operation done successfully.\"\n    },\n    \"token\": {\n        \"token\": \"ddb1743a\",\n        \"expired_time\": \"2014-08-25T03:28:15-0700\",\n        \"max_idle_interval\": \"1800000\"\n    }\n}\nTRAP_INFO = {\n    \"1.3.6.1.2.1.1.3.0\": \"0\",\n    '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.1139.103.1.18.2.0',\n    '1.3.6.1.4.1.1139.103.1.18.1.1': 'eeeeeeeee',\n    '1.3.6.1.4.1.1139.103.1.18.1.3': 'ddddddd',\n    '1.3.6.1.4.1.1139.103.1.18.1.4': 'this is test',\n    '1.3.6.1.4.1.1139.103.1.18.1.5': '2020/11/20 14:10:10',\n    '1.3.6.1.4.1.1139.103.1.18.1.2': 'test'\n}\nstorage_result = {\n    'name': 'TDCUOB_DS8870',\n    'vendor': 'IBM',\n    'model': '2423-961',\n    'status': 'normal',\n    'serial_number': '75BXG71',\n    'firmware_version': '7.5.1',\n    'location': '',\n    'total_capacity': 1655709892608,\n    'raw_capacity': 2516582400000,\n    'used_capacity': 1073741824000,\n    'free_capacity': 581968068608\n}\npool_result = [\n    {\n        'name': 'test_pool_0',\n        'storage_id': '12345',\n        'native_storage_pool_id': 'P0',\n        'status': 'abnormal',\n        'storage_type': 'block',\n        'total_capacity': 1655709892608,\n        'used_capacity': 1073741824000,\n        'free_capacity': 581968068608\n    }\n]\nvolume_result = [\n    {\n        'name': 'mytest_0000',\n        'storage_id': '12345',\n        'description': '',\n        'status': 'normal',\n        'native_volume_id': '0000',\n        'native_storage_pool_id': 'P2',\n        'wwn': '',\n        'type': 'thick',\n        'total_capacity': 322122547200,\n        'used_capacity': 134217728,\n        'free_capacity': 321988329472\n    }\n]\nalert_result = [\n    {\n        'alert_id': 'HostPortStateChanged',\n        'alert_name': 'Host port 10000090FA383E80 state logged in.',\n        'severity': 'Critical',\n        'description': 'Host port 10000090FA383E80 state logged in.',\n        'category': 'Fault',\n        'type': 'EquipmentAlarm',\n        'sequence_number': 'SEfe',\n        'occur_time': 1397970023000,\n        'resource_type': 'Storage'\n    }\n]\nport_result = [\n    {\n        'name': 'U1400.1B1.RJ55380-P1-C1-T0',\n        'storage_id': '12345',\n        'native_port_id': 'I0000',\n        'location': 'U1400.1B1.RJ55380-P1-C1-T0',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'fc',\n        'logical_type': '',\n        'speed': 8000000000,\n        'max_speed': 8000000000,\n        'wwn': '50:05:07:63:04:00:17:EF'\n    }, {\n        'name': 'U1400.1B1.RJ55380-P1-C1-T5',\n        'storage_id': '12345',\n        'native_port_id': 'I0005',\n        'location': 'U1400.1B1.RJ55380-P1-C1-T5',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'fc',\n        'logical_type': '',\n        'speed': 8000000000,\n        'max_speed': 8000000000,\n        'wwn': '50:05:07:63:04:40:57:EF'\n    }\n]\ncontrl_result = [\n    {\n        'name': '00',\n        'storage_id': '12345',\n        'native_controller_id': '00',\n        'status': 'normal'\n    }, {\n        'name': '01',\n        'storage_id': '12345',\n        'native_controller_id': '01',\n        'status': 'normal'\n    }\n]\n\ntrap_result = {\n    'alert_id': 'ddddddd',\n    'alert_name': 'test',\n    'severity': 'Critical',\n    'category': 'Fault',\n    'type': 'EquipmentAlarm',\n    'occur_time': 1605852610000,\n    'description': 'this is test',\n    'resource_type': 'Storage',\n    'location': 'eeeeeeeee'\n}\nGET_INITORATORS = {\n    
\"data\": {\n        \"host_ports\":\n        [\n            {\n                \"wwpn\": \"50050763030813A2\",\n                \"state\": \"logged in\",\n                \"hosttype\": \"VMware\",\n                \"addrdiscovery\": \"lunpolling\",\n                \"lbs\": \"512\",\n                \"host\": {\n                    \"name\": \"myhost\"\n                }\n            }\n        ]\n    }\n}\nINIT_RESULT = [\n    {\n        'name': '50050763030813A2',\n        'storage_id': '12345',\n        'native_storage_host_initiator_id': '50050763030813A2',\n        'wwn': '50050763030813A2',\n        'status': 'online',\n        'type': 'unknown',\n        'native_storage_host_id': 'myhost'\n    }\n]\nGET_ALL_HOSTS = {\n    \"data\": {\n        \"hosts\":\n        [\n            {\n                \"name\": \"test_host\",\n                \"state\": \"online\",\n                \"hosttype\": \"VMware\",\n                \"addrmode\": \"SCSI mask\",\n                \"addrdiscovery\": \"lunpolling\",\n                \"lbs\": \"512\"\n            }\n        ]\n    }\n}\nHOST_RESULT = [\n    {\n        'name': 'test_host',\n        'storage_id': '12345',\n        'native_storage_host_id': 'test_host',\n        'os_type': 'VMware ESX',\n        'status': 'normal'\n    }\n]\nGET_HOST_MAPPING = {\n    \"data\": {\n        \"mappings\":\n        [\n            {\n                \"lunid\": \"00\",\n                \"volume\": {\n                    \"id\": \"0005\"\n                }\n            }\n        ]\n    }\n}\nVIEW_RESULT = [\n    {\n        'name': '00_test_host',\n        'native_storage_host_id': 'test_host',\n        'storage_id': '12345',\n        'native_volume_id': '0005',\n        'native_masking_view_id': '00_test_host'\n    }\n]\n\n\nclass TestDS8KDriver(TestCase):\n\n    @mock.patch.object(RestHandler, 'get_rest_info')\n    def test_get_storage(self, mock_storage):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_storage.return_value = GET_STORAGE\n        storage = DS8KDriver(**ACCESS_INFO).get_storage(context)\n        self.assertDictEqual(storage, storage_result)\n\n    @mock.patch.object(RestHandler, 'get_rest_info')\n    def test_list_storage_pools(self, mock_pool):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_pool.return_value = GET_ALL_POOLS\n        pool = DS8KDriver(**ACCESS_INFO).list_storage_pools(context)\n        self.assertEqual(pool, pool_result)\n\n    def test_list_volumes(self):\n        RestHandler.login = mock.Mock(return_value=None)\n        RestHandler.get_rest_info = mock.Mock(\n            side_effect=[GET_ALL_POOLS, GET_ALL_LUNS])\n        vol = DS8KDriver(**ACCESS_INFO).list_volumes(context)\n        self.assertEqual(vol, volume_result)\n\n    @mock.patch.object(RestHandler, 'get_rest_info')\n    def test_list_alerts(self, mock_alert):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_alert.return_value = GET_ALL_ALERTS\n        alert = DS8KDriver(**ACCESS_INFO).list_alerts(context)\n        alert[0]['occur_time'] = alert_result[0]['occur_time']\n        self.assertEqual(alert, alert_result)\n\n    @mock.patch.object(RestHandler, 'call_with_token')\n    def test_call_and_login(self, mock_token):\n        with self.assertRaises(Exception) as exc:\n            mock_token.return_value = mock.MagicMock(\n                status_code=401, text='Authentication has failed')\n            DS8KDriver(**ACCESS_INFO).rest_handler.login()\n        self.assertEqual('Invalid username or 
password.', str(exc.exception))\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_token.return_value = mock.MagicMock(status_code=401)\n        DS8KDriver(**ACCESS_INFO).rest_handler.call('')\n\n    @mock.patch.object(RestHandler, 'get_rest_info')\n    def test_list_ports(self, mock_port):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_port.return_value = GET_ALL_PORTS\n        port = DS8KDriver(**ACCESS_INFO).list_ports(context)\n        self.assertEqual(port, port_result)\n\n    @mock.patch.object(RestHandler, 'get_rest_info')\n    def test_list_controllers(self, mock_contrl):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_contrl.return_value = GET_ALL_CONTROLLERS\n        controller = DS8KDriver(**ACCESS_INFO).list_controllers(context)\n        self.assertEqual(controller, contrl_result)\n\n    @mock.patch.object(RestHandler, 'get_rest_info')\n    def test_host_initiators(self, mock_init):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_init.return_value = GET_INITORATORS\n        initiators = DS8KDriver(\n            **ACCESS_INFO).list_storage_host_initiators(context)\n        self.assertEqual(initiators, INIT_RESULT)\n\n    @mock.patch.object(RestHandler, 'get_rest_info')\n    def test_hosts(self, mock_host):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_host.return_value = GET_ALL_HOSTS\n        hosts = DS8KDriver(**ACCESS_INFO).list_storage_hosts(context)\n        self.assertEqual(hosts, HOST_RESULT)\n\n    @mock.patch.object(RestHandler, 'get_rest_info')\n    def test_masking_views(self, mock_view):\n        RestHandler.login = mock.Mock(return_value=None)\n        mock_view.side_effect = [GET_ALL_HOSTS, GET_HOST_MAPPING]\n        views = DS8KDriver(**ACCESS_INFO).list_masking_views(context)\n        self.assertEqual(views, VIEW_RESULT)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/ibm/storwize_svc/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nfrom unittest import TestCase, mock\n\nimport paramiko\n\nfrom delfin.common import constants\n\ntry:\n    import xml.etree.cElementTree as ET\nexcept ImportError:\n    import xml.etree.ElementTree as ET\n\nfrom delfin.drivers.utils.tools import Tools\n\nsys.modules['delfin.cryptor'] = mock.Mock()\nfrom delfin import context\nfrom delfin.drivers.ibm.storwize_svc.ssh_handler import SSHHandler\nfrom delfin.drivers.ibm.storwize_svc.storwize_svc import StorwizeSVCDriver\nfrom delfin.drivers.utils.ssh_client import SSHPool\n\n\nclass Request:\n    def __init__(self):\n        self.environ = {'delfin.context': context.RequestContext()}\n        pass\n\n\nUNSECURE_ALGORITHMS = {\n    \"ciphers\": [\n        \"aes128-cbc\",\n        \"aes192-cbc\",\n        \"aes256-cbc\",\n        \"blowfish-cbc\",\n        \"3des-cbc\"\n    ],\n    \"macs\": [\n        \"hmac-sha1-96\",\n        \"hmac-md5\",\n        \"hmac-md5-96\"\n    ],\n    \"keys\": [\n        \"ecdsa-sha2-nistp256\",\n        \"ecdsa-sha2-nistp384\",\n        \"ecdsa-sha2-nistp521\",\n        \"ssh-dss\"\n    ],\n    \"kex\": [\n        \"diffie-hellman-group14-sha256\",\n        \"diffie-hellman-group-exchange-sha1\",\n        \"diffie-hellman-group14-sha1\",\n        \"diffie-hellman-group1-sha1\"\n    ]}\n\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"hpe\",\n    \"model\": \"3par\",\n    \"rest\": {\n        \"host\": \"10.0.0.1\",\n        \"port\": 8443,\n        \"username\": \"user\",\n        \"password\": \"pass\"\n    },\n    \"ssh\": {\n        \"host\": \"110.143.132.231\",\n        \"port\": 22,\n        \"username\": \"user\",\n        \"password\": \"pass\",\n        \"pub_key\": \"ddddddddddddddddddddddddd\"\n    }\n}\n\nsystem_info = \"\"\"id 00000200A1207E1F\nname Cluster_192.168.70.125\nlocation local\npartnership\ntotal_mdisk_capacity 8.1TB\nspace_in_mdisk_grps 8.1TB\nspace_allocated_to_vdisks 5.06TB\ntotal_free_space 3.1TB\ntotal_vdiskcopy_capacity 5.51TB\ntotal_used_capacity 5.05TB\ntotal_overallocation 67\ntotal_vdisk_capacity 5.51TB\ntotal_allocated_extent_capacity 5.07TB\nstatistics_status on\nstatistics_frequency 5\ncluster_locale en_US\ntime_zone 246 Asia/Shanghai\ncode_level 7.4.0.11 (build 103.29.1609070000)\nconsole_IP 51.10.58.200:443\nid_alias 00000200A1007E1F\ngm_link_tolerance 300\ngm_inter_cluster_delay_simulation 0\ngm_intra_cluster_delay_simulation 0\ngm_max_host_delay 5\nemail_reply\nemail_contact\nemail_contact_primary\nemail_contact_alternate\nemail_contact_location\nemail_contact2\nemail_contact2_primary\nemail_contact2_alternate\nemail_state stopped\ninventory_mail_interval 0\ncluster_ntp_IP_address\ncluster_isns_IP_address\niscsi_auth_method none\niscsi_chap_secret\nauth_service_configured no\nauth_service_enabled no\nauth_service_url\nauth_service_user_name\nauth_service_pwd_set no\nauth_service_cert_set no\nauth_service_type 
tip\nrelationship_bandwidth_limit 25\ntier ssd\ntier_capacity 0.00MB\ntier_free_capacity 0.00MB\ntier enterprise\ntier_capacity 0.00MB\ntier_free_capacity 0.00MB\ntier nearline\ntier_capacity 8.13TB\ntier_free_capacity 3.06TB\nhas_nas_key no\nlayer storage\nrc_buffer_size 48\ncompression_active no\ncompression_virtual_capacity 0.00MB\ncompression_compressed_capacity 0.00MB\ncompression_uncompressed_capacity 0.00MB\ncache_prefetch on\nemail_organization\nemail_machine_address\nemail_machine_city\nemail_machine_state XX\nemail_machine_zip\nemail_machine_country\ntotal_drive_raw_capacity 10.92TB\ncompression_destage_mode off\nlocal_fc_port_mask 1111111111111111111111111111111\npartner_fc_port_mask 11111111111111111111111111111\nhigh_temp_mode off\ntopology standard\ntopology_status\nrc_auth_method none\nvdisk_protection_time 15\nvdisk_protection_enabled no\nproduct_name IBM Storwize V7000\nmax_replication_delay 0\npartnership_exclusion_threshold 315\n\"\"\"\n\nenclosure_info = \"\"\"id:status:type:managed:IO_id:IO_group_name:product_MTM\n1:online:control:yes:0:io_grp0:2076-124:78N16G4:2:2:2:2:24:0:0\n\"\"\"\n\npools_info = \"\"\"id name      status mdisk_count vdisk_count capacity\n1  mdiskgrp0 online 1           101         8.13TB   1024        3.06TB\n\"\"\"\n\npool_info = \"\"\"id 1\nname mdiskgrp0\nstatus online\nmdisk_count 1\nvdisk_count 101\ncapacity 8.13TB\nextent_size 1024\nfree_capacity 3.06TB\nvirtual_capacity 5.51TB\nused_capacity 5.05TB\nreal_capacity 5.06TB\noverallocation 67\nwarning 80\neasy_tier auto\neasy_tier_status balanced\ntier ssd\ntier_mdisk_count 0\ntier_capacity 0.00MB\ntier_free_capacity 0.00MB\ntier enterprise\ntier_mdisk_count 0\ntier_capacity 0.00MB\ntier_free_capacity 0.00MB\ntier nearline\ntier_mdisk_count 1\ntier_capacity 8.13TB\ntier_free_capacity 3.06TB\ncompression_active no\ncompression_virtual_capacity 0.00MB\ncompression_compressed_capacity 0.00MB\ncompression_uncompressed_capacity 0.00MB\nsite_id\nsite_name\nparent_mdisk_grp_id 1\nparent_mdisk_grp_name mdiskgrp0\nchild_mdisk_grp_count 0\nchild_mdisk_grp_capacity 0.00MB\ntype parent\nencrypt no\n\"\"\"\n\nvolumes_info = \"\"\"id  name            IO_group_id IO_group_name status\n0   V7000LUN_Mig    0           io_grp0       online 1\n\"\"\"\n\nvolume_info = \"\"\"id:0\nname:V7000LUN_Mig\nIO_group_id:0\nIO_group_name:io_grp0\nstatus:online\nmdisk_grp_id:1\nmdisk_grp_name:mdiskgrp0\ncapacity:50.00GB\ntype:striped\nformatted:no\nmdisk_id:\nmdisk_name:\nFC_id:\nFC_name:\nRC_id:\nRC_name:\nvdisk_UID:60050768028401F87C00000000000000\nthrottling:0\npreferred_node_id:3\nfast_write_state:empty\ncache:readwrite\nudid:\nfc_map_count:0\nsync_rate:50\ncopy_count:1\nse_copy_count:0\nfilesystem:\nmirror_write_priority:latency\nRC_change:no\ncompressed_copy_count:0\naccess_IO_group_count:1\nlast_access_time:190531130236\nparent_mdisk_grp_id:1\nparent_mdisk_grp_name:mdiskgrp0\n\ncopy_id:0\nstatus:online\nsync:yes\nprimary:yes\nmdisk_grp_id:1\nmdisk_grp_name:mdiskgrp0\ntype:striped\nmdisk_id:\nmdisk_name:\nfast_write_state:empty\nused_capacity:50.00GB\nreal_capacity:50.00GB\nfree_capacity:0.00MB\noverallocation:100\nautoexpand:\nwarning:\ngrainsize:\nse_copy:no\neasy_tier:on\neasy_tier_status:balanced\ntier:ssd\ntier_capacity:0.00MB\ntier:enterprise\ntier_capacity:0.00MB\ntier:nearline\ntier_capacity:50.00GB\ncompressed_copy:no\nuncompressed_used_capacity:50.00GB\nparent_mdisk_grp_id:1\nparent_mdisk_grp_name:mdiskgrp0\n\"\"\"\n\nalerts_info = \"\"\"sequence_number last_timestamp object_type object_id\n101             
201111165750   node        3         node1\n\"\"\"\n\nalert_info = \"\"\"sequence_number 101\nfirst_timestamp 201111165750\nfirst_timestamp_epoch 1605085070\nlast_timestamp 201111165750\nlast_timestamp_epoch 1605085070\nobject_type node\nobject_id 3\nobject_name node1\ncopy_id\nreporting_node_id 3\nreporting_node_name node1\nroot_sequence_number\nevent_count 1\nstatus message\nfixed no\nauto_fixed no\nnotification_type warning\nevent_id 980221\nevent_id_text Error log cleared\nerror_code\nerror_code_text\nmachine_type 2076124\nserial_number 78N16G4\nFRU None\nfixed_timestamp\nfixed_timestamp_epoch\ncallhome_type none\nsense1 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\nsense2 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\nsense3 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\nsense4 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\nsense5 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\nsense6 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\nsense7 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\nsense8 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\n\"\"\"\n\ntrap_info = {\n    '1.3.6.1.2.1.1.3.0': '0',\n    '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.2.6.190.3',\n    '1.3.6.1.4.1.2.6.190.4.1': '# Machine Type = 2076124',\n    '1.3.6.1.4.1.2.6.190.4.2': '# Serial Number = 78N16G4',\n    '1.3.6.1.4.1.2.6.190.4.3': '# Error ID = 981004 : FC discovery occurred, '\n    'no configuration changes were detected',\n    '1.3.6.1.4.1.2.6.190.4.4': '# Error Code = ',\n    '1.3.6.1.4.1.2.6.190.4.5': '# System Version = 7.4.0.11 (build 103.29.'\n    '1609070000)',\n    '1.3.6.1.4.1.2.6.190.4.6': '# FRU = None ',\n    '1.3.6.1.4.1.2.6.190.4.7': '# System Name = Cluster_192.168.70.125',\n    '1.3.6.1.4.1.2.6.190.4.8': '# Node ID = 3',\n    '1.3.6.1.4.1.2.6.190.4.9': '# Error Sequence Number = 165',\n    '1.3.6.1.4.1.2.6.190.4.10': '# Timestamp = Tue Nov 10 09:08:27 2020',\n    '1.3.6.1.4.1.2.6.190.4.11': '# Object Type = cluster',\n    '1.3.6.1.4.1.2.6.190.4.12': '# Object ID = 0',\n    '1.3.6.1.4.1.2.6.190.4.17': '# Object Name = Cluster_192.168.70.125',\n    '1.3.6.1.4.1.2.6.190.4.15': '# Copy ID =  ',\n    '1.3.6.1.4.1.2.6.190.4.16': '# Machine Part Number = ',\n    '1.3.6.1.4.1.2.6.190.4.13': '# Additional Data (0 -> 63) = 01080000018A0',\n    '1.3.6.1.4.1.2.6.190.4.14': '# Additional Data (64 -> 127) = 00000000000',\n    'transport_address': '51.10.58.200',\n    'storage_id': '4992d7f5-4f73-4123-a27b-6e27889f3852'\n}\n\nstorage_result = {\n    'name': 'Cluster_192.168.70.125',\n    'vendor': 'IBM',\n    'model': 'IBM Storwize V7000',\n    'status': 'normal',\n    'serial_number': '00000200A1207E1F',\n    'firmware_version': '7.4.0.11',\n    'location': 'local',\n    'total_capacity': 8961019766374,\n    'raw_capacity': 8961019766374,\n    'subscribed_capacity': 0,\n    'used_capacity': 5552533720268,\n    'free_capacity': 3408486046105\n}\n\npool_result = [\n    {\n        'name': 'mdiskgrp0',\n        'storage_id': '12345',\n        'native_storage_pool_id': '1',\n        'description': '',\n        'status': 'normal',\n        'storage_type': 'block',\n        'subscribed_capacity': 6058309069045,\n        'total_capacity': 8939029533818,\n        'used_capacity': 5552533720268,\n        'free_capacity': 3364505580994\n    }\n]\n\nvolume_result = [\n    {\n        'description': '',\n        'status': 'normal',\n        'total_capacity': 53687091200,\n        'used_capacity': 53687091200,\n        'type': 'thick',\n        'free_capacity': 0,\n        'native_volume_id': '0',\n        
'deduplicated': True,\n        'native_storage_pool_id': '1',\n        'wwn': '60050768028401F87C00000000000000',\n        'compressed': False,\n        'name': 'V7000LUN_Mig',\n        'storage_id': '12345'\n    }\n]\n\nalert_result = [\n    {\n        'type': 'EquipmentAlarm',\n        'location': 'node1',\n        'category': 'Fault',\n        'occur_time': 1605085070000,\n        'sequence_number': '101',\n        'resource_type': 'node',\n        'alert_name': 'Error log cleared',\n        'severity': 'warning',\n        'alert_id': '980221',\n        'description': 'Error log cleared'\n    }\n]\n\ntrap_alert_result = {\n    'alert_id': '981004',\n    'type': 'EquipmentAlarm',\n    'severity': 'Informational',\n    'sequence_number': '165',\n    'description': 'FC discovery occurred, no configuration changes '\n                   'were detected',\n    'occur_time': 1604970507000,\n    'alert_name': 'FC discovery occurred, no configuration changes '\n                  'were detected',\n    'resource_type': 'cluster',\n    'location': 'Cluster_192.168.70.125',\n    'category': 'Fault'\n}\nget_all_controllers = \"\"\"id name\n2  node_165084\n\"\"\"\nget_single_controller = \"\"\"id 2\nid 2\nname node_165084\nUPS_serial_number 100025I194\nWWNN 500507680100EF7C\nstatus online\nIO_group_id 0\nIO_group_name io_grp0\npartner_node_id 4\npartner_node_name node1\nconfig_node yes\nUPS_unique_id 2040000085641244\nport_id 500507680140EF7C\nport_status active\nport_speed 8Gb\nport_id 500507680130EF7C\nport_status active\nport_speed 8Gb\nport_id 500507680110EF7C\nport_status active\nport_speed 8Gb\nport_id 500507680120EF7C\nport_status active\nport_speed 8Gb\nhardware CG8\niscsi_name iqn.1986-03.com.ibm:2145.cluster8.44.162.140.node165084\niscsi_alias\nfailover_active no\nfailover_name node1\nfailover_iscsi_name iqn.1986-03.com.ibm:2145.cluster8.44.162.140.node1\nfailover_iscsi_alias\npanel_name 165084\nenclosure_id\ncanister_id\nenclosure_serial_number\nservice_IP_address 8.44.162.142\nservice_gateway 8.44.128.1\nservice_subnet_mask 255.255.192.0\nservice_IP_address_6\nservice_gateway_6\nservice_prefix_6\nservice_IP_mode static\nservice_IP_mode_6\nsite_id\nsite_name\nidentify_LED off\nproduct_mtm 2145-CG8\ncode_level 7.8.1.11 (build 135.9.1912100725000)\nserial_number 75PVZNA\nmachine_signature 0214-784E-C029-0147\n\"\"\"\n\nget_controller_cpu = \"\"\"id,2\nname,node_165084\nstatus,online\nIO_group_id,0\nIO_group_name,io_grp0\nhardware,CG8\nactual_different,no\nactual_valid,yes\nmemory_configured,24\nmemory_actual,24\nmemory_valid,yes\ncpu_count,1\ncpu_socket,1\ncpu_configured,6 core Intel(R) Xeon(R) CPU E5645 @ 2.40GHz\ncpu_actual,6 core Intel(R) Xeon(R) CPU E5645 @ 2.40GHz\ncpu_valid,yes\nadapter_count,3\nadapter_location,1\nadapter_configured,Four port 8Gb/s FC adapter\nadapter_actual,Four port 8Gb/s FC adapter\nadapter_valid,yes\nadapter_location,0\nadapter_configured,Two port 1Gb/s Ethernet adapter\nadapter_actual,Two port 1Gb/s Ethernet adapter\nadapter_valid,yes\nadapter_location,2\nadapter_configured,none\nadapter_actual,none\nadapter_valid,yes\nports_different,no\n\"\"\"\n\ncontroller_result = [\n    {\n        'name': 'node_165084',\n        'storage_id': '12345',\n        'native_controller_id': '2',\n        'status': 'normal',\n        'soft_version': '7.8.1.11',\n        'location': 'node_165084',\n        'cpu_info': '6 core Intel(R) Xeon(R) CPU E5645 @ 2.40GHz',\n        'cpu_count': 1\n    }\n]\n\nget_all_disks = \"\"\"id name\n4 mdisk4\n\"\"\"\nget_single_disk = \"\"\"id 4\nname 
mdisk4\nstatus offline\nmode managed\nmdisk_grp_id 1\nmdisk_grp_name Pool0_NBE\ncapacity 2.0TB\nquorum_index\nblock_size 512\ncontroller_name NBEPOC_target_Dorado5000V6\nctrl_type 4\nctrl_WWNN 210030E98EE1914C\ncontroller_id 41\npath_count 0\nmax_path_count 0\nctrl_LUN_# 0000000000000001\nUID 630e98e100e1914c1aa793ae0000001900000000000000000000000000000000\npreferred_WWPN\nactive_WWPN\nfast_write_state empty\nraid_status\nraid_level\nredundancy\nstrip_size\nspare_goal\nspare_protection_min\nbalanced\ntier tier0_flash\nslow_write_priority\nfabric_type fc\nsite_id\nsite_name\neasy_tier_load medium\nencrypt no\ndistributed no\ndrive_class_id\ndrive_count 0\nstripe_width 0\nrebuild_areas_total\nrebuild_areas_available\nrebuild_areas_goal\ndedupe no\npreferred_iscsi_port_id\nactive_iscsi_port_id\nreplacement_date\n\"\"\"\ndisk_result = [\n    {\n        'name': 'mdisk4',\n        'storage_id': '12345',\n        'native_disk_id': '4',\n        'capacity': 2199023255552,\n        'status': 'offline',\n        'physical_type': 'fc',\n        'native_disk_group_id': 'Pool0_NBE',\n        'location': 'NBEPOC_target_Dorado5000V6_mdisk4'\n    }\n]\nget_all_fcports = \"\"\"id fc_io_port_id\n0 1\n\"\"\"\nget_single_fcport = \"\"\"id 0\nfc_io_port_id 1\nport_id 1\ntype fc\nport_speed 8Gb\nnode_id 1\nnode_name node1\nWWPN 500507680140EF3E\nnportid 850600\nstatus active\nswitch_WWPN 200650EB1A8A59B8\nfpma N/A\nvlanid N/A\nfcf_MAC N/A\nattachment switch\ncluster_use local_partner\nadapter_location 1\nadapter_port_id 1\nfabric_WWN 100050EB1A8A59B8\n \"\"\"\nget_iscsiport_1 = \"\"\"id 1\nnode_id 1\nnode_name node1\nIP_address\nmask\ngateway\nIP_address_6\nprefix_6\ngateway_6\nMAC 34:40:b5:d7:5a:94\nduplex Full\nstate unconfigured\nspeed 1Gb/s\nfailover no\nmtu 1500\nlink_state active\nhost\nremote_copy 0\nhost_6\nremote_copy_6 0\nremote_copy_status\nremote_copy_status_6\nvlan\nvlan_6\nadapter_location 0\nadapter_port_id 1\ndcbx_state\nlossless_iscsi\nlossless_iscsi6\niscsi_priority_tag\nfcoe_priority_tag\npfc_enabled_tags\npfc_disabled_tags\npriority_group_0\npriority_group_1\npriority_group_2\npriority_group_3\npriority_group_4\npriority_group_5\npriority_group_6\npriority_group_7\nbandwidth_allocation\nstorage\nstorage_6\n\nid 1\nnode_id 1\nnode_name node1\nIP_address\nmask\ngateway\nIP_address_6\nprefix_6\ngateway_6\nMAC 34:40:b5:d7:5a:94\nduplex Full\nstate unconfigured\nspeed 1Gb/s\nfailover yes\nmtu 1500\nlink_state active\nhost\nremote_copy 0\nhost_6\nremote_copy_6 0\nremote_copy_status\nremote_copy_status_6\nvlan\nvlan_6\nadapter_location 0\nadapter_port_id 1\ndcbx_state\nlossless_iscsi\nlossless_iscsi6\niscsi_priority_tag\nfcoe_priority_tag\npfc_enabled_tags\npfc_disabled_tags\npriority_group_0\npriority_group_1\npriority_group_2\npriority_group_3\npriority_group_4\npriority_group_5\npriority_group_6\npriority_group_7\nbandwidth_allocation\nstorage\nstorage_6\n\nid 1\nnode_id 2\nnode_name node_165084\nIP_address\nmask\ngateway\nIP_address_6\nprefix_6\ngateway_6\nMAC 34:40:b5:d4:0c:f0\nduplex Full\nstate unconfigured\nspeed 1Gb/s\nfailover no\nmtu 1500\nlink_state active\nhost\nremote_copy 0\nhost_6\nremote_copy_6 0\nremote_copy_status\nremote_copy_status_6\nvlan\nvlan_6\nadapter_location 0\nadapter_port_id 
1\ndcbx_state\nlossless_iscsi\nlossless_iscsi6\niscsi_priority_tag\nfcoe_priority_tag\npfc_enabled_tags\npfc_disabled_tags\npriority_group_0\npriority_group_1\npriority_group_2\npriority_group_3\npriority_group_4\npriority_group_5\npriority_group_6\npriority_group_7\nbandwidth_allocation\nstorage\nstorage_6\n\nid 1\nnode_id 2\nnode_name node_165084\nIP_address\nmask\ngateway\nIP_address_6\nprefix_6\ngateway_6\nMAC 34:40:b5:d4:0c:f0\nduplex Full\nstate unconfigured\nspeed 1Gb/s\nfailover yes\nmtu 1500\nlink_state active\nhost\nremote_copy 0\nhost_6\nremote_copy_6 0\nremote_copy_status\nremote_copy_status_6\nvlan\nvlan_6\nadapter_location 0\nadapter_port_id 1\ndcbx_state\nlossless_iscsi\nlossless_iscsi6\niscsi_priority_tag\nfcoe_priority_tag\npfc_enabled_tags\npfc_disabled_tags\npriority_group_0\npriority_group_1\npriority_group_2\npriority_group_3\npriority_group_4\npriority_group_5\npriority_group_6\npriority_group_7\nbandwidth_allocation\nstorage\nstorage_6\n \"\"\"\nget_iscsiport_2 = \"\"\"id 2\nnode_id 1\nnode_name node1\nIP_address\nmask\ngateway\nIP_address_6\nprefix_6\ngateway_6\nMAC 34:40:b5:d7:5a:94\nduplex Full\nstate unconfigured\nspeed 1Gb/s\nfailover no\nmtu 1500\nlink_state active\nhost\nremote_copy 0\nhost_6\nremote_copy_6 0\nremote_copy_status\nremote_copy_status_6\nvlan\nvlan_6\nadapter_location 0\nadapter_port_id 1\ndcbx_state\nlossless_iscsi\nlossless_iscsi6\niscsi_priority_tag\nfcoe_priority_tag\npfc_enabled_tags\npfc_disabled_tags\npriority_group_0\npriority_group_1\npriority_group_2\npriority_group_3\npriority_group_4\npriority_group_5\npriority_group_6\npriority_group_7\nbandwidth_allocation\nstorage\nstorage_6\n\nid 2\nnode_id 1\nnode_name node1\nIP_address\nmask\ngateway\nIP_address_6\nprefix_6\ngateway_6\nMAC 34:40:b5:d7:5a:94\nduplex Full\nstate unconfigured\nspeed 1Gb/s\nfailover yes\nmtu 1500\nlink_state active\nhost\nremote_copy 0\nhost_6\nremote_copy_6 0\nremote_copy_status\nremote_copy_status_6\nvlan\nvlan_6\nadapter_location 0\nadapter_port_id 1\ndcbx_state\nlossless_iscsi\nlossless_iscsi6\niscsi_priority_tag\nfcoe_priority_tag\npfc_enabled_tags\npfc_disabled_tags\npriority_group_0\npriority_group_1\npriority_group_2\npriority_group_3\npriority_group_4\npriority_group_5\npriority_group_6\npriority_group_7\nbandwidth_allocation\nstorage\nstorage_6\n\nid 2\nnode_id 2\nnode_name node_165084\nIP_address\nmask\ngateway\nIP_address_6\nprefix_6\ngateway_6\nMAC 34:40:b5:d4:0c:f0\nduplex Full\nstate unconfigured\nspeed 1Gb/s\nfailover no\nmtu 1500\nlink_state active\nhost\nremote_copy 0\nhost_6\nremote_copy_6 0\nremote_copy_status\nremote_copy_status_6\nvlan\nvlan_6\nadapter_location 0\nadapter_port_id 1\ndcbx_state\nlossless_iscsi\nlossless_iscsi6\niscsi_priority_tag\nfcoe_priority_tag\npfc_enabled_tags\npfc_disabled_tags\npriority_group_0\npriority_group_1\npriority_group_2\npriority_group_3\npriority_group_4\npriority_group_5\npriority_group_6\npriority_group_7\nbandwidth_allocation\nstorage\nstorage_6\n\nid 2\nnode_id 2\nnode_name node_165084\nIP_address\nmask\ngateway\nIP_address_6\nprefix_6\ngateway_6\nMAC 34:40:b5:d4:0c:f0\nduplex Full\nstate unconfigured\nspeed 1Gb/s\nfailover yes\nmtu 1500\nlink_state active\nhost\nremote_copy 0\nhost_6\nremote_copy_6 0\nremote_copy_status\nremote_copy_status_6\nvlan\nvlan_6\nadapter_location 0\nadapter_port_id 
1\ndcbx_state\nlossless_iscsi\nlossless_iscsi6\niscsi_priority_tag\nfcoe_priority_tag\npfc_enabled_tags\npfc_disabled_tags\npriority_group_0\npriority_group_1\npriority_group_2\npriority_group_3\npriority_group_4\npriority_group_5\npriority_group_6\npriority_group_7\nbandwidth_allocation\nstorage\nstorage_6\n \"\"\"\nget_file_list = 'id filename\\n' \\\n                '1 Nn_stats_78N16G4-2_211201_161110\\n' \\\n                '2 Nn_stats_78N16G4-2_211201_161210\\n' \\\n                '3 Nm_stats_78N16G4-2_211201_161110\\n' \\\n                '4 Nm_stats_78N16G4-2_211201_161210\\n' \\\n                '5 Nv_stats_78N16G4-2_211201_161110\\n' \\\n                '6 Nv_stats_78N16G4-2_211201_161210'\nfile_nv_1611 = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<vdsk idx=\"0\"\nctps=\"0\" ctrhs=\"0\" ctrhps=\"0\" ctds=\"0\"\nctwfts=\"0\" ctwwts=\"0\" ctwfws=\"0\" ctwhs=\"0\"\ncv=\"0\" cm=\"0\" ctws=\"0\" ctrs=\"0\"\nctr=\"0\" ctw=\"0\" ctp=\"0\" ctrh=\"0\"\nctrhp=\"0\" ctd=\"0\" ctwft=\"0\" ctwwt=\"0\"\nctwfw=\"0\" ctwfwsh=\"0\" ctwfwshs=\"0\" ctwh=\"0\"\ngwot=\"0\" gwo=\"0\" gws=\"0\" gwl=\"0\"\nid=\"powerha_fence\"\nro=\"0\" wo=\"0\" wou=\"0\" rb=\"0\" wb=\"0\"\nrl=\"0\" wl=\"0\" rlw=\"0\" wlw=\"0\" xl=\"0\">\n <ca rh=\"0\" d=\"0\" ft=\"0\" wt=\"0\" fw=\"0\" wh=\"0\" v=\"0\" m=\"0\" ri=\"0\" wi=\"0\" r=\"0\"\ndav=\"0\" dcn=\"0\" sav=\"0\" scn=\"0\" teav=\"0\"\n tsav=\"0\"  tav=\"0\"  pp=\"0\"/>\n</vdsk>\n\"\"\"\nfile_nv_1612 = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<vdsk idx=\"0\"\nctps=\"0\" ctrhs=\"0\" ctrhps=\"0\" ctds=\"0\"\nctwfts=\"0\" ctwwts=\"0\" ctwfws=\"0\" ctwhs=\"0\"\ncv=\"0\" cm=\"0\" ctws=\"0\" ctrs=\"0\"\nctr=\"0\" ctw=\"0\" ctp=\"0\" ctrh=\"0\"\nctrhp=\"0\" ctd=\"0\" ctwft=\"0\" ctwwt=\"0\"\nctwfw=\"0\" ctwfwsh=\"0\" ctwfwshs=\"0\" ctwh=\"0\"\ngwot=\"0\" gwo=\"0\" gws=\"0\" gwl=\"0\"\nid=\"powerha_fence\"\nro=\"0\" wo=\"0\" wou=\"0\" rb=\"0\" wb=\"0\"\nrl=\"0\" wl=\"0\" rlw=\"0\" wlw=\"0\" xl=\"0\">\n<ca rh=\"0\" d=\"0\" ft=\"0\" wt=\"0\" fw=\"0\" wh=\"0\" v=\"0\" m=\"0\" ri=\"0\"\n wi=\"0\" r=\"0\" dav=\"0\" dcn=\"0\" sav=\"0\" scn=\"0\" teav=\"0\"\n tsav=\"0\"  tav=\"0\"  pp=\"0\"/>\n</vdsk>\n\"\"\"\nfile_nm_1611 = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<mdsk idx=\"0\"\n id=\"mdisk1\" ro=\"160422028\" wo=\"4792298\" rb=\"65855202896\" wb=\"5087205812\"\n  re=\"4510327873\" we=\"324970648\" rq=\"4510327873\" wq=\"324970648\"\n  ure=\"4511020738035\" uwe=\"325020569160\" urq=\"4511020738035\"\n   uwq=\"325020569160\"\n pre=\"14804\" pwe=\"0\" pro=\"14804\" pwo=\"0\">\n<ca dav=\"0\" dtav=\"0\" dfav=\"0\" />\n</mdsk>\n\"\"\"\nfile_nm_1612 = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<mdsk idx=\"0\"\n id=\"mdisk1\" ro=\"16532168\" wo=\"4800940\" rb=\"807398566\" wb=\"1268035694\"\n  re=\"336180638\" we=\"392975230\" rq=\"336180638\" wq=\"392975230\"\n  ure=\"336232281210\" uwe=\"393035597850\" urq=\"336232281210\"\n  uwq=\"393035597850\"\n pre=\"0\" pwe=\"0\" pro=\"0\" pwo=\"0\">\n<ca dav=\"0\" dtav=\"0\" dfav=\"0\" />\n</mdsk>\n\"\"\"\nfile_nn_1611 = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<port id=\"1\"\ntype=\"FC\"\ntype_id=\"1\"\nwwpn=\"0x50050768021065cb\"\nfc_wwpn=\"0x50050768021065cb\"\nfcoe_wwpn=\"\"\nsas_wwn=\"\"\niqn=\"\"\nhbt=\"534901200817\" hbr=\"523369795104\" het=\"0\" her=\"186406977\"\ncbt=\"0\" cbr=\"52250\" cet=\"1324\" cer=\"0\"\nlnbt=\"49310\" lnbr=\"197487\" lnet=\"2073731\" lner=\"2070067\"\nrmbt=\"0\" rmbr=\"0\" rmet=\"0\" rmer=\"0\"\nlf=\"9\" lsy=\"21\" lsi=\"5\" pspe=\"0\"\nitw=\"295290111\" icrc=\"0\" 
bbcz=\"29140\"\n/>\n\"\"\"\nfile_nn_1612 = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<port id=\"1\"\ntype=\"FC\"\ntype_id=\"1\"\nwwpn=\"0x50050768021065cb\"\nfc_wwpn=\"0x50050768021065cb\"\nfcoe_wwpn=\"\"\nsas_wwn=\"\"\niqn=\"\"\nhbt=\"534901200817\" hbr=\"523369795104\" het=\"0\" her=\"186406977\"\ncbt=\"0\" cbr=\"52250\" cet=\"1324\" cer=\"0\"\nlnbt=\"49310\" lnbr=\"197487\" lnet=\"2073806\" lner=\"2070142\"\nrmbt=\"0\" rmbr=\"0\" rmet=\"0\" rmer=\"0\"\nlf=\"9\" lsy=\"21\" lsi=\"5\" pspe=\"0\"\nitw=\"295290111\" icrc=\"0\" bbcz=\"29140\"\n/>\n\"\"\"\nfile_nn_node_1611 = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<node id=\"node1\" cluster=\"Cluster_V7000\" node_id=\"0x0000000000000003\"\n cluster_id=\"0x00000200a1207e1f\" ro=\"960680162\" wo=\"940411371\"\n  rb=\"2605358068064\" wb=\"2619210259131\" re=\"1193453\" we=\"135040076\"\n   rq=\"49536391\" wq=\"151133071\"/>\n\"\"\"\nfile_nn_node_1612 = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<node id=\"node1\" cluster=\"Cluster_V7000\" node_id=\"0x0000000000000003\"\n cluster_id=\"0x00000200a1207e1f\" ro=\"960684525\" wo=\"940415078\"\n  rb=\"2605359825065\" wb=\"2619220318131\" re=\"1193465\"\n   we=\"135040076\" rq=\"49536391\" wq=\"151134080\"/>\n\"\"\"\nresource_metrics = {\n    'volume': [\n        'iops', 'readIops', 'writeIops',\n        'throughput', 'readThroughput', 'writeThroughput',\n        'responseTime',\n        'ioSize', 'readIoSize', 'writeIoSize',\n    ],\n    'port': [\n        'iops', 'readIops', 'writeIops',\n        'throughput', 'readThroughput', 'writeThroughput',\n        'responseTime'\n    ],\n    'disk': [\n        'iops', 'readIops', 'writeIops',\n        'throughput', 'readThroughput', 'writeThroughput',\n        'responseTime'\n    ],\n    'controller': [\n        'iops', 'readIops', 'writeIops',\n        'throughput', 'readThroughput', 'writeThroughput',\n        'responseTime'\n    ]\n}\n\nport_result = [\n    {\n        'name': 'node1_0',\n        'storage_id': '12345',\n        'native_port_id': '0',\n        'location': 'node1_0',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'fc',\n        'speed': 8000000000,\n        'native_parent_id': 'node1',\n        'wwn': '500507680140EF3E'\n    }, {\n        'name': 'node1_1',\n        'storage_id': '12345',\n        'native_port_id': 'node1_1',\n        'location': 'node1_1',\n        'connection_status': 'connected',\n        'health_status': 'abnormal',\n        'type': 'eth',\n        'speed': 1000000000,\n        'native_parent_id': 'node1',\n        'mac_address': '34:40:b5:d7:5a:94',\n        'ipv4': '',\n        'ipv4_mask': '',\n        'ipv6': ''\n    }, {\n        'name': 'node_165084_1',\n        'storage_id': '12345',\n        'native_port_id': 'node_165084_1',\n        'location': 'node_165084_1',\n        'connection_status': 'connected',\n        'health_status': 'abnormal',\n        'type': 'eth',\n        'speed': 1000000000,\n        'native_parent_id': 'node_165084',\n        'mac_address': '34:40:b5:d4:0c:f0',\n        'ipv4': '',\n        'ipv4_mask': '',\n        'ipv6': ''\n    }, {\n        'name': 'node1_2',\n        'storage_id': '12345',\n        'native_port_id': 'node1_2',\n        'location': 'node1_2',\n        'connection_status': 'connected',\n        'health_status': 'abnormal',\n        'type': 'eth',\n        'speed': 1000000000,\n        'native_parent_id': 'node1',\n        'mac_address': '34:40:b5:d7:5a:94',\n        'ipv4': '',\n        
'ipv4_mask': '',\n        'ipv6': ''\n    }, {\n        'name': 'node_165084_2',\n        'storage_id': '12345',\n        'native_port_id': 'node_165084_2',\n        'location': 'node_165084_2',\n        'connection_status': 'connected',\n        'health_status': 'abnormal',\n        'type': 'eth',\n        'speed': 1000000000,\n        'native_parent_id': 'node_165084',\n        'mac_address': '34:40:b5:d4:0c:f0',\n        'ipv4': '',\n        'ipv4_mask': '',\n        'ipv6': ''\n    }\n]\nperf_get_port_fc = [\n    {\n        'name': '0',\n        'storage_id': '12345',\n        'native_port_id': '0',\n        'location': 'node1_0',\n        'connection_status': 'connected',\n        'health_status': 'normal',\n        'type': 'fc',\n        'max_speed': 8589934592,\n        'native_parent_id': 'node1',\n        'wwn': '0x50050768021065cb'\n    }\n]\nmetrics_result = [\n    constants.metric_struct(\n        name='iops', labels={\n            'storage_id': '12345',\n            'resource_type': 'volume',\n            'resource_id': '0',\n            'resource_name': 'powerha',\n            'type': 'RAW',\n            'unit': 'IOPS'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='readIops', labels={\n            'storage_id': '12345',\n            'resource_type': 'volume',\n            'resource_id': '0',\n            'resource_name': 'powerha',\n            'type': 'RAW',\n            'unit': 'IOPS'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='writeIops', labels={\n            'storage_id': '12345',\n            'resource_type': 'volume',\n            'resource_id': '0',\n            'resource_name': 'powerha',\n            'type': 'RAW',\n            'unit': 'IOPS'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='throughput', labels={\n            'storage_id': '12345',\n            'resource_type': 'volume',\n            'resource_id': '0',\n            'resource_name': 'powerha',\n            'type': 'RAW',\n            'unit': 'MB/s'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='readThroughput', labels={\n            'storage_id': '12345',\n            'resource_type': 'volume',\n            'resource_id': '0',\n            'resource_name': 'powerha',\n            'type': 'RAW',\n            'unit': 'MB/s'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='writeThroughput', labels={\n            'storage_id': '12345',\n            'resource_type': 'volume',\n            'resource_id': '0',\n            'resource_name': 'powerha',\n            'type': 'RAW',\n            'unit': 'MB/s'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='responseTime', labels={\n            'storage_id': '12345',\n            'resource_type': 'volume',\n            'resource_id': '0',\n            'resource_name': 'powerha',\n            'type': 'RAW',\n            'unit': 'ms'\n        }, values={\n            1638346330000: 0\n        }), constants.metric_struct(name='ioSize', labels={\n            'storage_id': '12345',\n            'resource_type': 'volume',\n            'resource_id': '0',\n            'resource_name': 'powerha',\n            'type': 'RAW',\n            'unit': 'KB'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='readIoSize', labels={\n            
'storage_id': '12345',\n            'resource_type': 'volume',\n            'resource_id': '0',\n            'resource_name': 'powerha',\n            'type': 'RAW',\n            'unit': 'KB'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='writeIoSize', labels={\n            'storage_id': '12345',\n            'resource_type': 'volume',\n            'resource_id': '0',\n            'resource_name': 'powerha',\n            'type': 'RAW',\n            'unit': 'KB'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='iops', labels={\n            'storage_id': '12345',\n            'resource_type': 'disk',\n            'resource_id': '0',\n            'resource_name': 'mdisk1',\n            'type': 'RAW',\n            'unit': 'IOPS'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='readIops', labels={\n            'storage_id': '12345',\n            'resource_type': 'disk',\n            'resource_id': '0',\n            'resource_name': 'mdisk1',\n            'type': 'RAW',\n            'unit': 'IOPS'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='writeIops', labels={\n            'storage_id': '12345',\n            'resource_type': 'disk',\n            'resource_id': '0',\n            'resource_name': 'mdisk1',\n            'type': 'RAW',\n            'unit': 'IOPS'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='throughput', labels={\n            'storage_id': '12345',\n            'resource_type': 'disk',\n            'resource_id': '0',\n            'resource_name': 'mdisk1',\n            'type': 'RAW',\n            'unit': 'MB/s'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='readThroughput', labels={\n            'storage_id': '12345',\n            'resource_type': 'disk',\n            'resource_id': '0',\n            'resource_name': 'mdisk1',\n            'type': 'RAW',\n            'unit': 'MB/s'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='writeThroughput', labels={\n            'storage_id': '12345',\n            'resource_type': 'disk',\n            'resource_id': '0',\n            'resource_name': 'mdisk1',\n            'type': 'RAW',\n            'unit': 'MB/s'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='responseTime', labels={\n            'storage_id': '12345',\n            'resource_type': 'disk',\n            'resource_id': '0',\n            'resource_name': 'mdisk1',\n            'type': 'RAW',\n            'unit': 'ms'\n        }, values={\n            1638346330000: 0\n        }), constants.metric_struct(name='iops', labels={\n            'storage_id': '12345',\n            'resource_type': 'port',\n            'resource_id': '0',\n            'resource_name': '0',\n            'type': 'RAW',\n            'unit': 'IOPS'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='readIops', labels={\n            'storage_id': '12345',\n            'resource_type': 'port',\n            'resource_id': '0',\n            'resource_name': '0',\n            'type': 'RAW',\n            'unit': 'IOPS'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='writeIops', labels={\n            'storage_id': '12345',\n            
'resource_type': 'port',\n            'resource_id': '0',\n            'resource_name': '0',\n            'type': 'RAW',\n            'unit': 'IOPS'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='throughput', labels={\n            'storage_id': '12345',\n            'resource_type': 'port',\n            'resource_id': '0',\n            'resource_name': '0',\n            'type': 'RAW',\n            'unit': 'MB/s'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='readThroughput', labels={\n            'storage_id': '12345',\n            'resource_type': 'port',\n            'resource_id': '0',\n            'resource_name': '0',\n            'type': 'RAW',\n            'unit': 'MB/s'\n        }, values={\n            1638346330000: 0.0\n        }), constants.metric_struct(name='writeThroughput', labels={\n            'storage_id': '12345',\n            'resource_type': 'port',\n            'resource_id': '0',\n            'resource_name': '0',\n            'type': 'RAW',\n            'unit': 'MB/s'\n        }, values={\n            1638346330000: 0.0\n        })]\nget_all_hosts = \"\"\"id name\n1 host1\n\"\"\"\nget_host_summary = \"\"\"id 38\nname tjy_test_iscsi\nport_count 3\ntype generic\nmask 11111111111111111111111111111111111111\niogrp_count 4\nstatus online\nsite_id\nsite_name\nhost_cluster_id\nhost_cluster_name\nWWPN 21000024FF543B0C\nnode_logged_in_count 1\nstate inactive\nWWPN 21000024FF438098\nnode_logged_in_count 1\nstate active\nWWPN 21000024FF41C461\nnode_logged_in_count 1\nstate inactive\n\"\"\"\nhost_result = [\n    {\n        'name': 'tjy_test_iscsi',\n        'storage_id': '12345',\n        'native_storage_host_id': '38',\n        'os_type': 'Unknown',\n        'status': 'normal'\n    }\n]\nget_all_views = \"\"\"id name SCSI_id vdisk_id vdisk_name\n2  Solaris11.3_57         0       27       PG_1\n6  hwstorage_8.44.133.80  0       24       wyktest\n7  VNX-WIN8-TEST          0       31       SVC-WIN8_test\n14 pd_esx6                0       65       pd_taiping0\n14 pd_esx6                1       66       pd_taiping1\n14 pd_esx6                2       67       pd_taiping2\n\"\"\"\nview_result = [\n    {\n        'name': '2_27',\n        'native_storage_host_id': '2',\n        'storage_id': '12345',\n        'native_volume_id': '27',\n        'native_masking_view_id': '2_27'\n    }, {\n        'name': '6_24',\n        'native_storage_host_id': '6',\n        'storage_id': '12345',\n        'native_volume_id': '24',\n        'native_masking_view_id': '6_24'\n    }, {\n        'name': '7_31',\n        'native_storage_host_id': '7',\n        'storage_id': '12345',\n        'native_volume_id': '31',\n        'native_masking_view_id': '7_31'\n    }, {\n        'name': '14_65',\n        'native_storage_host_id': '14',\n        'storage_id': '12345',\n        'native_volume_id': '65',\n        'native_masking_view_id': '14_65'\n    }, {\n        'name': '14_66',\n        'native_storage_host_id': '14',\n        'storage_id': '12345',\n        'native_volume_id': '66',\n        'native_masking_view_id': '14_66'\n    }, {\n        'name': '14_67',\n        'native_storage_host_id': '14',\n        'storage_id': '12345',\n        'native_volume_id': '67',\n        'native_masking_view_id': '14_67'\n    }\n]\ninit_result = [\n    {\n        'name': '21000024FF543B0C',\n        'storage_id': '12345',\n        'native_storage_host_initiator_id': '21000024FF543B0C',\n        'wwn': '21000024FF543B0C',\n 
       'status': 'online',\n        'type': 'fc',\n        'native_storage_host_id': '38'\n    }, {\n        'name': '21000024FF438098',\n        'storage_id': '12345',\n        'native_storage_host_initiator_id': '21000024FF438098',\n        'wwn': '21000024FF438098',\n        'status': 'online',\n        'type': 'fc',\n        'native_storage_host_id': '38'\n    }, {\n        'name': '21000024FF41C461',\n        'storage_id': '12345',\n        'native_storage_host_initiator_id': '21000024FF41C461',\n        'wwn': '21000024FF41C461',\n        'status': 'online',\n        'type': 'fc',\n        'native_storage_host_id': '38'\n    }\n]\n\n\ndef create_driver():\n\n    SSHHandler.login = mock.Mock(\n        return_value={\"\"})\n\n    return StorwizeSVCDriver(**ACCESS_INFO)\n\n\nclass TestStorwizeSvcStorageDriver(TestCase):\n    driver = create_driver()\n\n    def test_init(self):\n        SSHHandler.login = mock.Mock(\n            return_value={\"\"})\n        StorwizeSVCDriver(**ACCESS_INFO)\n\n    def test_list_storage(self):\n        SSHPool.get = mock.Mock(\n            return_value={paramiko.SSHClient()})\n        SSHHandler.do_exec = mock.Mock(\n            side_effect=[system_info])\n        storage = self.driver.get_storage(context)\n        self.assertDictEqual(storage, storage_result)\n\n    def test_list_storage_pools(self):\n        SSHPool.get = mock.Mock(\n            return_value={paramiko.SSHClient()})\n        SSHHandler.do_exec = mock.Mock(\n            side_effect=[pools_info, pool_info])\n        pool = self.driver.list_storage_pools(context)\n        self.assertDictEqual(pool[0], pool_result[0])\n\n    def test_list_volumes(self):\n        SSHPool.get = mock.Mock(\n            return_value={paramiko.SSHClient()})\n        SSHHandler.do_exec = mock.Mock(\n            side_effect=[volumes_info, volume_info])\n        volume = self.driver.list_volumes(context)\n        self.assertDictEqual(volume[0], volume_result[0])\n\n    def test_list_alerts(self):\n        query_para = {\n            \"begin_time\": 1605085070000,\n            \"end_time\": 1605085070000\n        }\n        SSHPool.get = mock.Mock(\n            return_value={paramiko.SSHClient()})\n        SSHHandler.do_exec = mock.Mock(\n            side_effect=[alerts_info, alert_info])\n        alert = self.driver.list_alerts(context, query_para)\n        self.assertEqual(alert[0].get('alert_id'),\n                         alert_result[0].get('alert_id'))\n\n    def test_parse_alert(self):\n        alert = self.driver.parse_alert(context, trap_info)\n        trap_alert_result['occur_time'] = alert['occur_time']\n        self.assertEqual(alert, trap_alert_result)\n\n    def test_clear_alert(self):\n        alert_id = 101\n        SSHPool.get = mock.Mock(\n            return_value={paramiko.SSHClient()})\n        SSHHandler.do_exec = mock.Mock(\n            side_effect=['CMMVC8275E'])\n        self.driver.clear_alert(context, alert_id)\n        with self.assertRaises(Exception) as exc:\n            SSHPool.get = mock.Mock(\n                return_value={paramiko.SSHClient()})\n            SSHHandler.do_exec = mock.Mock(\n                side_effect=['can not find alert'])\n            self.driver.clear_alert(context, alert_id)\n        self.assertIn('The results are invalid. 
can not find alert',\n                      str(exc.exception))\n\n    @mock.patch.object(SSHHandler, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_controllers(self, mock_ssh_get, mock_control):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_control.side_effect = [get_all_controllers, get_single_controller,\n                                    get_controller_cpu]\n        controller = self.driver.list_controllers(context)\n        self.assertEqual(controller, controller_result)\n\n    @mock.patch.object(SSHHandler, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_disks(self, mock_ssh_get, mock_disk):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_disk.side_effect = [get_all_disks, get_single_disk]\n        disk = self.driver.list_disks(context)\n        self.assertEqual(disk, disk_result)\n\n    @mock.patch.object(SSHHandler, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_ports(self, mock_ssh_get, mock_port):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_port.side_effect = [get_all_fcports, get_single_fcport,\n                                 get_iscsiport_1, get_iscsiport_2]\n        port = self.driver.list_ports(context)\n        self.assertEqual(port, port_result)\n\n    @mock.patch.object(SSHHandler, 'get_fc_port')\n    @mock.patch.object(Tools, 'get_remote_file_to_xml')\n    @mock.patch.object(SSHHandler, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_collect_perf_metrics(self, mock_ssh_get, mock_file_list,\n                                  mock_get_file, mock_fc_port):\n        start_time = 1637346270000\n        end_time = 1639346330000\n        storage_id = '12345'\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_file_list.return_value = get_file_list\n        mock_get_file.return_value = [ET.fromstring(file_nv_1611),\n                                      ET.fromstring(file_nv_1612),\n                                      ET.fromstring(file_nm_1611),\n                                      ET.fromstring(file_nm_1612),\n                                      ET.fromstring(file_nn_1611),\n                                      ET.fromstring(file_nn_1612),\n                                      ET.fromstring(file_nn_node_1611),\n                                      ET.fromstring(file_nn_node_1612)\n                                      ]\n        mock_fc_port.return_value = perf_get_port_fc\n        metrics = self.driver.collect_perf_metrics(context, storage_id,\n                                                   resource_metrics,\n                                                   start_time, end_time)\n        self.assertEqual(metrics[0][1]['resource_name'], 'powerha')\n\n    @mock.patch.object(SSHHandler, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_hosts(self, mock_ssh_get, mock_host):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_host.side_effect = [get_all_hosts, get_host_summary]\n        host = self.driver.list_storage_hosts(context)\n        self.assertEqual(host, host_result)\n\n    @mock.patch.object(SSHHandler, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_masking_views(self, mock_ssh_get, mock_view):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_view.return_value = get_all_views\n        view = self.driver.list_masking_views(context)\n        self.assertEqual(view, view_result)\n\n    @mock.patch.object(SSHHandler, 'do_exec')\n    @mock.patch.object(SSHPool, 'get')\n    def test_list_host_initiators(self, mock_ssh_get, mock_host):\n        mock_ssh_get.return_value = {paramiko.SSHClient()}\n        mock_host.side_effect = [get_all_hosts, get_host_summary]\n        init = self.driver.list_storage_host_initiators(context)\n        self.assertEqual(init, init_result)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/macro_san/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/macro_san/ms/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/macro_san/ms/test_ms_stor.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nfrom unittest import TestCase, mock\n\nimport paramiko\nimport six\nfrom paramiko import SSHClient\n\nsys.modules['delfin.cryptor'] = mock.Mock()\nimport time\nfrom oslo_utils import units\nfrom delfin.common import constants\nfrom delfin.drivers.macro_san.ms import consts\nfrom delfin.drivers.macro_san.ms.macro_ssh_client import MacroSanSSHPool\n\nfrom oslo_log import log\n\nfrom delfin import context\nfrom delfin.drivers.macro_san.ms.ms_handler import MsHandler\nfrom delfin.drivers.macro_san.ms.ms_stor import MacroSanDriver\n\nLOG = log.getLogger(__name__)\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"macro_san\",\n    \"model\": \"macro_san\",\n    \"ssh\": {\n        \"host\": \"110.143.133.200\",\n        \"port\": 22,\n        \"username\": \"admin\",\n        \"password\": \"admin\"\n    }\n}\nPOOLS_INFO = \"\"\"Last login: Wed Jul 13 15:05:45 2022 from 192.168.3.235\\r\n(null)@(null) ODSP CLI> pool mgt getlist\\r\nStorage Pools Sum: 4\\r\n\\r\nName: SYS-Pool\\r\nType: Traditional\\r\nIs Foreign: No\\r\nIs Reserved: Yes\\r\nCell Size: 1GB\\r\nAll Capacity: 7144GB\\r\nUsed Capacity: 961GB\\r\nUsed Capacity Rate: 13.5%\\r\nFree Capacity(RAID): 6183GB\\r\nFree Capacity(HDD RAID): 0GB\\r\nFree Capacity(SSD RAID): 6183GB\\r\n\\r\nName: pool-1\\r\nType: Traditional\\r\nIs Foreign: No\\r\nIs Reserved: No\\r\nCell Size: 1GB\\r\nAll Capacity: 0GB\\r\nUsed Capacity: 0GB\\r\nUsed Capacity Rate: 0.0%\\r\nFree Capacity(RAID): 0GB\\r\nFree Capacity(HDD RAID): 0GB\\r\nFree Capacity(SSD RAID): 0GB\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI>\"\"\"\nRAID_SYS_POOL = \"\"\"(null)@(null) ODSP CLI> raid mgt getlist -p SYS-Pool\\r\nRAIDs Sum: 1\\r\n\\r\nName: SYS-RAID\\r\nRAID Level: RAID5\\r\nHealth Status: Normal\\r\nTotal Capacity: 7144GB\\r\nFree Capacity: 6183GB\\r\nDisk Type: SSD\\r\nData Disks Sum: 8\\r\nDedicated Spare Disks Sum: 1\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI>\"\"\"\n\nRAID_POOL_1 = \"\"\"(null)@(null) ODSP CLI> raid mgt getlist -p pool-1\\r\nRAIDs Sum: 0\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\n\nPOOLS_DATA = [{'name': 'SYS-Pool', 'storage_id': '12345',\n               'native_storage_pool_id': 'SYS-Pool', 'status': 'normal',\n               'storage_type': 'block', 'total_capacity': 7670811590656.0,\n               'used_capacity': 1031865892864.0,\n               'free_capacity': 6638945697792.0},\n              {'name': 'pool-1', 'storage_id': '12345',\n               'native_storage_pool_id': 'pool-1', 'status': 'unknown',\n               'storage_type': 'block', 'total_capacity': 0.0,\n               'used_capacity': 0.0,\n               'free_capacity': 0.0}]\nVOLUME_INFO = \"\"\"(null)@(null) ODSP CLI> lun mgt getlist -p SYS-Pool\\r\nSYS-Pool: 18 LUNs (18 Normal 0 Faulty)\\r\n\\r\nName : SYS-LUN-Config\\r\nLUN id   : 0\\r\nTotal Size : 4GB\\r\nCurrent 
Owner(SP) : SP1\\r\nHealth Status : Normal\\r\nCache Status : Disable\\r\nMapped to Client : No\\r\n\\r\n\\r\nName : SYS-LUN-Log\\r\nLUN id   : 1\\r\nTotal Size : 4GB\\r\nCurrent Owner(SP) : SP1\\r\nHealth Status : Normal\\r\nCache Status : Disable\\r\nMapped to Client : No\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nVOLUME_QUERY_ONE = \"\"\"(null)@(null) ODSP CLI> lun mgt query -n SYS-LUN-Config\\r\nName : SYS-LUN-Config\\r\nDevice ID: 600B342F1B0F9ABD7BABD272BD0000DA\\r\nTotal Size : 4GB\\r\nCurrent Owner(SP) : SP1\\r\nOwner(Pool) : SYS-Pool\\r\nHealth Status : Normal\\r\nIs Reserved : Yes\\r\nIs Foreign : No\\r\nCreated Time: 2021/12/23 11:26:40\\r\nCache Set Status: Disable\\r\nCache Status: Disable\\r\nLUN Distr Mode : concatenated\\r\nMapped to Client : No\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nVOLUME_QUERY_TWO = \"\"\"(null)@(null) ODSP CLI> lun mgt query -n SYS-LUN-Log\\r\nName : SYS-LUN-Log\\r\nDevice ID: 600B342EF209582D8D07D1EE4D0000DA\\r\nTotal Size : 4GB\\r\nCurrent Owner(SP) : SP1\\r\nOwner(Pool) : SYS-Pool\\r\nHealth Status : Normal\\r\nIs Reserved : Yes\\r\nIs Foreign : No\\r\nCreated Time: 2021/12/23 11:26:44\\r\nCache Set Status: Disable\\r\nCache Status: Disable\\r\nLUN Distr Mode : concatenated\\r\nMapped to Client : No\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI>\"\"\"\nVOLUME_ONE_NEW = \"\"\"(null)@(null) ODSP CLI> lun mgt query -n SYS-LUN-Config\\r\nName: SYS-LUN-Config\\r\nWWN: 600B342F1B0F9ABD7BABD272BD0000DA\\r\nType: Standard-LUN\\r\nIs RDV LUN: No\\r\nTotal Logical Size: 4GB (209715200sector)\\r\nTotal Physical Size: 4GB (209715200sector)\\r\nThin-Provisioning: Disable\\r\nDefault Owner(SP): SP1\\r\nCurrent Owner(SP): SP1\\r\nOwner(Group): N/A\\r\nOwner(Pool): SYS-Pool\\r\nHealth Status: Normal\\r\nUa_type: ALUA\\r\nIs Reserved: No\\r\nIs Foreign: No\\r\nWrite Zero Status: Disable\\r\nCreated Time: 2020/03/02 17:49:15\\r\nRead Cache: Enable\\r\nRead Cache Status: Enable\\r\nWrite Cache: Enable\\r\nWrite Cache Status: Enable\\r\nMapped to Client: No\\r\nLUN UUID: 0x50b34200-154800ee-a8746477-234b74a7\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nVOLUME_TWO_NEW = \"\"\"(null)@(null) ODSP CLI> lun mgt query -n SYS-LUN-Log\\r\nName: SYS-LUN-Log\\r\nWWN: 600B3423899AC1EDB125DCAE6D4E00D0\\r\nNGUID: 040F09004EE6CA2500B342B11EAC9938\\r\nType: Standard-LUN\\r\nIs RDV LUN: No\\r\nTotal Logical Size: 1GB (2097152sector)\\r\nTotal Physical Size: 1GB (2097152sector)\\r\nThin-Provisioning: Enable\\r\nThin-LUN Extent Size: 16KB\\r\nThin-LUN Private-area Allocate Mode: SSD RAID First\\r\nThin-LUN Data-area Allocate Mode: HDD RAID First\\r\nThin-LUN Expand Threshold: 30GB\\r\nThin-LUN Expand Step Size: 50GB\\r\nThin-LUN Allocated Physical Capacity: 1GB\\r\nThin-LUN Allocated Physical Capacity Percentage: 100.0%\\r\nThin-LUN Used Capacity: 3956KB\\r\nThin-LUN Used Capacity Percentage: 0.0%\\r\nThin-LUN Unused Capacity: 1,048,576KB\\r\nThin-LUN Unused Capacity Percentage: 100.0%\\r\nThin-LUN Distribute Mode: Single\\r\nThin-LUN Dedup Switch: Disable\\r\nThin-LUN Compress Switch: Disable\\r\nDefault Owner(SP): SP1\\r\nCurrent Owner(SP): SP1\\r\nOwner(Group): N/A\\r\nOwner(Pool): Pool-1\\r\nHealth Status: Normal\\r\nUa_type: ALUA\\r\nIs Reserved: No\\r\nIs Foreign: No\\r\nCreated Time: 2022/08/29 17:36:37\\r\nRead Cache: Enable\\r\nRead Cache Status: Enable\\r\nWrite Cache: Enable\\r\nWrite Cache Status: Enable\\r\nMapped to Client: No\\r\nLUN UUID: 
0x00b34204-0f09004e-e6ca25b1-1eac9938\\r\nThin-LUN private UUID: 0x00b34204-0f09006f-6c27276c-a6d3f14b\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nVOLUME_TWO_INFO = \"\"\"(null)@(null) ODSP CLI> lun mgt getlist -p pool-1\\r\npool-1: 0 LUNs (0 Normal 0 Faulty)\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nVOLUMES_DATA = [\n    {'name': 'SYS-LUN-Config', 'storage_id': '12345', 'status': 'normal',\n     'native_volume_id': 'SYS-LUN-Config',\n     'native_storage_pool_id': 'SYS-Pool', 'type': 'thick',\n     'wwn': '600B342F1B0F9ABD7BABD272BD0000DA', 'total_capacity': 4294967296.0,\n     'used_capacity': 4294967296.0, 'free_capacity': 0.0},\n    {'name': 'SYS-LUN-Log', 'storage_id': '12345', 'status': 'normal',\n     'native_volume_id': 'SYS-LUN-Log', 'native_storage_pool_id': 'Pool-1',\n     'type': 'thin', 'wwn': '600B3423899AC1EDB125DCAE6D4E00D0',\n     'total_capacity': 1073741824.0, 'used_capacity': 4050944.0,\n     'free_capacity': 1069690880.0}]\nTHICK_VOLUMES_DATA = [\n    {'name': 'SYS-LUN-Config', 'storage_id': '12345', 'status': 'normal',\n     'native_volume_id': 'SYS-LUN-Config',\n     'native_storage_pool_id': 'SYS-Pool', 'type': 'thick',\n     'wwn': '600B342F1B0F9ABD7BABD272BD0000DA', 'total_capacity': 4294967296.0,\n     'used_capacity': 4294967296.0, 'free_capacity': 0.0},\n    {'name': 'SYS-LUN-Log', 'storage_id': '12345', 'status': 'normal',\n     'native_volume_id': 'SYS-LUN-Log', 'native_storage_pool_id': 'SYS-Pool',\n     'type': 'thick', 'wwn': '600B342EF209582D8D07D1EE4D0000DA',\n     'total_capacity': 4294967296.0, 'used_capacity': 4294967296.0,\n     'free_capacity': 0.0}]\nVERSION_INFO = \"\"\"(null)@(null) ODSP CLI> system mgt getversion\\r\n[SP1 Version]\\r\nSP1 ODSP_MSC Version: V2.0.14T04\\r\nSP1 ODSP_Driver Version: V607\\r\n\\r\n[SP2 Version]\\r\nSP2 ODSP_MSC Version: V2.0.14T04\\r\nSP2 ODSP_Driver Version: V607\\r\n\\r\n[SP3 Version]\\r\nSP3 ODSP_MSC Version: N/A\\r\nSP3 ODSP_Driver Version: N/A\\r\n\\r\n[SP4 Version]\\r\nSP4 ODSP_MSC Version: N/A\\r\nSP4 ODSP_Driver Version: N/A\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nCPU_INFO = \"\"\"(null)@(null) ODSP CLI> system mgt getcpuinfo\\r\n[SP1 CPU Information]\\r\nSP1 Processor0 ID: 0\\r\nSP1 Processor0 Vendor_id: GenuineIntel\\r\nSP1 Processor0 CPU Frequency: 2200.000 MHz\\r\nSP1 Processor1 ID: 1\\r\nSP1 Processor1 Vendor_id: GenuineIntel\\r\nSP1 Processor1 CPU Frequency: 2200.000 MHz\\r\nSP1 Processor2 ID: 2\\r\nSP1 Processor2 Vendor_id: GenuineIntel\\r\nSP1 Processor2 CPU Frequency: 2200.000 MHz\\r\nSP1 Processor3 ID: 3\\r\nSP1 Processor3 Vendor_id: GenuineIntel\\r\nSP1 Processor3 CPU Frequency: 2200.000 MHz\\r\n\\r\n[SP2 CPU Information]\\r\nSP2 Processor0 ID: 0\\r\nSP2 Processor0 Vendor_id: GenuineIntel\\r\nSP2 Processor0 CPU Frequency: 2200.000 MHz\\r\nSP2 Processor1 ID: 1\\r\nSP2 Processor1 Vendor_id: GenuineIntel\\r\nSP2 Processor1 CPU Frequency: 2200.000 MHz\\r\nSP2 Processor2 ID: 2\\r\nSP2 Processor2 Vendor_id: GenuineIntel\\r\nSP2 Processor2 CPU Frequency: 2200.000 MHz\\r\nSP2 Processor3 ID: 3\\r\nSP2 Processor3 Vendor_id: GenuineIntel\\r\nSP2 Processor3 CPU Frequency: 2200.000 MHz\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI>\"\"\"\nHA_STATUS = \"\"\"(null)@(null) ODSP CLI> ha mgt getstatus\\r\nSP1 HA Running Status  : dual--single\\r\nSP2 HA Running Status  : dual--single\\r\nSP3 HA Running Status  : absent--poweroff\\r\nSP4 HA Running Status  : absent--poweroff\\r\n\\r\nCommand 
completed successfully.\\r\n(null)@(null) ODSP CLI>\"\"\"\nHA_STATUS_NEW = \"\"\"(null)@(null) ODSP CLI> ha mgt getstatus\\r\nSystem HA Status       : normal\\r\nSP1 HA Running Status  : single\\r\nSP2 HA Running Status  : single\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI>\"\"\"\nCONTROLLERS_DATA = [\n    {'name': 'SP1', 'storage_id': '12345', 'native_controller_id': 'SP1',\n     'status': 'normal', 'location': 'SP1', 'soft_version': 'V2.0.14T04',\n     'cpu_info': 'GenuineIntel@2200.000MHz', 'cpu_count': 1},\n    {'name': 'SP2', 'storage_id': '12345', 'native_controller_id': 'SP2',\n     'status': 'normal', 'location': 'SP2', 'soft_version': 'V2.0.14T04',\n     'cpu_info': 'GenuineIntel@2200.000MHz', 'cpu_count': 1},\n    {'name': 'SP3', 'storage_id': '12345', 'native_controller_id': 'SP3',\n     'status': 'offline', 'location': 'SP3', 'soft_version': 'N/A',\n     'cpu_info': ''},\n    {'name': 'SP4', 'storage_id': '12345', 'native_controller_id': 'SP4',\n     'status': 'offline', 'location': 'SP4', 'soft_version': 'N/A',\n     'cpu_info': ''}]\nDSU_INFO = \"\"\"(null)@(null) ODSP CLI> dsu mgt getlist\\r\nDSUs Sum:1\\r\n\\r\nName: DSU-7:1:1\\r\nDisks: 2\\r\nDSU EP1 SAS address: 500b342000dd26ff\\r\nDSU EP2 SAS address: 500b342000dd273f\\r\n\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nDISKS_INFO = \"\"\"(null)@(null) ODSP CLI> disk mgt getlist -d 7:1:1\\r\nDisks Sum: 2\\r\n\\r\nName: Disk-7:1:1:1\\r\nType: SSD\\r\nCapacity: 893GB\\r\nVendor: ATA\\r\nRPMs: 0\\r\nHealth Status: Normal\\r\nDisk Role: Data disk\\r\nOwner(Pool): SYS-Pool\\r\nOwner(RAID): SYS-RAID\\r\n\\r\nName: Disk-7:1:1:2\\r\nType: SSD\\r\nCapacity: 893GB\\r\nVendor: ATA\\r\nRPMs: 0\\r\nHealth Status: Normal\\r\nDisk Role: Data disk\\r\nOwner(Pool): SYS-Pool\\r\nOwner(RAID): SYS-RAID\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nDISK_ONE = \"\"\"(null)@(null) ODSP CLI> disk mgt query -d 7:1:1:1\\r\nName: Disk-7:1:1:1\\r\nType: HDD\\r\nCapacity: 893GB\\r\nVendor: ATA\\r\nModel: Micron_5200_MTFDDAK960TDD\\r\nFW Version: U004\\r\nSerial Number: 18311E8D2787\\r\nSize: 2.5inch\\r\nRPMs: 0\\r\nRead Cache Setting: Enable\\r\nWrite Cache Setting: Enable\\r\nHealth Status: Normal\\r\nRole: Data disk\\r\nOwner(Pool): SYS-Pool\\r\nOwner(RAID): SYS-RAID\\r\nLocating Status: NO\\r\nSP1 Disk Online Status: Online\\r\nSP2 Disk Online Status: Online\\r\nSP3 Disk Online Status: Online\\r\nSP4 Disk Online Status: Online\\r\nSSD Estimated Life Remaining: N/A\\r\nSSD Estimated Time Remaining: N/A\\r\nSSD Applicable Scene: N/A\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nDISKS_TWO = \"\"\"(null)@(null) ODSP CLI> disk mgt query -d 7:1:1:2\\r\nName: Disk-7:1:1:2\\r\nType: SSD\\r\nCapacity: 893GB\\r\nVendor: ATA\\r\nModel: Micron_5200_MTFDDAK960TDD\\r\nFW Version: U004\\r\nSerial Number: 18311E8D2C03\\r\nSize: 2.5inch\\r\nRPMs: 0\\r\nRead Cache Setting: Enable\\r\nWrite Cache Setting: Enable\\r\nHealth Status: Normal\\r\nRole: Data disk\\r\nOwner(Pool): SYS-Pool\\r\nOwner(RAID): SYS-RAID\\r\nLocating Status: NO\\r\nSP1 Disk Online Status: Online\\r\nSP2 Disk Online Status: Online\\r\nSP3 Disk Online Status: Online\\r\nSP4 Disk Online Status: Online\\r\nSSD Estimated Life Remaining: N/A\\r\nSSD Estimated Time Remaining: N/A\\r\nSSD Applicable Scene: N/A\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nDISKS_DATA = [{'name': 'Disk-7:1:1:1', 'storage_id': '12345',\n               'native_disk_id': 
'Disk-7:1:1:1',\n               'serial_number': '18311E8D2787', 'manufacturer': 'ATA',\n               'model': 'Micron_5200_MTFDDAK960TDD', 'firmware': 'U004',\n               'location': 'Disk-7:1:1:1', 'speed': 0,\n               'capacity': 958851448832.0, 'status': 'normal',\n               'physical_type': 'hdd', 'logical_type': 'data'},\n              {'name': 'Disk-7:1:1:2', 'storage_id': '12345',\n               'native_disk_id': 'Disk-7:1:1:2',\n               'serial_number': '18311E8D2C03', 'manufacturer': 'ATA',\n               'model': 'Micron_5200_MTFDDAK960TDD', 'firmware': 'U004',\n               'location': 'Disk-7:1:1:2', 'speed': 0,\n               'capacity': 958851448832.0, 'status': 'normal',\n               'physical_type': 'ssd', 'logical_type': 'data'}]\nFC_INFO = \"\"\"(null)@(null) ODSP CLI> client target queryportlist\\r\nfc port-1:4:1\\r\nwwn                 : 50:0b:34:20:02:fe:b5:0d\\r\nonline state        : 2\\r\nactual speed        : 0\\r\nport topology       : 0\\r\ninitiator num       : 0\\r\nfc port-1:4:2\\r\nwwn                 : 50:0b:34:20:02:fe:b5:0e\\r\nonline state        : 2\\r\nactual speed        : 0\\r\nport topology       : 0\\r\ninitiator num       : 0\\r\nfc port-1:4:3\\r\nwwn                 : 50:0b:34:20:02:fe:b5:0f\\r\nonline state        : 2\\r\nactual speed        : 0\\r\nport topology       : 0\\r\ninitiator num       : 0\\r\nfc port-1:4:4\\r\nwwn                 : 50:0b:34:20:02:fe:b5:10\\r\nonline state        : 2\\r\nactual speed        : 0\\r\nport topology       : 0\\r\ninitiator num       : 0\\r\nfc port-2:4:1\\r\nwwn                 : 50:0b:34:20:02:fe:b3:0d\\r\nonline state        : 2\\r\nactual speed        : 0\\r\nport topology       : 0\\r\ninitiator num       : 0\\r\nfc port-2:4:2\\r\nwwn                 : 50:0b:34:20:02:fe:b3:0e\\r\nonline state        : 2\\r\nactual speed        : 0\\r\nport topology       : 0\\r\ninitiator num       : 0\\r\nfc port-2:4:3\\r\nwwn                 : 50:0b:34:20:02:fe:b3:0f\\r\nonline state        : 2\\r\nactual speed        : 0\\r\nport topology       : 0\\r\ninitiator num       : 0\\r\nfc port-2:4:4\\r\nwwn                 : 50:0b:34:20:02:fe:b3:10\\r\nonline state        : 2\\r\nactual speed        : 0\\r\nport topology       : 0\\r\ninitiator num       : 0\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nSAS_INFO = \"\"\"(null)@(null) ODSP CLI>system sas getportlist -c 1:1\\r\nSAS Controller 1:1 Ports Sum:2\\r\n\\r\nSAS-1:1:1 Link Status: Full-Linkup\\r\nSAS-1:1:1 PHY Max Speed: 12Gbps\\r\nSAS-1:1:1 PHY1 Speed: 12Gbps\\r\nSAS-1:1:1 PHY2 Speed: 12Gbps\\r\nSAS-1:1:1 PHY3 Speed: 12Gbps\\r\nSAS-1:1:1 PHY4 Speed: 12Gbps\\r\n\\r\nSAS-1:1:2 Link Status: Full-Linkup\\r\nSAS-1:1:2 PHY Max Speed: 12Gbps\\r\nSAS-1:1:2 PHY1 Speed: 6Gbps\\r\nSAS-1:1:2 PHY2 Speed: 6Gbps\\r\nSAS-1:1:2 PHY3 Speed: 6Gbps\\r\nSAS-1:1:2 PHY4 Speed: 6Gbps\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nPORT_DATA = [{'native_port_id': 'FC-1:4:1', 'name': 'FC-1:4:1', 'type': 'fc',\n              'logical_type': 'physical', 'connection_status': 'disconnected',\n              'health_status': 'unknown', 'location': 'FC-1:4:1',\n              'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0,\n              'wwn': '50:0b:34:20:02:fe:b5:0d'},\n             {'native_port_id': 'FC-1:4:2', 'name': 'FC-1:4:2', 'type': 'fc',\n              'logical_type': 'physical', 'connection_status': 'disconnected',\n              'health_status': 'unknown', 'location': 
'FC-1:4:2',\n              'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0,\n              'wwn': '50:0b:34:20:02:fe:b5:0e'},\n             {'native_port_id': 'FC-1:4:3', 'name': 'FC-1:4:3', 'type': 'fc',\n              'logical_type': 'physical', 'connection_status': 'disconnected',\n              'health_status': 'unknown', 'location': 'FC-1:4:3',\n              'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0,\n              'wwn': '50:0b:34:20:02:fe:b5:0f'},\n             {'native_port_id': 'FC-1:4:4', 'name': 'FC-1:4:4', 'type': 'fc',\n              'logical_type': 'physical', 'connection_status': 'disconnected',\n              'health_status': 'unknown', 'location': 'FC-1:4:4',\n              'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0,\n              'wwn': '50:0b:34:20:02:fe:b5:10'},\n             {'native_port_id': 'FC-2:4:1', 'name': 'FC-2:4:1', 'type': 'fc',\n              'logical_type': 'physical', 'connection_status': 'disconnected',\n              'health_status': 'unknown', 'location': 'FC-2:4:1',\n              'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0,\n              'wwn': '50:0b:34:20:02:fe:b3:0d'},\n             {'native_port_id': 'FC-2:4:2', 'name': 'FC-2:4:2', 'type': 'fc',\n              'logical_type': 'physical', 'connection_status': 'disconnected',\n              'health_status': 'unknown', 'location': 'FC-2:4:2',\n              'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0,\n              'wwn': '50:0b:34:20:02:fe:b3:0e'},\n             {'native_port_id': 'FC-2:4:3', 'name': 'FC-2:4:3', 'type': 'fc',\n              'logical_type': 'physical', 'connection_status': 'disconnected',\n              'health_status': 'unknown', 'location': 'FC-2:4:3',\n              'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0,\n              'wwn': '50:0b:34:20:02:fe:b3:0f'},\n             {'native_port_id': 'FC-2:4:4', 'name': 'FC-2:4:4', 'type': 'fc',\n              'logical_type': 'physical', 'connection_status': 'disconnected',\n              'health_status': 'unknown', 'location': 'FC-2:4:4',\n              'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0,\n              'wwn': '50:0b:34:20:02:fe:b3:10'},\n             {'native_port_id': 'SAS-1:1:1', 'name': 'SAS-1:1:1',\n              'type': 'sas', 'logical_type': 'physical',\n              'connection_status': 'connected', 'health_status': 'unknown',\n              'location': 'SAS-1:1:1', 'storage_id': '12345',\n              'native_parent_id': 'SP1', 'max_speed': 12000000000,\n              'speed': 12000000000},\n             {'native_port_id': 'SAS-1:1:2', 'name': 'SAS-1:1:2',\n              'type': 'sas', 'logical_type': 'physical',\n              'connection_status': 'connected', 'health_status': 'unknown',\n              'location': 'SAS-1:1:2', 'storage_id': '12345',\n              'native_parent_id': 'SP1', 'max_speed': 12000000000,\n              'speed': 6000000000}]\nPARSE_ALERT_INFO = {\n    '1.3.6.1.2.1.1.3.0': '2995472',\n    '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.35904.1.3.3',\n    '1.3.6.1.2.1.25.1.2': '2022-07-12 17:43:40',\n    '1.3.6.1.4.1.35904.1.2.1.1': 'Storage-1',\n    '1.3.6.1.4.1.35904.1.2.1.4.1': 'Battery_expired',\n    '1.3.6.1.4.1.35904.1.2.1.4.2': 'SP1',\n    '1.3.6.1.4.1.35904.1.2.1.4.3': \"SSU-7:1:1's battery '2' becomes expired,\"\n                                   \" please prepare a new module and replace\"\n                                   \" it as soon as possible.\",\n   
 '1.3.6.1.4.1.35904.1.2.1.4.4': '2',\n    'transport_address': '192.168.3.235',\n    'storage_id': '05e007e4-62ef-4e24-a14e-57a8ee8e5bf3'}\nPARSE_ALERT_DATA = {\n    'alert_id': '2995472', 'severity': 'Major',\n    'category': 'Fault', 'occur_time': 1657619020000,\n    'description': \"SSU-7:1:1's battery '2' becomes expired, please prepare\"\n                   \" a new module and replace it as soon as possible.\",\n    'location': 'Storage-1:SP1', 'type': 'EquipmentAlarm',\n    'resource_type': 'Storage',\n    'alert_name': '电池模块超期',\n    'match_key': 'ec62c3cdd862da9b0f8da6d03d97d76e'}\nINITIATOR_INFO = \"\"\"(null)@(null) ODSP CLI> client initiator getlist -t all\\r\nInitiators Sum: 3\\r\n\nInitiator Alias: VMWare\\r\nInitiator WWN: 20:18:f8:2e:3f:f9:85:54\\r\nType: FC\\r\nOS: AIX\\r\nIP Address Used in Last iSCSI Login Session: N/A\\r\nMapped Client: Client-1\\r\nMapped Targets Sum: 2\\r\nMapped LUNs Sum: 6\\r\n\\r\nInitiator Alias: ds\\r\nInitiator WWN: 20:ab:30:48:56:01:fc:31\\r\nType: FC\\r\nOS: Other\\r\nIP Address Used in Last iSCSI Login Session: N/A\\r\nMapped Client: Client-2\\r\nMapped Targets Sum: 1\\r\nMapped LUNs Sum: 1\\r\n\\r\nInitiator Alias: dc\\r\nInitiator WWN: 42:25:dc:35:ab:69:12:cb\\r\nType: FC\\r\nOS: HP_UNIX\\r\nIP Address Used in Last iSCSI Login Session: N/A\\r\nMapped Client: Client-2\\r\nMapped Targets Sum: 1\\r\nMapped LUNs Sum: 2\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nINITIATOR_DATA = [\n    {'native_storage_host_initiator_id': '20:18:f8:2e:3f:f9:85:54',\n     'native_storage_host_id': 'Client-1', 'name': '20:18:f8:2e:3f:f9:85:54',\n     'alias': 'VMWare', 'type': 'fc', 'status': 'unknown',\n     'wwn': '20:18:f8:2e:3f:f9:85:54', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '20:ab:30:48:56:01:fc:31',\n     'native_storage_host_id': 'Client-2', 'name': '20:ab:30:48:56:01:fc:31',\n     'alias': 'ds', 'type': 'fc', 'status': 'unknown',\n     'wwn': '20:ab:30:48:56:01:fc:31', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '42:25:dc:35:ab:69:12:cb',\n     'native_storage_host_id': 'Client-2', 'name': '42:25:dc:35:ab:69:12:cb',\n     'alias': 'dc', 'type': 'fc', 'status': 'unknown',\n     'wwn': '42:25:dc:35:ab:69:12:cb', 'storage_id': '12345'}]\nUNKNOWN_COMMAND = \"\"\"(null)@(null) ODSP CLI> client host gethostlist\n% Unknown command.\n(null)@(null) ODSP CLI> \"\"\"\nHOSTS_INFO = \"\"\"(null)@(null) ODSP CLI> client mgt getclientlist\\r\nClients Sum: 7\\r\n\\r\nName: Client-1\\r\nDescription: ds  mss\\r\nMapped Initiators Num: 1\\r\n\\r\nName: Client-2\\r\nDescription: \\r\nMapped Initiators Num: 2\\r\n\\r\nName: Client-3\\r\nDescription: sss\\r\nMapped Initiators Num: 0\\r\n\\r\nName: Client-4\\r\nDescription: dsd\\r\nMapped Initiators Num: 0\\r\n\\r\nName: Client-5\\r\nDescription: ds\\r\nMapped Initiators Num: 0\\r\n\\r\nName: Client-6\\r\nDescription: \\r\nMapped Initiators Num: 0\\r\n\\r\nName: 5\\r\nDescription: \\r\nMapped Initiators Num: 0\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nHOST_INFO_NEW = \"\"\"(null)@(null) ODSP CLI> client host gethostlist\\r\nHost Sum: 1\\r\n\\r\nHost Name: Host-1\\r\nOS: Windows2008\\r\nIP Address: 192.168.1.20\\r\nDescription: Server 1\\r\nLocation: Room-201\\r\nInitiators Sum: 4\\r\niSCSI Initiators Sum: 2\\r\nFC Initiators Sum: 2\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nHOST_DATA = [{'name': 'Client-1', 'storage_id': '12345',\n              'native_storage_host_id': 
'Client-1', 'os_type': 'AIX',\n              'status': 'normal', 'description': 'ds  mss'},\n             {'name': 'Client-2', 'storage_id': '12345',\n              'native_storage_host_id': 'Client-2', 'os_type': 'HP-UX',\n              'status': 'normal', 'description': ''},\n             {'name': 'Client-3', 'storage_id': '12345',\n              'native_storage_host_id': 'Client-3', 'os_type': 'Unknown',\n              'status': 'normal', 'description': 'sss'},\n             {'name': 'Client-4', 'storage_id': '12345',\n              'native_storage_host_id': 'Client-4', 'os_type': 'Unknown',\n              'status': 'normal', 'description': 'dsd'},\n             {'name': 'Client-5', 'storage_id': '12345',\n              'native_storage_host_id': 'Client-5', 'os_type': 'Unknown',\n              'status': 'normal', 'description': 'ds'},\n             {'name': 'Client-6', 'storage_id': '12345',\n              'native_storage_host_id': 'Client-6', 'os_type': 'Unknown',\n              'status': 'normal', 'description': ''},\n             {'name': '5', 'storage_id': '12345',\n              'native_storage_host_id': '5', 'os_type': 'Unknown',\n              'status': 'normal', 'description': ''}]\nHOST_DATA_NEW = [{'name': 'Host-1', 'storage_id': '12345',\n                  'native_storage_host_id': 'Host-1', 'os_type': 'Windows',\n                  'status': 'normal', 'description': 'Server 1',\n                  'ip_address': '192.168.1.20'}]\nHOST_GROUPS_INFO = \"\"\"(null)@(null) ODSP CLI> client hostgroup gethglist\\r\nHost Groups Sum: 1\\r\n\\r\nHost Group Name: Host-Group-1\\r\nDescription: Host Group\\r\nHosts Sum: 1\\r\nInitiators Sum: 4\\r\niSCSI Initiators Sum: 2\\r\nFC Initiators Sum: 2\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nHOST_GROUPS_H_INFO = \"\"\"(null)@(null) ODSP CLI> client hostgroup gethostlist\\\n -n Host-Group-1\\r\nHosts Sum: 1\\r\n\\r\nHostName: Host-1\\r\nOS: Windows2008\\r\nIP Address: 192.168.1.20\\r\nDescription: Server1\\r\nLocation: Room-201\\r\nInitiators Sum: 4\\r\niSCSI Initiators Sum: 2\\r\nFC Initiators Sum: 2\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nHOST_GROUPS_DATA = {\n    'storage_host_groups': [\n        {'name': 'Host-Group-1', 'storage_id': '12345',\n         'native_storage_host_group_id': 'Host-Group-1',\n         'description': 'Host Group'}\n    ],\n    'storage_host_grp_host_rels': [\n        {'storage_id': '12345', 'native_storage_host_group_id': 'Host-Group-1',\n         'native_storage_host_id': 'Host-1'}\n    ]\n}\nVOLUME_GROUPS_INFO = \"\"\"(null)@(null) ODSP CLI> client lungroup getlglist\\r\nLUN Group Sum: 1\\r\n\\r\nLUN Group Name: LUN-Group-1\\r\nDescription: LUN Group description\\r\nLUNs Sum: 4\\r\nLocal LUNs Sum: 4\\r\nRemote LUNs Sum: 0\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nVOLUME_GROUPS_N_INFO = \"\"\"(null)@(null) ODSP CLI> client lungroup getlunlist\\\n -n LUN-Group-1\\r\nLUNs Sum: 1\\r\n\\r\nLUN Name: LUN-0001/N/A\\r\nLocation: Local/Remote\\r\nLUN Capacity: 10GB (20971520sector)/N/A\\r\nLUN WWN: 600B34249837CEBDC611DCB12DD500D6/N/A\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nVOLUME_GROUP_DATA = {'volume_groups': [\n    {'name': 'LUN-Group-1', 'storage_id': '12345',\n     'native_volume_group_id': 'LUN-Group-1',\n     'description': 'LUN Group description'}], 'vol_grp_vol_rels': [\n    {'storage_id': '12345', 'native_volume_group_id': 'LUN-Group-1',\n     'native_volume_id': 
'LUN-0001/N/A'}]}\nVIEWS_ONE = \"\"\"(null)@(null) ODSP CLI> client mgt getsharelunlist -n Client-1\\r\nLUNs Sum: 6\\r\n\\r\nLUN Name: Test_Lun-1\\r\nLUN Capacity: 10GB\\r\nLUN WWN: 600B3427C77BBDFD2FF0DBA82D0000DB\\r\nLUN ID: 0\\r\nAccess Mode: Read-Write\\r\nThin-Provisioning: Disable\\r\n\\r\nLUN Name: Test_Lun-2\\r\nLUN Capacity: 10GB\\r\nLUN WWN: 600B342A316B328D7035DD724D0000DB\\r\nLUN ID: 1\\r\nAccess Mode: Read-Write\\r\nThin-Provisioning: Disable\\r\n\\r\nLUN Name: Test_Lun-3\\r\nLUN Capacity: 10GB\\r\nLUN WWN: 600B342AB2FE2ACDBC63D8B0DD0000DB\\r\nLUN ID: 2\\r\nAccess Mode: Read-Write\\r\nThin-Provisioning: Disable\\r\n\\r\nLUN Name: Test_Lun-4\\r\nLUN Capacity: 10GB\\r\nLUN WWN: 600B342B328A722D55F7DEF5DD0000DB\\r\nLUN ID: 3\\r\nAccess Mode: Read-Write\\r\nThin-Provisioning: Disable\\r\n\\r\nLUN Name: Test_Lun-5\\r\nLUN Capacity: 10GB\\r\nLUN WWN: 600B34221067D72D65DFD18C8D0000DB\\r\nLUN ID: 4\\r\nAccess Mode: Read-Write\\r\nThin-Provisioning: Disable\\r\n\\r\nLUN Name: LUN-1\\r\nLUN Capacity: 2GB\\r\nLUN WWN: 600B342A816A4F2D9098DB015D0000DB\\r\nLUN ID: 5\\r\nAccess Mode: Read-Write\\r\nThin-Provisioning: Disable\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nVIEW_TWO = \"\"\"(null)@(null) ODSP CLI> client mgt getsharelunlist -n Client-2\\r\nLUNs Sum: 0\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nVIEWS_DATA = [{'native_masking_view_id': 'Client-10', 'name': 'Client-10',\n               'native_storage_host_id': 'Client-1', 'native_volume_id': '0',\n               'storage_id': '12345'},\n              {'native_masking_view_id': 'Client-11', 'name': 'Client-11',\n               'native_storage_host_id': 'Client-1', 'native_volume_id': '1',\n               'storage_id': '12345'},\n              {'native_masking_view_id': 'Client-12', 'name': 'Client-12',\n               'native_storage_host_id': 'Client-1', 'native_volume_id': '2',\n               'storage_id': '12345'},\n              {'native_masking_view_id': 'Client-13', 'name': 'Client-13',\n               'native_storage_host_id': 'Client-1', 'native_volume_id': '3',\n               'storage_id': '12345'},\n              {'native_masking_view_id': 'Client-14', 'name': 'Client-14',\n               'native_storage_host_id': 'Client-1', 'native_volume_id': '4',\n               'storage_id': '12345'},\n              {'native_masking_view_id': 'Client-15', 'name': 'Client-15',\n               'native_storage_host_id': 'Client-1', 'native_volume_id': '5',\n               'storage_id': '12345'}]\nVIEW_NEW_INFO = \"\"\"client mapview getlist\\r\nMapviews Sum: 1\\r\n\\r\nMapview Name: Mapview-1\\r\nDescription: Map view\\r\nHost Group Name: Host-Group-1\\r\nTarget Group Name: Target-Group-1\\r\nLUN Group Name: LUN-Group-1\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nVIEWS_NEW_DATA = [{'native_masking_view_id': 'Mapview-1', 'name': 'Mapview-1',\n                   'native_storage_host_group_id': 'Host-Group-1',\n                   'native_volume_group_id': 'LUN-Group-1',\n                   'description': 'Map view', 'storage_id': '12345'}]\nSYSTEM_QUERY = \"\"\"(null)@(null) ODSP CLI> system mgt query\\r\nsystem mgt query\\r\nDevice UUID:0x00b34202-fea90000-fa41e0d6-ded905a8\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nSYSTEM_QUERY_TWO = \"\"\"(null)@(null) ODSP CLI> system mgt query\\r\nDevice UUID:0x50b34200-0b750056-42ab74ff-6265d80e\\r\nDevice Name:Storage-1\\r\nCommand completed 
successfully.\\r\n(null)@(null) ODSP CLI> \"\"\"\nSTORAGE_DATA = {\n    'name': '0x00b34202-fea90000-fa41e0d6-ded905a8',\n    'vendor': 'MacroSAN', 'status': 'normal',\n    'serial_number': '110.143.133.200:0x00b34202-fea90000-fa41e0d6-ded905a8',\n    'firmware_version': 'V2.0.14T04',\n    'raw_capacity': 1917702897664.0,\n    'total_capacity': 7670811590656.0,\n    'used_capacity': 1031865892864.0,\n    'free_capacity': 6638945697792.0,\n    'model': ''\n}\nSTORAGE_TWO_DATA = {\n    'name': 'Storage-1', 'vendor': 'MacroSAN',\n    'status': 'normal',\n    'serial_number': '110.143.133.200:0x50b34200-0b750056-42ab74ff-6265d80e',\n    'firmware_version': 'V2.0.14T04',\n    'raw_capacity': 1917702897664.0,\n    'total_capacity': 7670811590656.0,\n    'used_capacity': 1031865892864.0,\n    'free_capacity': 6638945697792.0,\n    'model': ''\n}\nTIMESTAMP = \"\"\"[root@00-b3-42-04-0f-09 ~]# date +%s\\r\n1662345266\\r\n[root@00-b3-42-04-0f-09 ~]#\"\"\"\nVERSION_SHOW = \"\"\"[root@00-b3-42-04-0f-09 ~]# versionshow\\r\n\\r\nSP2 Version:\\r\n        ODSP_MSC: V1.5.12T03\\r\n        ODSP_DRIVER: V230T03\\r\n        BIOS    : V166\\r\n        BMC     : V272P001\\r\n        MCPLD   : V104\\r\n        MPCB    : VER.B\\r\n        BCB1    : V214\\r\n        BCB2    : V214\\r\n        BAT1HW  : BAT1111A\\r\n        BAT2HW  : FAN2021A\\r\n        IOC1PCB :\\r\n        IOC2PCB :\\r\nDSU : 1:1:1\\r\n        ODSP_JMC : V221\\r\n        ODSP_JMCB: N/A\\r\n        EPCB     : N/A\\r\n        ECPLD    : V101\\r\n        BAT0_BCB : N/A\\r\n        BAT1_BCB : N/A\\r\n\\r\n[root@00-b3-42-04-0f-09 ~]#\"\"\"\nGET_FILE_LIST = \"\"\"(null)@(null) ODSP CLI> system performance getfilelist\\r\nPerformance Statistics Files Sum:2\\r\n\nSP Name: SP2\\r\nObject Type: DEVICE\\r\nObject Name: Device\\r\nObject Identification: N/A\\r\nFile Name: perf_device_SP2_20220920181959.csv\\r\nFile Create Time: 2022-09-20 18:19:59\\r\nFile Size: 58 KB\\r\n\\r\nSP Name: SP2\\r\nObject Type: SAS PORT\\r\nObject Name: SAS-2:1:1\\r\nObject Identification: N/A\\r\nFile Name: perf_sasport_SAS-2_1_1_SP2_20220920181959.csv\\r\nFile Create Time: 2022-09-20 18:19:59\\r\nFile Size: 56 KB\\r\n\\r\nCommand completed successfully.\\r\n(null)@(null) ODSP CLI>\"\"\"\nresource_metrics = {\n    constants.ResourceType.STORAGE: consts.STORAGE_CAP,\n    constants.ResourceType.VOLUME: consts.VOLUME_CAP,\n    constants.ResourceType.PORT: consts.PORT_CAP\n}\n\n\ndef create_driver():\n    MsHandler.login = mock.Mock(\n        return_value={None})\n    return MacroSanDriver(**ACCESS_INFO)\n\n\nclass test_macro_san_driver(TestCase):\n    driver = create_driver()\n\n    def test_init(self):\n        MsHandler.login = mock.Mock(\n            return_value={\"\"})\n        MacroSanDriver(**ACCESS_INFO)\n\n    def test_get_storage(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[SYSTEM_QUERY, VERSION_INFO,\n                         POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1,\n                         DSU_INFO, DISKS_INFO, DISK_ONE, DISKS_TWO,\n                         HA_STATUS, VERSION_INFO, CPU_INFO, HA_STATUS,\n                         VERSION_SHOW])\n        MacroSanSSHPool.create = mock.Mock(__class__)\n        SSHClient.open_sftp = mock.Mock(__class__)\n        storage_object = self.driver.get_storage(context)\n        self.assertDictEqual(storage_object, STORAGE_DATA)\n\n    def test_get_storage_new(self):\n        MacroSanSSHPool.get = 
mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[SYSTEM_QUERY_TWO, VERSION_INFO,\n                         POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1,\n                         DSU_INFO, DISKS_INFO, DISK_ONE, DISKS_TWO,\n                         HA_STATUS_NEW, VERSION_INFO, CPU_INFO, HA_STATUS_NEW,\n                         VERSION_SHOW])\n        MacroSanSSHPool.create = mock.Mock(__class__)\n        SSHClient.open_sftp = mock.Mock(__class__)\n        storage_object = self.driver.get_storage(context)\n        self.assertDictEqual(storage_object, STORAGE_TWO_DATA)\n\n    def test_list_storage_pools(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1])\n        pools = self.driver.list_storage_pools(context)\n        self.assertListEqual(pools, POOLS_DATA)\n\n    def test_list_volumes(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1,\n                         VOLUME_INFO, VOLUME_QUERY_ONE, VOLUME_QUERY_TWO,\n                         VOLUME_TWO_INFO])\n        volumes = self.driver.list_volumes(context)\n        self.assertListEqual(volumes, THICK_VOLUMES_DATA)\n\n    def test_list_volumes_new(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1,\n                         VOLUME_INFO, VOLUME_ONE_NEW, VOLUME_TWO_NEW,\n                         VOLUME_TWO_INFO])\n        volumes = self.driver.list_volumes(context)\n        self.assertListEqual(volumes, VOLUMES_DATA)\n\n    def test_list_controllers(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[VERSION_INFO, CPU_INFO, HA_STATUS])\n        controllers = self.driver.list_controllers(context)\n        self.assertListEqual(controllers, CONTROLLERS_DATA)\n\n    def test_list_disks(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[DSU_INFO, DISKS_INFO, DISK_ONE, DISKS_TWO])\n        disks = self.driver.list_disks(context)\n        self.assertListEqual(disks, DISKS_DATA)\n\n    def test_list_ports(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[FC_INFO, HA_STATUS, DSU_INFO, SAS_INFO, None, None,\n                         None])\n        ports = self.driver.list_ports(context)\n        self.assertListEqual(ports, PORT_DATA)\n\n    def test_parse_alert(self):\n        parse_alert = self.driver.parse_alert(context, PARSE_ALERT_INFO)\n        PARSE_ALERT_DATA['occur_time'] = parse_alert.get('occur_time')\n        self.assertDictEqual(parse_alert, PARSE_ALERT_DATA)\n\n    def test_list_storage_host_initiators(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[INITIATOR_INFO])\n        initiators = self.driver.list_storage_host_initiators(context)\n        self.assertListEqual(initiators, 
INITIATOR_DATA)\n\n    def test_list_storage_hosts_old(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[consts.UNKNOWN_COMMAND_TAG,\n                         INITIATOR_INFO, HOSTS_INFO])\n        hosts = self.driver.list_storage_hosts(context)\n        self.assertListEqual(hosts, HOST_DATA)\n\n    def test_list_storage_hosts_new(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[HOST_INFO_NEW])\n        hosts = self.driver.list_storage_hosts(context)\n        self.assertListEqual(hosts, HOST_DATA_NEW)\n\n    def test_list_storage_hosts_group(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[HOST_GROUPS_INFO, HOST_GROUPS_H_INFO])\n        host_groups = self.driver.list_storage_host_groups(context)\n        self.assertDictEqual(host_groups, HOST_GROUPS_DATA)\n\n    def test_list_volume_groups(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[VOLUME_GROUPS_INFO, VOLUME_GROUPS_N_INFO])\n        volume_groups = self.driver.list_volume_groups(context)\n        self.assertDictEqual(volume_groups, VOLUME_GROUP_DATA)\n\n    def test_list_masking_views_old(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[consts.UNKNOWN_COMMAND_TAG,\n                         HOSTS_INFO, VIEWS_ONE, VIEW_TWO, VIEW_TWO, VIEW_TWO,\n                         VIEW_TWO, VIEW_TWO, VIEW_TWO])\n        views = self.driver.list_masking_views(context)\n        self.assertListEqual(views, VIEWS_DATA)\n\n    def test_list_masking_views_new(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[VIEW_NEW_INFO])\n        views = self.driver.list_masking_views(context)\n        self.assertListEqual(views, VIEWS_NEW_DATA)\n\n    def test_list_alert(self):\n        block = False\n        try:\n            self.driver.list_alerts(context)\n        except Exception as e:\n            LOG.error(six.text_type(e))\n            block = True\n        self.assertEqual(block, True)\n\n    def test_get_latest_perf_timestamp(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[TIMESTAMP])\n        timestamp = self.driver.get_latest_perf_timestamp(context)\n        times = 1662345240000\n        self.assertEqual(timestamp, times)\n\n    def test_get_capabilities(self):\n        capabilities = self.driver.get_capabilities(context)\n        metrics = {\n            'is_historic': True,\n            'resource_metrics': {\n                constants.ResourceType.STORAGE: consts.STORAGE_CAP,\n                constants.ResourceType.VOLUME: consts.VOLUME_CAP,\n                constants.ResourceType.PORT: consts.PORT_CAP,\n                constants.ResourceType.DISK: consts.DISK_CAP,\n            }\n        }\n        self.assertDictEqual(capabilities, metrics)\n\n    def test_collect_perf_metrics(self):\n        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})\n        
MacroSanSSHPool.do_exec_shell = mock.Mock(\n            side_effect=[VERSION_SHOW, GET_FILE_LIST])\n        # An empty downloaded performance file yields no parsed metrics.\n        MsHandler.down_perf_file = mock.Mock(return_value='')\n        # Query the last five minutes; timestamps are in milliseconds.\n        localtime = time.mktime(time.localtime()) * units.k\n        storage_id = 12345\n        start_time = localtime - 1000 * 60 * 5\n        end_time = localtime\n        metrics = self.driver.collect_perf_metrics(\n            context, storage_id, resource_metrics, start_time, end_time)\n        self.assertListEqual(metrics, [])\n"
  },
  {
    "path": "delfin/tests/unit/drivers/netapp/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/netapp/netapp_ontap/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/netapp/netapp_ontap/test_constans.py",
    "content": "# Copyright 2021 The SODA Authors.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"hpe\",\n    \"model\": \"3par\",\n    \"ssh\": {\n        \"host\": \"192.168.159.130\",\n        \"port\": 22,\n        \"username\": \"admin\",\n        \"password\": \"aq114477\",\n    },\n    \"rest\": {\n        \"host\": \"192.168.159.130\",\n        \"port\": 22,\n        \"username\": \"admin\",\n        \"password\": \"cGFzc3dvcmQ=\",\n    },\n\n}\n\nSYSTEM_INFO = \"\"\"\n----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\nCluster UUID: 47096983-8018-11eb-bd5b-000c293284bd\\r\n          Cluster Name: cl\\r\n Cluster Serial Number: -\\r\n      Cluster Location:\\r\n       Cluster Contact: \\r\"\"\"\n\nAGGREGATE_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\nAggregate     Size Available Used% State   #Vols  Nodes   RAID Status\\r\n--------- -------- --------- ----- ------- ------ --------------------\\r\naggr0        855MB   42.14MB   95% online       1 cl-01   raid_dp,\\r\n                                normal\\r\naggr1       8.79GB    3.98GB   55% online       3 cl-01   raid_dp,\\r\n                                normal\\r\naggr2       8.79GB    4.98GB   43% online       3 cl-01   raid_dp,\\r\n                                normal\\r\"\"\"\n\nVERSION = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\nNetApp Release 9.8: Fri Aug 19 06:39:33 UTC 2016\\r\n\"\"\"\n\nSYSTEM_STATUS = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\nStatus\\r\n---------------\\r\nok\"\"\"\n\nDISK_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                     Usable           Disk    Container   Container\\r\nDisk                   Size Shelf Bay Type    Type        Name      Owner\\r\n---------------- ---------- ----- --- ------- ----------- --------- -----\\r\nNET-1.1              1020254     -  16 FCAL    aggregate   aggr0     cl-01\\r\nNET-1.2              1020MB     -  17 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.3              1020MB     -  18 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.4              1020MB     -  19 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.5              1020MB     -  20 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.6              1020MB     -  21 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.7              1020MB     -  22 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.8              1020MB     -  24 FCAL    aggregate   aggr2     cl-01\\r\nNET-1.9              1020MB     -  16 FCAL    aggregate   aggr0     cl-01\\r\nNET-1.10             1020MB     -  17 FCAL    aggregate   aggr0     cl-01\\r\nNET-1.11             1020MB     -  18 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.12             1020MB     -  19 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.13             1020MB     -  20 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.14             1020MB     -  25 
FCAL    aggregate   aggr2     cl-01\\r\nNET-1.15             1020MB     -  26 FCAL    aggregate   aggr2     cl-01\\r\nNET-1.16             1020MB     -  27 FCAL    aggregate   aggr2     cl-01\\r\nNET-1.17             1020MB     -  28 FCAL    aggregate   aggr2     cl-01\\r\nNET-1.18             1020MB     -  21 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.19             1020MB     -  22 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.20             1020MB     -  24 FCAL    aggregate   aggr1     cl-01\\r\nNET-1.21             1020MB     -  25 FCAL    aggregate   aggr2     cl-01\\r\nNET-1.22             1020MB     -  26 FCAL    aggregate   aggr2     cl-01\\r\nNET-1.23             1020MB     -  27 FCAL    aggregate   aggr2     cl-01\\r\nNET-1.24             1020MB     -  28 FCAL    aggregate   aggr2     cl-01\\r\nNET-1.25             1020MB     -  29 FCAL    aggregate   aggr2     cl-01\\r\nNET-1.26             1020MB     -  32 FCAL    aggregate   aggr2     cl-01\\r\nNET-1.27             1020MB     -  29 FCAL    aggregate   aggr2     cl-01\\r\nNET-1.28             1020MB     -  32 FCAL    spare       Pool0     cl-01\\r\n28 entries were displayed.\"\"\"\n\nPOOLS_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                        Storage Pool Name: Pool1\\r\n                     UUID of Storage Pool: 60f2f1b9-e60f-11e3\\r\n           Nodes Sharing the Storage Pool: node-a, node-b\\r\n          Number of Disks in Storage Pool: 2\\r\n                     Allocation Unit Size: 372.5GB\\r\n                             Storage Type: SSD\\r\n                 Storage Pool Usable Size: 1.09TB\\r\n                  Storage Pool Total Size: 1.45TB\\r\n                         Is Pool Healthy?: true\\r\n                State of the Storage Pool: normal\\r\n  Reason for storage pool being unhealthy: -\\r\nJob ID of the Currently Running Operation: - \\r\n\\r\n                        Storage Pool Name: Pool2\\r\n                     UUID of Storage Pool: 60f2f1b9-e60f-11e3\\r\n           Nodes Sharing the Storage Pool: node-a, node-b\\r\n          Number of Disks in Storage Pool: 2\\r\n                     Allocation Unit Size: 372.5GB\\r\n                             Storage Type: SSD\\r\n                 Storage Pool Usable Size: 1.09TB\\r\n                  Storage Pool Total Size: 1.45TB\\r\n                         Is Pool Healthy?: true\\r\n                State of the Storage Pool: normal\\r\n  Reason for storage pool being unhealthy: -\\r\nJob ID of the Currently Running Operation: - \\r\"\"\"\n\nAGGREGATE_DETAIL_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                                         Aggregate: aggr0\\r\n                                      Storage Type: hdd\\r\n                                    Checksum Style: block\\r\n                                   Number Of Disks: 3\\r\n                                            Mirror: false\\r\n                              Disks for First Plex: NET-1.9, NET-1.1\\r\n                           Disks for Mirrored Plex: -\\r\n                         Partitions for First Plex: -\\r\n                      Partitions for Mirrored Plex: -\\r\n                                              Node: cl-01\\r\n                           Free Space Reallocation: off\\r\n                                         HA Policy: cfo\\r\n                               Ignore Inconsistent: off\\r\n                Space Reserved for Snapshot Copies: 5%\\r\n           Aggregate Nearly Full Threshold Percent: 
97%\\r\n                  Aggregate Full Threshold Percent: 98%\\r\n                             Checksum Verification: on\\r\n                                   RAID Lost Write: on\\r\n                             Enable Thorough Scrub: off\\r\n                                    Hybrid Enabled: false\\r\n                                    Available Size: 0B\\r\n                                  Checksum Enabled: true\\r\n                                   Checksum Status: active\\r\n                                           Cluster: cl\\r\n                                   Home Cluster ID: 47096983-8018-11eb-bd5b\\r\n                                        DR Home ID: -\\r\n                                      DR Home Name: -\\r\n                                   Inofile Version: 4\\r\n                                  Has Mroot Volume: true\\r\n                     Has Partner Node Mroot Volume: false\\r\n                                           Home ID: 4082368507\\r\n                                         Home Name: cl-01\\r\n                           Total Hybrid Cache Size: 0B\\r\n                                            Hybrid: false\\r\n                                      Inconsistent: false\\r\n                                 Is Aggregate Home: true\\r\n                                     Max RAID Size: 16\\r\n       Flash Pool SSD Tier Maximum RAID Group Size: -\\r\n                                          Owner ID: 4082368507\\r\n                                        Owner Name: cl-01\\r\n                                   Used Percentage: 96%\\r\n                                            Plexes: /aggr0/plex0\\r\n                                       RAID Groups: /aggr0/plex0/rg0 (block)\\r\n                             RAID Lost Write State: on\\r\n                                       RAID Status: raid_dp, normal\\r\n                                         RAID Type: raid_dp\\r\n   SyncMirror Resync Snapshot Frequency in Minutes: 5\\r\n                                           Is Root: true\\r\n      Space Used by Metadata for Volume Efficiency: 0B\\r\n                                              Size: 855MB\\r\n                                             State: online\\r\n                        Maximum Write Alloc Blocks: 0\\r\n                                         Used Size: 0\\r\n                                 Uses Shared Disks: false\\r\n                                       UUID String: a71b1e4e-d151-abebf8\\r\n                                 Number Of Volumes: 1\n                             Is Flash Pool Caching: -\\r\n            Is Eligible for Auto Balance Aggregate: false\\r\n             State of the aggregate being balanced: ineligible\\r\n                          Total Physical Used Size: 712.3MB\\r\n                          Physical Used Percentage: 79%\\r\n            State Change Counter for Auto Balancer: 0\\r\n                                      Is Encrypted: false\\r\n                                     SnapLock Type: non-snaplock\\r\n                                 Encryption Key ID: -\\r\n Is in the precommit phase of Copy-Free Transition: false\\r\n                Is a 7-Mode transitioning aggregat: false\\r\nThreshold When Aggregate Is Considered Unbalanced (%): 70\\r\nThreshold When Aggregate Is Considered Balanced (%): 40\\r\n                        Resynchronization Priority: -\\r\n                    Space Saved by Data Compaction: 0B\\r\n               Percentage Saved by Data Compaction: 0%\\r\n           
               Amount of compacted data: 0B\\r\n\\r\n                                         Aggregate: aggr1\\r\n                                      Storage Type: hdd\\r\n                                    Checksum Style: block\\r\n                                   Number Of Disks: 12\\r\n                                            Mirror: false\\r\n                              Disks for First Plex: NET-1.2, NET-1.11,\\r\n                                                    NET-1.12, NET-1.4,\\r\n                                                    NET-1.13, NET-1.5,\\r\n                                                    NET-1.18, NET-1.6,\\r\n                                                    NET-1.19, NET-1.7\\r\n                           Disks for Mirrored Plex: -\\r\n                         Partitions for First Plex: -\\r\n                      Partitions for Mirrored Plex: -\\r\n                                              Node: cl-01\\r\n                           Free Space Reallocation: off\\r\n                                         HA Policy: sfo\\r\n                               Ignore Inconsistent: off\\r\n                Space Reserved for Snapshot Copies: -\\r\n           Aggregate Nearly Full Threshold Percent: 95%\\r\n                  Aggregate Full Threshold Percent: 98%\\r\n                             Checksum Verification: on\\r\n                                   RAID Lost Write: on\\r\n                             Enable Thorough Scrub: off\\r\n                                    Hybrid Enabled: false\\r\n                                    Available Size: 5.97GB\\r\n                                  Checksum Enabled: true\\r\n                                   Checksum Status: active\\r\n                                           Cluster: cl\\r\n                                   Home Cluster ID: 47096983-8018-bd\\r\n                                        DR Home ID: -\\r\n                                      DR Home Name: -\\r\n                                   Inofile Version: 4\\r\n                                  Has Mroot Volume: false\\r\n                     Has Partner Node Mroot Volume: false\\r\n                                           Home ID: 4082368507\\r\n                                         Home Name: cl-01\\r\n                           Total Hybrid Cache Size: 0B\\r\n                                            Hybrid: false\\r\n                                      Inconsistent: false\\r\n                                 Is Aggregate Home: true\\r\n                                     Max RAID Size: 16\\r\n       Flash Pool SSD Tier Maximum RAID Group Size: -\\r\n                                          Owner ID: 4082368507\\r\n                                        Owner Name: cl-01\\r\n                                   Used Percentage: 32%\\r\n                                            Plexes: /aggr1/plex0\\r\n                                       RAID Groups: /aggr1/plex0/rg0 (block)\\r\n                             RAID Lost Write State: on\\r\n                                       RAID Status: raid_dp, normal\\r\n                                         RAID Type: raid_dp\\r\n   SyncMirror Resync Snapshot Frequency in Minutes: 5\\r\n                                           Is Root: false\\r\n      Space Used by Metadata for Volume Efficiency: 0B\\r\n                                              Size: 8.79GB\\r\n                                             State: online\\r\n                        Maximum Write 
Alloc Blocks: 0\\r\n                                         Used Size: 2.82GB\\r\n                                 Uses Shared Disks: false\\r\n                                       UUID String: 68ffbbca-eb735\\r\n                                 Number Of Volumes: 3\\r\n                             Is Flash Pool Caching: -\\r\n            Is Eligible for Auto Balance Aggregate: false\\r\n             State of the aggregate being balanced: ineligible\\r\n                          Total Physical Used Size: 154.7MB\\r\n                          Physical Used Percentage: 2%\\r\n            State Change Counter for Auto Balancer: 0\\r\n                                      Is Encrypted: false\\r\n                                     SnapLock Type: non-snaplock\\r\n                                 Encryption Key ID: -\\r\n Is in the precommit phase of Copy-Free Transition: false\\r\n                 Is a 7-Mode transitioning aggrega: false\\r\nThreshold When Aggregate Is Considered Unbalanced (%): 70\nThreshold When Aggregate Is Considered Balanced (%): 40\\r\n                        Resynchronization Priority: -\\r\n                    Space Saved by Data Compaction: 0B\\r\n               Percentage Saved by Data Compaction: 0%\\r\n                          Amount of compacted data: 0B\\r\n\\r\n                                         Aggregate: aggr2\\r\n                                      Storage Type: hdd\\r\n                                    Checksum Style: block\\r\n                                   Number Of Disks: 12\\r\n                                            Mirror: false\\r\n                              Disks for First Plex: NET-1.8, NET-1.21,\\r\n                                                    NET-1.14, NET-1.22,\\r\n                                                    NET-1.15, NET-1.23,\\r\n                                                    NET-1.16, NET-1.24,\\r\n                                                    NET-1.17, NET-1.25,\\r\n                                                    NET-1.27, NET-1.26\\r\n                           Disks for Mirrored Plex: -\\r\n                         Partitions for First Plex: -\\r\n                      Partitions for Mirrored Plex: -\\r\n                                              Node: cl-01\\r\n                           Free Space Reallocation: off\\r\n                                         HA Policy: sfo\\r\n                               Ignore Inconsistent: off\\r\n                Space Reserved for Snapshot Copies: -\\r\n           Aggregate Nearly Full Threshold Percent: 95%\\r\n                  Aggregate Full Threshold Percent: 98%\\r\n                             Checksum Verification: on\\r\n                                   RAID Lost Write: on\\r\n                             Enable Thorough Scrub: off\\r\n                                    Hybrid Enabled: false\\r\n                                    Available Size: 2.93GB\\r\n                                  Checksum Enabled: true\\r\n                                   Checksum Status: active\\r\n                                           Cluster: cl\\r\n                                   Home Cluster ID: 47096983-8018-\\r\n                                        DR Home ID: -\\r\n                                      DR Home Name: -\\r\n                                   Inofile Version: 4\\r\n                                  Has Mroot Volume: false\\r\n                     Has Partner Node Mroot Volume: false\\r\n                                 
          Home ID: 4082368507\\r\n                                         Home Name: cl-01\\r\n                           Total Hybrid Cache Size: 0B\\r\n                                            Hybrid: false\\r\n                                      Inconsistent: false\\r\n                                 Is Aggregate Home: true\\r\n                                     Max RAID Size: 16\\r\n       Flash Pool SSD Tier Maximum RAID Group Size: -\\r\n                                          Owner ID: 4082368507\\r\n                                        Owner Name: cl-01\\r\n                                   Used Percentage: 67%\\r\n                                            Plexes: /aggr2/plex0\\r\n                                       RAID Groups: /aggr2/plex0/rg0 (block)\\r\n                             RAID Lost Write State: on\\r\n                                       RAID Status: raid_dp, normal\\r\n                                         RAID Type: raid_dp\\r\n   SyncMirror Resync Snapshot Frequency in Minutes: 5\\r\n                                           Is Root: false\\r\n      Space Used by Metadata for Volume Efficiency: 0B\\r\n                                              Size: 8.79GB\\r\n                                             State: online\\r\n                        Maximum Write Alloc Blocks: 0\\r\n                                         Used Size: 5.85GB\\r\n                                 Uses Shared Disks: false\\r\n                                       UUID String: b5cfe36e-ea\\r\n                                 Number Of Volumes: 6\n                             Is Flash Pool Caching: -\\r\n            Is Eligible for Auto Balance Aggregate: false\\r\n             State of the aggregate being balanced: ineligible\\r\n                          Total Physical Used Size: 68.84MB\\r\n                          Physical Used Percentage: 1%\\r\n            State Change Counter for Auto Balancer: 0\\r\n                                      Is Encrypted: false\\r\n                                     SnapLock Type: non-snaplock\\r\n                                 Encryption Key ID: -\\r\n Is in the precommit phase of Copy-Free Transition: false\\r\n                              Is a 7-Mode of space: false\\r\nThreshold When Aggregate Is Considered Unbalanced (%): 70\\r\nThreshold When Aggregate Is Considered Balanced (%): 40\\r\n                        Resynchronization Priority: -\\r\n                    Space Saved by Data Compaction: 0B\\r\n               Percentage Saved by Data Compaction: 0%\\r\n                          Amount of compacted data: 0B\\r\n3 entries were displayed.\\r\n\"\"\"\n\nLUN_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n              Vserver Name: svm5\\r\n                  LUN Path: /vol/lun_0_vol/lun_0\\r\n               Volume Name: lun_0_vol\\r\n                Qtree Name: \"\"\\r\n                  LUN Name: lun_0\\r\n                  LUN Size: 512MB\\r\n                   OS Type: linux\\r\n         Space Reservation: enabled\\r\n             Serial Number: wpEzy]QpkWFm\\r\n       Serial Number (Hex): 7770457a795d51706b57466d\\r\n                   Comment:\\r\nSpace Reservations Honored: true\\r\n          Space Allocation: disabled\\r\n                     State: online\\r\n                  LUN UUID: d4d1c11a-fa21-4ef8-9536-776017748474\\r\n                    Mapped: unmapped\n                Block Size: 512\\r\n          Device Legacy ID: -\\r\n          Device Binary ID: -\\r\n            
Device Text ID: -\\r\n                 Read Only: false\\r\n     Fenced Due to Restore: false\\r\n                 Used Size: 0\\r\n       Maximum Resize Size: 64.00GB\\r\n             Creation Time: 5/7/2021 18:34:52\\r\n                     Class: regular\\r\n      Node Hosting the LUN: cl-01\\r\n          QoS Policy Group: -\\r\n       Caching Policy Name: -\\r\n                     Clone: false\\r\n  Clone Autodelete Enabled: false\\r\n       Inconsistent Import: false\\r\n       \"\"\"\n\nFS_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                                   Vserver Name: cl-01\\r\n                                    Volume Name: vol0\\r\n                                 Aggregate Name: aggr0\\r\n  List of Aggregates for FlexGroup Constituents: -\\r\n                                    Volume Size: 807.3MB\\r\n                             Volume Data Set ID: -\\r\n                      Volume Master Data Set ID: -\\r\n                                   Volume State: online\\r\n                                   Volume Style: flex\\r\n                          Extended Volume Style: flexvol\\r\n                         Is Cluster-Mode Volume: false\\r\n                          Is Constituent Volume: false\\r\n                                  Export Policy: -\\r\n                                        User ID: -\\r\n                                       Group ID: -\\r\n                                 Security Style: -\\r\n                               UNIX Permissions: ------------\\r\n                                  Junction Path: -\\r\n                           Junction Path Source: -\\r\n                                Junction Active: -\\r\n                         Junction Parent Volume: -\\r\n                                        Comment: -\\r\n                                 Available Size: 135.4MB\\r\n                                Filesystem Size: 807.3MB\\r\n                        Total User-Visible Size: 766.9MB\\r\n                                      Used Size: -\\r\n                                Used Percentage: 83%\\r\n           Volume Nearly Full Threshold Percent: 95%\\r\n                  Volume Full Threshold Percent: 98%\\r\n           Maximum Autosize (for flexvols only): 968.7MB\\r\n                               Minimum Autosize: 807.3MB\\r\n             Autosize Grow Threshold Percentage: 85%\\r\n           Autosize Shrink Threshold Percentage: 50%\\r\n                                  Autosize Mode: off\\r\n            Total Files (for user-visible data): 24539\\r\n             Files Used (for user-visible data): 16715\\r\n                      Space Guarantee in Effect: true\\r\n                            Space SLO in Effect: true\\r\n                                      Space SLO: none\\r\n                          Space Guarantee Style: volume\\r\n                             Fractional Reserve: 100%\\r\n                                    Volume Type: RW\\r\n              Snapshot Directory Access Enabled: true\\r\n             Space Reserved for Snapshot Copies: 5%\\r\n                          Snapshot Reserve Used: 604%\\r\n                                Snapshot Policy: -\\r\n                                  Creation Time: Mon Mar 08 14:09:37 2021\\r\n                                       Language: -\\r\n                                   Clone Volume: -\\r\n                                      Node name: cl-01\\r\n                      Clone Parent Vserver Name: -\\r\n                        
FlexClone Parent Volume: -\\r\n                                  NVFAIL Option: on\\r\n                          Volume's NVFAIL State: false\\r\n        Force NVFAIL on MetroCluster Switchover: off\\r\n                      Is File System Size Fixed: false\\r\n                     (DEPRECATED)-Extent Option: off\\r\n                  Reserved Space for Overwrites: 0B\\r\n              Primary Space Management Strategy: volume_grow\\r\n                       Read Reallocation Option: off\\r\n    Naming Scheme for Automatic Snapshot Copies: ordinal\\r\n               Inconsistency in the File System: false\\r\n                   Is Volume Quiesced (On-Disk): false\\r\n                 Is Volume Quiesced (In-Memory): false\\r\n      Volume Contains Shared or Compressed Data: false\\r\n              Space Saved by Storage Efficiency: 0B\\r\n         Percentage Saved by Storage Efficiency: 0%\\r\n                   Space Saved by Deduplication: 0B\\r\n              Percentage Saved by Deduplication: 0%\\r\n                  Space Shared by Deduplication: 0B\\r\n                     Space Saved by Compression: 0B\\r\n          Percentage Space Saved by Compression: 0%\\r\n            Volume Size Used by Snapshot Copies: 243.7MB\\r\n                                     Block Type: 64-bit\\r\n                               Is Volume Moving: -\\r\n                 Flash Pool Caching Eligibility: read-write\\r\n  Flash Pool Write Caching Ineligibility Reason: -\\r\n                     Managed By Storage Service: -\\r\nCreate Namespace Mirror Constituents For SnapDiff Use: -\\r\n                        Constituent Volume Role: -\\r\n                          QoS Policy Group Name: -\\r\n                            Caching Policy Name: -\\r\n                Is Volume Move in Cutover Phase: -\\r\n        Number of Snapshot Copies in the Volume: 8\\r\nVBN_BAD may be present in the active filesystem: false\\r\n                Is Volume on a hybrid aggregate: false\\r\n                       Total Physical Used Size: 671.8MB\\r\n                       Physical Used Percentage: 83%\\r\n                                  List of Nodes: -\\r\n                          Is Volume a FlexGroup: false\\r\n                                  SnapLock Type: non-snaplock\\r\n                          Vserver DR Protection: -\\r\n UUID of the Efficiency Policy: b0f36cd7-e7bc-11e2-9994-123478563412\\r\n\\r\n                                   Vserver Name: svm1\\r\n                                    Volume Name: svm1_root\\r\n                                 Aggregate Name: aggr1\\r\n  List of Aggregates for FlexGroup Constituents: -\\r\n                                    Volume Size: 800MB\\r\n                             Volume Data Set ID: 1025\\r\n                      Volume Master Data Set ID: 2155388521\\r\n                                   Volume State: online\\r\n                                   Volume Style: flex\\r\n                          Extended Volume Style: flexvol\\r\n                         Is Cluster-Mode Volume: true\\r\n                          Is Constituent Volume: false\\r\n                                  Export Policy: default\\r\n                                        User ID: -\\r\n                                       Group ID: -\\r\n                                 Security Style: ntfs\\r\n                               UNIX Permissions: ------------\\r\n                                  Junction Path: /\\r\n                           Junction Path Source: -\\r\n                 
               Junction Active: true\\r\n                         Junction Parent Volume: -\\r\n                                        Comment:\\r\n                                 Available Size: 759.8MB\\r\n                                Filesystem Size: 800MB\\r\n                        Total User-Visible Size: 760MB\\r\n                                      Used Size: 244KB\\r\n                                Used Percentage: 5%\\r\n           Volume Nearly Full Threshold Percent: 95%\\r\n                  Volume Full Threshold Percent: 98%\\r\n           Maximum Autosize (for flexvols only): 960MB\\r\n                               Minimum Autosize: 800MB\\r\n             Autosize Grow Threshold Percentage: 85%\\r\n           Autosize Shrink Threshold Percentage: 50%\\r\n                                  Autosize Mode: off\\r\n            Total Files (for user-visible data): 24313\\r\n             Files Used (for user-visible data): 103\\r\n                      Space Guarantee in Effect: true\\r\n                            Space SLO in Effect: true\\r\n                                      Space SLO: none\\r\n                          Space Guarantee Style: volume\\r\n                             Fractional Reserve: 100%\\r\n                                    Volume Type: RW\\r\n              Snapshot Directory Access Enabled: false\\r\n             Space Reserved for Snapshot Copies: 5%\\r\n                          Snapshot Reserve Used: 0%\\r\n                                Snapshot Policy: none\\r\n                                  Creation Time: Mon Mar 08 14:31:03 2021\\r\n                                       Language: C.UTF-8\\r\n                                   Clone Volume: false\\r\n                                      Node name: cl-01\\r\n                      Clone Parent Vserver Name: -\\r\n                        FlexClone Parent Volume: -\\r\n                                  NVFAIL Option: off\\r\n                          Volume's NVFAIL State: false\\r\n        Force NVFAIL on MetroCluster Switchover: off\\r\n                      Is File System Size Fixed: false\\r\n                     (DEPRECATED)-Extent Option: off\\r\n                  Reserved Space for Overwrites: 0B\\r\n              Primary Space Management Strategy: volume_grow\\r\n                       Read Reallocation Option: off\\r\n    Naming Scheme for Automatic Snapshot Copies: create_time\\r\n               Inconsistency in the File System: false\\r\n                   Is Volume Quiesced (On-Disk): false\\r\n                 Is Volume Quiesced (In-Memory): false\\r\n      Volume Contains Shared or Compressed Data: false\\r\n              Space Saved by Storage Efficiency: 0B\\r\n         Percentage Saved by Storage Efficiency: 0%\\r\n                   Space Saved by Deduplication: 0B\\r\n              Percentage Saved by Deduplication: 0%\\r\n                  Space Shared by Deduplication: 0B\\r\n                     Space Saved by Compression: 0B\\r\n          Percentage Space Saved by Compression: 0%\\r\n            Volume Size Used by Snapshot Copies: 0B\\r\n                                     Block Type: 64-bit\\r\n                               Is Volume Moving: false\\r\n                 Flash Pool Caching Eligibility: read-write\\r\n  Flash Pool Write Caching Ineligibility Reason: -\\r\n                     Managed By Storage Service: -\\r\nCreate Namespace Mirror Constituents For SnapDiff Use: -\\r\n                        Constituent Volume Role: -\\r\n                        
  QoS Policy Group Name: -\\r\n                            Caching Policy Name: -\\r\n                Is Volume Move in Cutover Phase: false\\r\n        Number of Snapshot Copies in the Volume: 0\\r\nVBN_BAD may be present in the active filesystem: false\\r\n                Is Volume on a hybrid aggregate: false\\r\n                       Total Physical Used Size: 244KB\\r\n                       Physical Used Percentage: 0%\\r\n                                  List of Nodes: -\\r\n                          Is Volume a FlexGroup: false\\r\n                                  SnapLock Type: non-snaplock\\r\n                          Vserver DR Protection: -\\r\n UUID of the Efficiency Policy: b0f36cd7-e7bc-11e2-9994-123478563412\\r\n\\r\n                                   Vserver Name: svm1\\r\n                                    Volume Name: vol_svm1_1\\r\n                                 Aggregate Name: aggr1\\r\n  List of Aggregates for FlexGroup Constituents: -\\r\n                                    Volume Size: 2GB\\r\n                             Volume Data Set ID: 1027\\r\n                      Volume Master Data Set ID: 2155388523\\r\n                                   Volume State: online\\r\n                                   Volume Style: flex\\r\n                          Extended Volume Style: flexvol\\r\n                         Is Cluster-Mode Volume: true\\r\n                          Is Constituent Volume: false\\r\n                                  Export Policy: default\\r\n                                        User ID: -\\r\n                                       Group ID: -\\r\n                                 Security Style: ntfs\\r\n                               UNIX Permissions: ------------\\r\n                                  Junction Path: -\\r\n                           Junction Path Source: -\\r\n                                Junction Active: -\\r\n                         Junction Parent Volume: -\\r\n                                        Comment:\\r\n                                 Available Size: 2.00GB\\r\n                                Filesystem Size: 2GB\\r\n                        Total User-Visible Size: 2GB\\r\n                                      Used Size: 3.84MB\\r\n                                Used Percentage: 0%\\r\n           Volume Nearly Full Threshold Percent: 95%\\r\n                  Volume Full Threshold Percent: 98%\\r\n           Maximum Autosize (for flexvols only): 2.40GB\\r\n                               Minimum Autosize: 2GB\\r\n             Autosize Grow Threshold Percentage: 85%\\r\n           Autosize Shrink Threshold Percentage: 50%\\r\n                                  Autosize Mode: off\\r\n            Total Files (for user-visible data): 62258\\r\n             Files Used (for user-visible data): 97\\r\n                      Space Guarantee in Effect: true\\r\n                            Space SLO in Effect: true\\r\n                                      Space SLO: none\\r\n                          Space Guarantee Style: volume\\r\n                             Fractional Reserve: 100%\\r\n                                    Volume Type: RW\\r\n              Snapshot Directory Access Enabled: true\\r\n             Space Reserved for Snapshot Copies: 0%\\r\n                          Snapshot Reserve Used: 0%\\r\n                                Snapshot Policy: default\\r\n                                  Creation Time: Mon Mar 08 14:32:54 2021\\r\n                                       Language: C.UTF-8\\r\n     
                              Clone Volume: false\\r\n                                      Node name: cl-01\\r\n                      Clone Parent Vserver Name: -\\r\n                        FlexClone Parent Volume: -\\r\n                                  NVFAIL Option: off\\r\n                          Volume's NVFAIL State: false\\r\n        Force NVFAIL on MetroCluster Switchover: off\\r\n                      Is File System Size Fixed: false\\r\n                     (DEPRECATED)-Extent Option: off\\r\n                  Reserved Space for Overwrites: 0B\\r\n              Primary Space Management Strategy: volume_grow\\r\n                       Read Reallocation Option: off\\r\n    Naming Scheme for Automatic Snapshot Copies: create_time\\r\n               Inconsistency in the File System: false\\r\n                   Is Volume Quiesced (On-Disk): false\\r\n                 Is Volume Quiesced (In-Memory): false\\r\n      Volume Contains Shared or Compressed Data: false\\r\n              Space Saved by Storage Efficiency: 0B\\r\n         Percentage Saved by Storage Efficiency: 0%\\r\n                   Space Saved by Deduplication: 0B\\r\n              Percentage Saved by Deduplication: 0%\\r\n                  Space Shared by Deduplication: 0B\\r\n                     Space Saved by Compression: 0B\\r\n          Percentage Space Saved by Compression: 0%\\r\n            Volume Size Used by Snapshot Copies: 2.98MB\\r\n                                     Block Type: 64-bit\\r\n                               Is Volume Moving: false\\r\n                 Flash Pool Caching Eligibility: read-write\\r\n  Flash Pool Write Caching Ineligibility Reason: -\\r\n                     Managed By Storage Service: -\\r\nCreate Namespace Mirror Constituents For SnapDiff Use: -\\r\n                        Constituent Volume Role: -\\r\n                          QoS Policy Group Name: -\\r\n                            Caching Policy Name: -\\r\n                Is Volume Move in Cutover Phase: false\\r\n        Number of Snapshot Copies in the Volume: 8\\r\nVBN_BAD may be present in the active filesystem: false\\r\n                Is Volume on a hybrid aggregate: false\\r\n                       Total Physical Used Size: 3.84MB\\r\n                       Physical Used Percentage: 0%\\r\n                                  List of Nodes: -\\r\n                          Is Volume a FlexGroup: false\\r\n                                  SnapLock Type: non-snaplock\\r\n                          Vserver DR Protection: -\\r\n UUID of the Efficiency Policy: b0f36cd7-e7bc-11e2-9994-123478563412\\r\n7 entries were displayed.\"\"\"\n\nALERT_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                  Node: node1\\r\n               Monitor: node-connect\\r\n              Alert ID: DualPathToDiskShelf_Alert\\r\n     Alerting Resource: 50:05:0c:c1:02:00:0f:02\\r\n             Subsystem: SAS-connect\\r\n       Indication Time: Mon Mar 10 10:26:38 2021\\r\n    Perceived Severity: Major\\r\n        Probable Cause: Connection_establishment_error\\r\n           Description: Disk shelf 2 does not disk\\r\n           disk 12312\\r\n    Corrective Actions: 1. Halt controller node1 and \\r\n                        2. Connect disk shelf 2 t\\r\n                        3. Reboot the halted controllers.\\r\n                        4. 
Contact support per.\\r\n       Possible Effect: Access to disk shelf\\r\n           Acknowledge: false\\r\n              Suppress: false\\r\n                Policy: DualPathToDiskShelf_Policy\\r\n          Acknowledger: -\\r\n            Suppressor: -   \\r\nAdditional Information: Shelf uuid: 50:05:0c:c1:02:00:0f:02\\r\n                        Shelf id: 2\\r\n                        Shelf Name: 4d.shelf2\\r\n                        Number of Paths: 1\\r\n                        Number of Disks: 6\\r\n                        Adapter connected to IOMA:\\r\n                        Adapter connected to IOMB: 4d\\r\nAlerting Resource Name: Shelf ID 2\\r\n Additional Alert Tags: quality-of-service, nondisruptive-upgrade\\r\"\"\"\n\nCONTROLLER_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                                              Node: cl-01\\r\n                                             Owner: \\r\n                                          Location: \\r\n                                             Model: SIMBOX\\r\n                                     Serial Number: 4082368-50-7\\r\n                                         Asset Tag: -\\r\n                                            Uptime: 1 days 06:17\\r\n                                   NVRAM System ID: 4082368507\\r\n                                         System ID: 4082368507\\r\n                                            Vendor: NetApp\\r\n                                            Health: true\\r\n                                       Eligibility: true\\r\n                           Differentiated Services: false\\r\n                               All-Flash Optimized: false\\r\n                               \"\"\"\n\nPORTS_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                                        Node: cl-01\\r\n                                        Port: e0a\\r\n                                        Link: up\\r\n                                         MTU: 1500\\r\n             Auto-Negotiation Administrative: true\\r\n                Auto-Negotiation Operational: true\\r\n                  Duplex Mode Administrative: auto\\r\n                     Duplex Mode Operational: full\\r\n                        Speed Administrative: auto\\r\n                           Speed Operational: 1000\\r\n                 Flow Control Administrative: full\\r\n                    Flow Control Operational: none\\r\n                                 MAC Address: 00:0c:29:32:84:bd\\r\n                                   Port Type: physical\\r\n                 Interface Group Parent Node: -\\r\n                 Interface Group Parent Port: -\\r\n                       Distribution Function: -\\r\n                               Create Policy: -\\r\n                            Parent VLAN Node: -\\r\n                            Parent VLAN Port: -\\r\n                                    VLAN Tag: -\\r\n                            Remote Device ID: -\\r\n                                IPspace Name: Default\\r\n                            Broadcast Domain: Default\\r\n                          MTU Administrative: 1500\\r\n                          Port Health Status: healthy\\r\n                   Ignore Port Health Status: false\\r\n                Port Health Degraded Reasons: -\\r\n\\r\n                                        Node: cl-01\\r\n                                        Port: e0b\\r\n                                        Link: up\\r\n                                 
        MTU: 1500\\r\n             Auto-Negotiation Administrative: true\\r\n                Auto-Negotiation Operational: true\\r\n                  Duplex Mode Administrative: auto\\r\n                     Duplex Mode Operational: full\\r\n                        Speed Administrative: auto\\r\n                           Speed Operational: 1000\\r\n                 Flow Control Administrative: full\\r\n                    Flow Control Operational: none\\r\n                                 MAC Address: 00:0c:29:32:84:c7\\r\n                                   Port Type: physical\\r\n                 Interface Group Parent Node: -\\r\n                 Interface Group Parent Port: -\\r\n                       Distribution Function: -\\r\n                               Create Policy: -\\r\n                            Parent VLAN Node: -\\r\n                            Parent VLAN Port: -\\r\n                                    VLAN Tag: -\\r\n                            Remote Device ID: -\\r\n                                IPspace Name: Default\\r\n                            Broadcast Domain: Default\\r\n                          MTU Administrative: 1500\\r\n                          Port Health Status: healthy\\r\n                   Ignore Port Health Status: false\\r\n                Port Health Degraded Reasons: -\\r\"\"\"\n\nFC_PORT_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                          Node: cl-01\\r\n                       Adapter: 0a\\r\n                   Description: Fibre Channel Target Adap\\r\n             Physical Protocol: fibre-channel\\r\n                 Maximum Speed: 8\\r\n         Administrative Status: up\\r\n            Operational Status: online\\r\n               Extended Status: ADAPTER UP\\r\n             Host Port Address: 3e8\\r\n             Firmware Revision: 1.0.0\\r\n         Data Link Rate (Gbit): 8\\r\n            Fabric Established: true\\r\n                   Fabric Name: -\\r\n        Connection Established: ptp\\r\n                     Mediatype: ptp\\r\n              Configured Speed: auto\\r\n                  Adapter WWNN: 50:0a:09:80:06:32:84:bd\\r\n                  Adapter WWPN: 50:0a:09:81:06:32:84:bd\\r\n                   Switch Port: ACME Switch:1\\r\n    Form Factor Of Transceiver: ACM\\r\n    Vendor Name Of Transceiver: SFP Vendor\\r\n    Part Number Of Transceiver: 0000\\r\n       Revision Of Transceiver: 1.0\\r\n  Serial Number Of Transceiver: 0000\\r\nFC Capabilities Of Transceiver: 8 (Gbit/sec)\\r\n     Vendor OUI Of Transceiver: 0:5:2\\r\n      Wavelength In Nanometers: 0\\r\n      Date Code Of Transceiver: 11:04:02\\r\n       Validity Of Transceiver: true\\r\n                Connector Used: ACME Connector\\r\n                 Encoding Used: 0\\r\n      Is Internally Calibrated: true\\r\n        Received Optical Power: 10.0 (uWatts)\\r\n    Is Received Power In Range: true\\r\n SPF Transmitted Optical Power: 10.0 (uWatts)\\r\n        Is Xmit Power In Range: true\\r\n\\r\n                          Node: cl-01\\r\n                       Adapter: 0b\\r\n                   Description: Fibre Channel Target \\r\n             Physical Protocol: fibre-channel\\r\n                 Maximum Speed: 8\\r\n         Administrative Status: up\\r\n            Operational Status: online\\r\n               Extended Status: ADAPTER UP\\r\n             Host Port Address: 3e9\\r\n             Firmware Revision: 1.0.0\\r\n         Data Link Rate (Gbit): 8\\r\n            Fabric Established: true\\r\n                  
 Fabric Name: -\\r\n        Connection Established: ptp\\r\n                     Mediatype: ptp\\r\n              Configured Speed: auto\\r\n                  Adapter WWNN: 50:0a:09:80:06:32:84:bd\\r\n                  Adapter WWPN: 50:0a:09:82:06:32:84:bd\\r\n                   Switch Port: ACME Switch:1\\r\n    Form Factor Of Transceiver: ACM\\r\n    Vendor Name Of Transceiver: SFP Vendor\\r\n    Part Number Of Transceiver: 0000\\r\n       Revision Of Transceiver: 1.0\\r\n  Serial Number Of Transceiver: 0000\\r\nFC Capabilities Of Transceiver: 8 (Gbit/sec)\\r\n     Vendor OUI Of Transceiver: 0:5:2\\r\n      Wavelength In Nanometers: 0\\r\n      Date Code Of Transceiver: 11:04:02\\r\n       Validity Of Transceiver: true\\r\n                Connector Used: ACME Connector\\r\n                 Encoding Used: 0\\r\n      Is Internally Calibrated: true\\r\n        Received Optical Power: 10.0 (uWatts)\\r\n    Is Received Power In Range: true\\r\n SPF Transmitted Optical Power: 10.0 (uWatts)\\r\n        Is Xmit Power In Range: true\\r\n\\r\n                          Node: cl-01\\r\n                       Adapter: 0c\\r\n                   Description: Fibre Channel Target Adapter)\\r\n             Physical Protocol: ethernet\\r\n                 Maximum Speed: 10\\r\n         Administrative Status: up\\r\n            Operational Status: online\\r\n               Extended Status: ADAPTER UP\\r\n             Host Port Address: 3ea\\r\n             Firmware Revision: 1.0.0\\r\n         Data Link Rate (Gbit): 10\\r\n            Fabric Established: true\\r\n                   Fabric Name: -\\r\n        Connection Established: ptp\\r\n                     Mediatype: ptp\\r\n              Configured Speed: auto\\r\n                  Adapter WWNN: 50:0a:09:80:06:32:84:bd\\r\n                  Adapter WWPN: 50:0a:09:83:06:32:84:bd\\r\n                   Switch Port: ACME Switch:1\\r\n    Form Factor Of Transceiver: ACM\\r\n    Vendor Name Of Transceiver: SFP Vendor\\r\n    Part Number Of Transceiver: 0000\\r\n       Revision Of Transceiver: 1.0\\r\n  Serial Number Of Transceiver: 0000\\r\nFC Capabilities Of Transceiver: 2,8 (Gbit/sec)\\r\n     Vendor OUI Of Transceiver: 0:5:2\\r\n      Wavelength In Nanometers: 0\\r\n      Date Code Of Transceiver: 11:04:02\\r\n       Validity Of Transceiver: true\\r\n                Connector Used: ACME Connector\\r\n                 Encoding Used: 0\\r\n      Is Internally Calibrated: true\\r\n        Received Optical Power: 10.0 (uWatts)\\r\n    Is Received Power In Range: true\\r\n SPF Transmitted Optical Power: 10.0 (uWatts)\\r\n        Is Xmit Power In Range: true\\r\n\\r\n                          Node: cl-01\\r\n                       Adapter: 0d\\r\n                   Description: Fibre Channel Target Adapt)\\r\n             Physical Protocol: ethernet\\r\n                 Maximum Speed: 10\\r\n         Administrative Status: up\\r\n            Operational Status: online\\r\n               Extended Status: ADAPTER UP\\r\n             Host Port Address: 3eb\\r\n             Firmware Revision: 1.0.0\\r\n         Data Link Rate (Gbit): 10\\r\n            Fabric Established: true\\r\n                   Fabric Name: -\\r\n        Connection Established: ptp\\r\n                     Mediatype: ptp\\r\n              Configured Speed: auto\\r\n                  Adapter WWNN: 50:0a:09:80:06:32:84:bd\\r\n                  Adapter WWPN: 50:0a:09:84:06:32:84:bd\\r\n                   Switch Port: ACME Switch:1\\r\n    Form Factor Of Transceiver: ACM\\r\n    Vendor Name Of 
Transceiver: SFP Vendor\\r\n    Part Number Of Transceiver: 0000\\r\n       Revision Of Transceiver: 1.0\\r\n  Serial Number Of Transceiver: 0000\\r\nFC Capabilities Of Transceiver: 2,8 (Gbit/sec)\\r\n     Vendor OUI Of Transceiver: 0:5:2\\r\n      Wavelength In Nanometers: 0\\r\n      Date Code Of Transceiver: 11:04:02\\r\n       Validity Of Transceiver: true\\r\n                Connector Used: ACME Connector\\r\n                 Encoding Used: 0\\r\n      Is Internally Calibrated: true\\r\n        Received Optical Power: 10.0 (uWatts)\\r\n    Is Received Power In Range: true\\r\n SPF Transmitted Optical Power: 10.0 (uWatts)\\r\n        Is Xmit Power In Range: true\\r\n\\r\n                          Node: cl-01\\r\n                       Adapter: 0e\\r\n                   Description: Fibre Channel Target Adap)\\r\n             Physical Protocol: fibre-channel\\r\n                 Maximum Speed: 16\\r\n         Administrative Status: up\\r\n            Operational Status: online\\r\n               Extended Status: ADAPTER UP\\r\n             Host Port Address: 3ec\\r\n             Firmware Revision: 1.0.0\\r\n         Data Link Rate (Gbit): 16\\r\n            Fabric Established: true\\r\n                   Fabric Name: -\\r\n        Connection Established: ptp\\r\n                     Mediatype: ptp\\r\n              Configured Speed: auto\\r\n                  Adapter WWNN: 50:0a:09:80:06:32:84:bd\\r\n                  Adapter WWPN: 50:0a:09:85:06:32:84:bd\\r\n                   Switch Port: ACME Switch:1\\r\n    Form Factor Of Transceiver: ACM\\r\n    Vendor Name Of Transceiver: SFP Vendor\\r\n    Part Number Of Transceiver: 0000\\r\n       Revision Of Transceiver: 1.0\\r\n  Serial Number Of Transceiver: 0000\\r\nFC Capabilities Of Transceiver: 10 (Gbit/sec)\\r\n     Vendor OUI Of Transceiver: 0:5:2\\r\n      Wavelength In Nanometers: 0\\r\n      Date Code Of Transceiver: 11:04:02\\r\n       Validity Of Transceiver: true\\r\n                Connector Used: ACME Connector\\r\n                 Encoding Used: 0\\r\n      Is Internally Calibrated: true\\r\n        Received Optical Power: 10.0 (uWatts)\\r\n    Is Received Power In Range: true\\r\n SPF Transmitted Optical Power: 10.0 (uWatts)\\r\n        Is Xmit Power In Range: true\\r\n5 entries were displayed.\\r\"\"\"\n\nDISKS_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                  Disk: NET-1.1\\r\n        Container Type: aggregate\\r\n            Owner/Home: cl-01 / cl-01\\r\n               DR Home: -\\r\n    Stack ID/Shelf/Bay: -  / -  / 16\\r\n                   LUN: 0\\r\n                 Array: NETAPP_VD_1\\r\n                Vendor: NETAPP\\r\n                 Model: VD-1000MB-FZ-520\\r\n         Serial Number: 07294300\n                   UID: 4E455441:50502020:56442D31:\\r\n                   BPS: 520\\r\n         Physical Size: 1.00GB\\r\n              Position: parity\\r\nChecksum Compatibility: block\\r\n             Aggregate: aggr0\\r\n                  Plex: plex0\\r\nPaths:\\r\n                LUN Initiatr Side Target Side      Link\\r\nController IniD SwitcSwitch Port Acc Use  Target Port     TPGN  Speed/s IOPS\\r\n--------------- -------------------- ---  -------------------- -------- ----\\r\ncl-01      v1 0 N/A  N/A         AO  INU  0000000000000000   0 0 Gb/S 0    0\\r\ncl-01      v5 0 N/A  N/A         AO  RDY  0000000000000000   0 0 Gb/S 0    0\\r\n\\r\nErrors:\\r\n-\n                  Disk: NET-1.2\\r\n        Container Type: aggregate\\r\n            Owner/Home: cl-01 / cl-01\\r\n    
           DR Home: -\\r\n    Stack ID/Shelf/Bay: -  / -  / 17\\r\n                   LUN: 0\\r\n                 Array: NETAPP_VD_1\\r\n                Vendor: NETAPP\\r\n                 Model: VD-1000MB-FZ-520\\r\n         Serial Number: 07294301\\r\n                   UID: 4E455441:50502\\r\n                   BPS: 520\\r\n         Physical Size: 1.00GB\\r\n              Position: dparity\\r\nChecksum Compatibility: block\\r\n             Aggregate: aggr1\\r\n                  Plex: plex0\\r\nPaths:\\r\n                LUN Initiatr Side Target Side      Link\\r\nController IniD SwitcSwitch Port Acc Use  Target Port     TPGN  Speed/s IOPS\\r\n--------------- -------------------- ---  -------------------- -------- ----\\r\ncl-01      v1 0 N/A  N/A         AO  INU  0000000000000000   0 0 Gb/S 0    0\\r\ncl-01      v5 0 N/A  N/A         AO  RDY  0000000000000000   0 0 Gb/S 0    0\\r\n\\r\nErrors:\\r\n-\\r\n\"\"\"\n\nPHYSICAL_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\nDisk             Type    Vendor   Model                Revision     RPM  BPS\\r\n---------------- ------- -------- -------------------- -------- ------- ----\\r\nNET-1.1          FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294300\\r\nNET-1.2          FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294301\\r\nNET-1.3          FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294302\\r\nNET-1.4          FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294303\\r\nNET-1.5          FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294304\\r\nNET-1.6          FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294305\\r\nNET-1.7          FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294306\\r\nNET-1.8          FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294307\\r\nNET-1.9          FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904200\\r\nNET-1.10         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904201\\r\nNET-1.11         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904202\\r\nNET-1.12         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904203\\r\nNET-1.13         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904204\\r\nNET-1.14         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294308\\r\nNET-1.15         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294309\\r\nNET-1.16         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294310\\r\nNET-1.17         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294311\\r\nNET-1.18         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904205\\r\nNET-1.19         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 
07904206\\r\nNET-1.20         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904207\\r\nNET-1.21         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904208\\r\nNET-1.22         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904209\\r\nNET-1.23         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904210\\r\nNET-1.24         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904311\\r\nNET-1.25         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904312\\r\nNET-1.26         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07904313\\r\nNET-1.27         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294312\\r\nNET-1.28         FCAL    NETAPP   VD-1000MB-FZ-520     0042       15000  520\\r\n                 SerialNumber: 07294313\\r\n28 entries were displayed.\\r\"\"\"\n\nERROR_DISK_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\nDisk             Error Type        Error Text\\r\n---------------- ----------------- ----------------------------------\\r\nNET-1.25         diskfail          .\"\"\"\n\nQTREES_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                      Vserver Name: svm1\\r\n                       Volume Name: svm1_root\\r\n                        Qtree Name: \"\"\\r\n  Actual (Non-Junction) Qtree Path: /vol/svm1_root\\r\n                    Security Style: ntfs\\r\n                       Oplock Mode: enable\\r\n                  Unix Permissions: -\\r\n                          Qtree Id: 0\\r\n                      Qtree Status: normal\\r\n                     Export Policy: default\\r\n        Is Export Policy Inherited: true\\r\n\\r\n                      Vserver Name: svm1\\r\n                       Volume Name: vol_svm1_1\\r\n                        Qtree Name: \"\"\\r\n  Actual (Non-Junction) Qtree Path: /vol/vol_svm1_1\\r\n                    Security Style: ntfs\\r\n                       Oplock Mode: enable\\r\n                  Unix Permissions: -\\r\n                          Qtree Id: 0\\r\n                      Qtree Status: normal\\r\n                     Export Policy: default\\r\n        Is Export Policy Inherited: true\\r\n\\r\n                      Vserver Name: svm1\\r\n                       Volume Name: vol_svm1_1\\r\n                        Qtree Name: qtree_svm1_1\\r\n  Actual (Non-Junction) Qtree Path: /vol/vol_svm1_1/qtree_svm1_1\\r\n                    Security Style: unix\\r\n                       Oplock Mode: enable\\r\n                  Unix Permissions: ---rwxrwxrwx\\r\n                          Qtree Id: 1\\r\n                      Qtree Status: normal\\r\n                     Export Policy: default\\r\n        Is Export Policy Inherited: true\\r\n\\r\n                      Vserver Name: svm1\\r\n                       Volume Name: vol_svm1_2\\r\n                        Qtree Name: \"\"\\r\n  Actual (Non-Junction) Qtree Path: /vol/vol_svm1_2\\r\n                    Security Style: ntfs\\r\n                       Oplock Mode: enable\\r\n                  Unix Permissions: -\\r\n                          Qtree Id: 0\\r\n                      Qtree Status: normal\\r\n                     Export Policy: 
default\\r\n        Is Export Policy Inherited: true\"\"\"\n\nSHARE_VSERVER_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                               Admin      Operational Root\\r\nVserver     Type    Subtype    State      State       Volume     Aggregate\\r\n----------- ------- ---------- ---------- ----------- ---------- ----------\\r\nsvm4.example.com      data    default    running    running  SVC_FC_ NETAPP\"\"\"\n\nSHARES_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                                      Vserver: svm4.example.com\\r\n                                        Share: admin$\\r\n                     CIFS Server NetBIOS Name: NETAPP-NODE01\\r\n                                         Path: /\\r\n                             Share Properties: browsable\\r\n                           Symlink Properties: -\\r\n                      File Mode Creation Mask: -\\r\n                 Directory Mode Creation Mask: -\\r\n                                Share Comment: -\\r\n                                    Share ACL: -\\r\n                File Attribute Cache Lifetime: -\\r\n                                  Volume Name: svm4examplecom_root\\r\n                                Offline Files: -\\r\n                Vscan File-Operations Profile: standard\\r\n            Maximum Tree Connections on Share: 4294967295\\r\n                   UNIX Group for File Create: -\\r\n\\r\n                                      Vserver: svm4.example.com\\r\n                                        Share: c$\\r\n                     CIFS Server NetBIOS Name: NETAPP-NODE01\\r\n                                         Path: /\\r\n                             Share Properties: oplocks\\r\n                                               browsable\\r\n                                               changenotify\\r\n                                               show-previous-versions\\r\n                           Symlink Properties: symlinks\\r\n                      File Mode Creation Mask: -\\r\n                 Directory Mode Creation Mask: -\\r\n                                Share Comment: -\\r\n                                    Share ACL: BUILTIN\\r\n                File Attribute Cache Lifetime: -\\r\n                                  Volume Name: svm4examplecom_root\\r\n                                Offline Files: -\\r\n                Vscan File-Operations Profile: standard\\r\n            Maximum Tree Connections on Share: 4294967295\\r\n                   UNIX Group for File Create: -\\r\n\\r\n                                      Vserver: svm4.example.com\\r\n                                        Share: etc\\r\n                     CIFS Server NetBIOS Name: NETAPP-NODE01\\r\n                                         Path: /.vsadmin/config/etc\\r\n                             Share Properties: browsable\\r\n                                               changenotify\\r\n                                               oplocks\\r\n                                               show-previous-versions\\r\n                           Symlink Properties: enable\\r\n                      File Mode Creation Mask: -\\r\n                 Directory Mode Creation Mask: -\\r\n                                Share Comment: -\\r\n                                    Share ACL: Everyone / Full Control\\r\n                File Attribute Cache Lifetime: -\\r\n                                  Volume Name: svm4examplecom_root\\r\n                             
   Offline Files: manual\\r\n                Vscan File-Operations Profile: standard\\r\n            Maximum Tree Connections on Share: 4294967295\\r\n                   UNIX Group for File Create: -\\r\n\\r\n                                      Vserver: svm4.example.com\\r\n                                        Share: ipc$\\r\n                     CIFS Server NetBIOS Name: NETAPP-NODE01\\r\n                                         Path: /\\r\n                             Share Properties: browsable\\r\n                           Symlink Properties: -\\r\n                      File Mode Creation Mask: -\\r\n                 Directory Mode Creation Mask: -\\r\n                                Share Comment: -\\r\n                                    Share ACL: -\\r\n                File Attribute Cache Lifetime: -\\r\n                                  Volume Name: svm4examplecom_root\\r\n                                Offline Files: -\\r\n                Vscan File-Operations Profile: standard\\r\n            Maximum Tree Connections on Share: 4294967295\\r\n                   UNIX Group for File Create: -\\r\n\\r\n                                      Vserver: svm4.example.com\\r\n                                        Share: vol_svm4_1\\r\n                     CIFS Server NetBIOS Name: NETAPP-NODE01\\r\n                                         Path: /vol_svm4_1\\r\n                             Share Properties: oplocks\\r\n                                               browsable\\r\n                                               changenotify\\r\n                                               show-previous-versions\\r\n                           Symlink Properties: symlinks\\r\n                      File Mode Creation Mask: -\\r\n                 Directory Mode Creation Mask: -\\r\n                                Share Comment: -\\r\n                                    Share ACL: Everyone / Full Control\\r\n                File Attribute Cache Lifetime: -\\r\n                                  Volume Name: vol_svm4_1\\r\n                                Offline Files: manual\\r\n                Vscan File-Operations Profile: standard\\r\n            Maximum Tree Connections on Share: 4294967295\\r\n                   UNIX Group for File Create: -\"\"\"\n\nSHARES_AGREEMENT_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\nvserver allowed-protocols\\r\n------- -----------------\\r\nsvm4.example.com\nnfs,cifs,fcp,iscsi\\r\n7 entries were displayed.\\r\n\"\"\"\n\nTHIN_FS_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\nVserver Volume Aggregate State Type Size Available Used%\\r\n--------- ------------ ------------ ---------- ---- -\\r\nsvm1 vol_svm1_2 aggr1 online RW 2GB 2.00GB 0%\\r\"\"\"\n\nTRAP_MAP = {\n    '1.3.6.1.4.1.789.1.1.12.0':\n        'A Health Monitor has cleared an alert. 
'\n        '[Alert Id = DisabledInuseSASPort_Alert , Alerting Resource = 0a].',\n    'controller_name': 'cl-01',\n    '1.3.6.1.4.1.789.1.1.9.0': '1-80-000008'\n}\n\n\nQUOTAS_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                 Vserver: svm5\\r\n             Policy Name: default\\r\n             Volume Name: svm5_vol1\\r\n                    Type: tree\\r\n                  Target: qtree_21052021_110317_94\\r\n              Qtree Name: \"\"\\r\n            User Mapping: -\\r\n              Disk Limit: 4.88MB\\r\n             Files Limit: 1000\\r\nThreshold for Disk Limit: 4.88MB\\r\n         Soft Disk Limit: 4.88MB\\r\n        Soft Files Limit: 1000\\r\n\\r\n                 Vserver: svm5\\r\n             Policy Name: default\\r\n             Volume Name: svm5_vol1\\r\n                    Type: user\\r\n                  Target: \"\"\\r\n              Qtree Name: \"\"\\r\n            User Mapping: off\\r\n              Disk Limit: 4.88MB\\r\n             Files Limit: 1000\\r\nThreshold for Disk Limit: 4.88MB\\r\n         Soft Disk Limit: 4.88MB\\r\n        Soft Files Limit: 1000\\r\n\\r\n                 Vserver: svm5\\r\n             Policy Name: default\\r\n             Volume Name: svm5_vol1\\r\n                    Type: group\\r\n                  Target: \"\"\\r\n              Qtree Name: \"\"\\r\n            User Mapping: -\\r\n              Disk Limit: 4.88MB\\r\n             Files Limit: 1000\\r\nThreshold for Disk Limit: 4.88MB\\r\n         Soft Disk Limit: 4.88MB\\r\n        Soft Files Limit: 1000\\r\n\\r\n                 Vserver: svm5\\r\n             Policy Name: default\\r\n             Volume Name: svm5_vol1\\r\n                    Type: group\\r\n                  Target: \"\"\\r\n              Qtree Name: qtree_08052021_152034_44\\r\n            User Mapping: -\\r\n              Disk Limit: 4.88MB\\r\n             Files Limit: 100\\r\nThreshold for Disk Limit: 4.88MB\\r\n         Soft Disk Limit: 4.88MB\\r\n        Soft Files Limit: 100\\r\n\\r\n                 Vserver: svm5\\r\n             Policy Name: default\\r\n             Volume Name: svm5_vol1\\r\n                    Type: group\\r\n                  Target: pcuser\\r\n              Qtree Name: \"\"\\r\n            User Mapping: -\\r\n              Disk Limit: 4.88MB\\r\n             Files Limit: 1000\\r\nThreshold for Disk Limit: 4.88MB\\r\n         Soft Disk Limit: 4.88MB\\r\n        Soft Files Limit: 1000\\r\n5 entries were displayed.\"\"\"\n\nNFS_SHARE_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\n                                   Vserver Name: svm4.example.com\\r\n                                    Volume Name: svm4examplecom_root\\r\n                                 Aggregate Name: aggr1\\r\n  List of Aggregates for FlexGroup Constituents: -\\r\n                                    Volume Size: 20MB\\r\n                             Volume Data Set ID: 1036\\r\n                      Volume Master Data Set ID: 2155388532\\r\n                                   Volume State: online\\r\n                                   Volume Style: flex\\r\n                          Extended Volume Style: flexvol\\r\n                         Is Cluster-Mode Volume: true\\r\n                          Is Constituent Volume: false\\r\n                                  Export Policy: default\\r\n                                        User ID: -\\r\n                                       Group ID: -\\r\n                                 Security Style: ntfs\\r\n            
                   UNIX Permissions: ------------\\r\n                                  Junction Path: /\\r\n                           Junction Path Source: -\\r\n                                Junction Active: true\\r\n                         Junction Parent Volume: -\\r\n                                        Comment:\\r\n                                 Available Size: 18.79MB\\r\n                                Filesystem Size: 20MB\\r\n                        Total User-Visible Size: 19MB\\r\n                                      Used Size: 220KB\\r\n                                Used Percentage: 6%\\r\n           Volume Nearly Full Threshold Percent: 95%\\r\n                  Volume Full Threshold Percent: 98%\\r\n           Maximum Autosize (for flexvols only): 24MB\\r\n                               Minimum Autosize: 20MB\\r\n             Autosize Grow Threshold Percentage: 85%\\r\n           Autosize Shrink Threshold Percentage: 50%\\r\n                                  Autosize Mode: off\\r\n            Total Files (for user-visible data): 566\\r\n             Files Used (for user-visible data): 104\\r\n                      Space Guarantee in Effect: true\\r\n                            Space SLO in Effect: true\\r\n                                      Space SLO: none\\r\n                          Space Guarantee Style: volume\\r\n                             Fractional Reserve: 100%\\r\n                                    Volume Type: RW\\r\n              Snapshot Directory Access Enabled: true\\r\n             Space Reserved for Snapshot Copies: 5%\\r\n                          Snapshot Reserve Used: 100%\\r\n                                Snapshot Policy: default\\r\n                                  Creation Time: Sat Mar 13 20:15:43 20\\r\n                                       Language: C.UTF-8\\r\n                                   Clone Volume: false\\r\n                                      Node name: cl-01\\r\n                      Clone Parent Vserver Name: -\\r\n                        FlexClone Parent Volume: -\\r\n                                  NVFAIL Option: off\\r\n                          Volume's NVFAIL State: false\\r\n        Force NVFAIL on MetroCluster Switchover: off\\r\n                      Is File System Size Fixed: false\\r\n                     (DEPRECATED)-Extent Option: off\\r\n                  Reserved Space for Overwrites: 0B\\r\n              Primary Space Management Strategy: volume_grow\\r\n                       Read Reallocation Option: off\\r\n    Naming Scheme for Automatic Snapshot Copies: create_time\\r\n               Inconsistency in the File System: false\\r\n                   Is Volume Quiesced (On-Disk): false\\r\n                 Is Volume Quiesced (In-Memory): false\\r\n      Volume Contains Shared or Compressed Data: false\\r\n              Space Saved by Storage Efficiency: 0B\\r\n         Percentage Saved by Storage Efficiency: 0%\\r\n                   Space Saved by Deduplication: 0B\\r\n              Percentage Saved by Deduplication: 0%\\r\n                  Space Shared by Deduplication: 0B\\r\n                     Space Saved by Compression: 0B\\r\n          Percentage Space Saved by Compression: 0%\\r\n            Volume Size Used by Snapshot Copies: 1.00MB\\r\n                                     Block Type: 64-bit\\r\n                               Is Volume Moving: false\\r\n                 Flash Pool Caching Eligibility: read-write\\r\n  Flash Pool Write Caching Ineligibility Reason: -\\r\n              
       Managed By Storage Service: -\\r\nCreate Namespace Mirror Constituents For SnapDiff Use: -\\r\n                        Constituent Volume Role: -\\r\n                          QoS Policy Group Name: -\\r\n                            Caching Policy Name: -\\r\n                Is Volume Move in Cutover Phase: false\\r\n        Number of Snapshot Copies in the Volume: 8\\r\nVBN_BAD may be present in the active filesystem: false\\r\n                Is Volume on a hybrid aggregate: false\\r\n                       Total Physical Used Size: 1.21MB\\r\n                       Physical Used Percentage: 6%\\r\n                                  List of Nodes: -\\r\n                          Is Volume a FlexGroup: false\\r\n                                  SnapLock Type: non-snaplock\\r\n                          Vserver DR Protection: -\\r\n\\r\n                                   Vserver Name: svm4.example.com\\r\n                                    Volume Name: vol_svm4_1\\r\n                                 Aggregate Name: aggr1\\r\n  List of Aggregates for FlexGroup Constituents: -\\r\n                                    Volume Size: 1GB\\r\n                             Volume Data Set ID: 1037\\r\n                      Volume Master Data Set ID: 2155388533\\r\n                                   Volume State: online\\r\n                                   Volume Style: flex\\r\n                          Extended Volume Style: flexvol\\r\n                         Is Cluster-Mode Volume: true\\r\n                          Is Constituent Volume: false\\r\n                                  Export Policy: default\\r\n                                        User ID: 0\\r\n                                       Group ID: 0\\r\n                                 Security Style: mixed\\r\n                               UNIX Permissions: ---rwxrwxrwx\\r\n                                  Junction Path: /vol_svm4_1\\r\n                           Junction Path Source: RW_volume\\r\n                                Junction Active: true\\r\n                         Junction Parent Volume: svm4examplecom_root\\r\n                                        Comment:\\r\n                                 Available Size: 972.5MB\\r\n                                Filesystem Size: 1GB\\r\n                        Total User-Visible Size: 972.8MB\\r\n                                      Used Size: 340KB\\r\n                                Used Percentage: 5%\\r\n           Volume Nearly Full Threshold Percent: 95%\\r\n                  Volume Full Threshold Percent: 98%\\r\n           Maximum Autosize (for flexvols only): 1.20GB\\r\n                               Minimum Autosize: 1GB\\r\n             Autosize Grow Threshold Percentage: 85%\\r\n           Autosize Shrink Threshold Percentage: 50%\\r\n                                  Autosize Mode: off\\r\n            Total Files (for user-visible data): 31122\\r\n             Files Used (for user-visible data): 97\\r\n                      Space Guarantee in Effect: true\\r\n                            Space SLO in Effect: true\\r\n                                      Space SLO: none\\r\n                          Space Guarantee Style: volume\\r\n                             Fractional Reserve: 100%\\r\n                                    Volume Type: RW\\r\n              Snapshot Directory Access Enabled: true\\r\n             Space Reserved for Snapshot Copies: 5%\\r\n                          Snapshot Reserve Used: 3%\\r\n                                
Snapshot Policy: default\\r\n                                  Creation Time: Sat Mar 13 20:35:56 20\\r\n                                       Language: C.UTF-8\\r\n                                   Clone Volume: false\\r\n                                      Node name: cl-01\\r\n                      Clone Parent Vserver Name: -\\r\n                        FlexClone Parent Volume: -\\r\n                                  NVFAIL Option: off\\r\n                          Volume's NVFAIL State: false\\r\n        Force NVFAIL on MetroCluster Switchover: off\\r\n                      Is File System Size Fixed: false\\r\n                     (DEPRECATED)-Extent Option: off\\r\n                  Reserved Space for Overwrites: 0B\\r\n              Primary Space Management Strategy: volume_grow\\r\n                       Read Reallocation Option: off\\r\n    Naming Scheme for Automatic Snapshot Copies: create_time\\r\n               Inconsistency in the File System: false\\r\n                   Is Volume Quiesced (On-Disk): false\\r\n                 Is Volume Quiesced (In-Memory): false\\r\n      Volume Contains Shared or Compressed Data: false\\r\n              Space Saved by Storage Efficiency: 0B\\r\n         Percentage Saved by Storage Efficiency: 0%\\r\n                   Space Saved by Deduplication: 0B\\r\n              Percentage Saved by Deduplication: 0%\\r\n                  Space Shared by Deduplication: 0B\\r\n                     Space Saved by Compression: 0B\\r\n          Percentage Space Saved by Compression: 0%\\r\n            Volume Size Used by Snapshot Copies: 1.45MB\\r\n                                     Block Type: 64-bit\\r\n                               Is Volume Moving: false\\r\n                 Flash Pool Caching Eligibility: read-write\\r\n  Flash Pool Write Caching Ineligibility Reason: -\\r\n                     Managed By Storage Service: -\\r\nCreate Namespace Mirror Constituents For SnapDiff Use: -\\r\n                        Constituent Volume Role: -\\r\n                          QoS Policy Group Name: -\\r\n                            Caching Policy Name: -\\r\n                Is Volume Move in Cutover Phase: false\\r\n        Number of Snapshot Copies in the Volume: 8\\r\nVBN_BAD may be present in the active filesystem: false\\r\n                Is Volume on a hybrid aggregate: false\\r\n                       Total Physical Used Size: 1.78MB\\r\n                       Physical Used Percentage: 0%\\r\n                                  List of Nodes: -\\r\n                          Is Volume a FlexGroup: false\\r\n                                  SnapLock Type: non-snaplock\\r\n                          Vserver DR Protection: -\\r\n8 entries were displayed.\\r\"\"\"\n\nNODE_IPS_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\nvserver lif         address\\r\n------- ----------- ---------------\\r\ncl      cl-01_mgmt1 192.168.159.130\"\"\"\n\nCLUSTER_IPS_INFO = \"\"\"----cluster----\\r\nlast login time : 12 456 789\\r\n\\r\nvserver lif         address\\r\n------- ------------ ---------------\\r\ncl      cluster_mgmt 192.168.159.131\"\"\"\n\nCONTROLLER_IP_INFO = \"\"\"vserver   lif     curr-node address\\r\n--------- --------------- --------- ------------\\r\nNetappFSA Netapp-01_mgmt1 cl-01 8.44.162.245\"\"\"\n\nRESOURCE_METRICS = {\n    'storage':\n        ['iops', 'readIops', 'writeIops', 'throughput',\n         'readThroughput', 'writeThroughput', 'responseTime'],\n    'storagePool':\n        ['iops', 'readIops', 'writeIops', 
'throughput',\n         'readThroughput', 'writeThroughput', 'responseTime'],\n    'volume':\n        ['iops', 'readIops', 'writeIops', 'throughput',\n         'readThroughput', 'writeThroughput', 'responseTime',\n         'cacheHitRatio', 'readCacheHitRatio', 'writeCacheHitRatio',\n         'ioSize', 'readIoSize', 'writeIoSize'],\n    'controller':\n        ['iops', 'readIops', 'writeIops', 'throughput',\n         'readThroughput', 'writeThroughput', 'responseTime'],\n    'port':\n        ['iops', 'readIops', 'writeIops', 'throughput',\n         'readThroughput', 'writeThroughput', 'responseTime'],\n    'disk':\n        ['iops', 'readIops', 'writeIops', 'throughput',\n         'readThroughput', 'writeThroughput', 'responseTime'],\n    'filesystem':\n        ['iops', 'readIops', 'writeIops', 'throughput',\n         'readThroughput', 'writeThroughput',\n         'ioSize', 'readIoSize', 'writeIoSize'],\n}\n\nCLUSTER_PER_INFO = [\n    {\n        \"timestamp\": \"2017-01-25T11:20:00Z\",\n        \"status\": \"ok\",\n        \"_links\": {\n            \"self\": {\n                \"href\": \"/api/resourcelink\"\n            }\n        },\n        \"throughput\": {\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"latency\": {\n            \"other\": 0,\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"iops\": {\n            \"other\": 0,\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"duration\": \"PT15S\"\n    }\n]\n\nPOOL_PER_INFO = [\n    {\n        \"timestamp\": \"2017-01-25T11:20:00Z\",\n        \"status\": \"ok\",\n        \"_links\": {\n            \"self\": {\n                \"href\": \"/api/resourcelink\"\n            }\n        },\n        \"throughput\": {\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"latency\": {\n            \"other\": 0,\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"iops\": {\n            \"other\": 0,\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"duration\": \"PT15S\"\n    }\n]\n\nLUN_PER_INFO = [\n    {\n        \"timestamp\": \"2017-01-25T11:20:00Z\",\n        \"status\": \"ok\",\n        \"_links\": {\n            \"self\": {\n                \"href\": \"/api/resourcelink\"\n            }\n        },\n        \"throughput\": {\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"latency\": {\n            \"other\": 0,\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"iops\": {\n            \"other\": 0,\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"duration\": \"PT15S\"\n    }\n]\n\nFS_PER_INFO = [\n    {\n        \"timestamp\": \"2017-01-25T11:20:00Z\",\n        \"status\": \"ok\",\n        \"_links\": {\n            \"self\": {\n                \"href\": \"/api/resourcelink\"\n            }\n        },\n        \"throughput\": {\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"latency\": {\n            \"other\": 0,\n            \"read\": 
\"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"iops\": {\n            \"other\": 0,\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"duration\": \"PT15S\"\n    }\n]\n\nFS_REST_INFO = [\n    {\n        \"name\": \"vol1\",\n        \"uuid\": \"02c9e252-41be-11e9-81d5-00a0986138f7\",\n        \"svm\": {\n            \"_links\": {\n                \"self\": {\n                    \"href\": \"/api/resourcelink\"\n                }\n            },\n            \"name\": \"svm1\",\n            \"uuid\": \"02c9e252-41be-11e9-81d5-00a0986138f7\"\n        },\n    }\n]\n\nPORT_REST_INFO = [\n    {\n        \"name\": \"e0a\",\n        \"uuid\": \"02c9e252-41be-11e9-81d5-00a0986138f7\",\n        \"node\": {\n            \"_links\": {\n                \"self\": {\n                    \"href\": \"/api/resourcelink\"\n                }\n            },\n            \"name\": \"node1\",\n            \"uuid\": \"02c9e252-41be-11e9-81d5-00a0986138f7\"\n        },\n    }\n]\n\nFC_PER_INFO = [\n    {\n        \"timestamp\": \"2017-01-25T11:20:00Z\",\n        \"status\": \"ok\",\n        \"_links\": {\n            \"self\": {\n                \"href\": \"/api/resourcelink\"\n            }\n        },\n        \"throughput\": {\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"latency\": {\n            \"other\": 0,\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"iops\": {\n            \"other\": 0,\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"duration\": \"PT15S\"\n    }\n]\n\nETH_PER_INFO = [\n    {\n        \"timestamp\": \"2017-01-25T11:20:00Z\",\n        \"status\": \"ok\",\n        \"_links\": {\n            \"self\": {\n                \"href\": \"/api/resourcelink\"\n            }\n        },\n        \"throughput\": {\n            \"read\": \"200\",\n            \"total\": \"1000\",\n            \"write\": \"100\"\n        },\n        \"duration\": \"PT15S\"\n    }\n]\n\nFC_INITIATOR_INFO = \"\"\"             Vserver: PSA-xiejun00295347\\r\n   Logical Interface: PSA02-xiejun\\r\n      Initiator WWPN: 10:00:00:00:c9:d5:b9:6e\\r\n        Port Address: 50400\\r\n      Initiator WWNN: 20:00:00:00:c9:d5:b9:6e\\r\nInitiator WWPN Alias: -\\r\n         Igroup Name: test00101, Test_MKL_Suse_8.44.133.65\\r\n\\r\n             Vserver: SVC_FC\\r\n   Logical Interface: Migration_NetApp02_0c_02\\r\n      Initiator WWPN: 10:00:00:00:c9:d5:b9:6e\\r\n        Port Address: 50400\\r\n      Initiator WWNN: 20:00:00:00:c9:d5:b9:6e\\r\nInitiator WWPN Alias: -\\r\n         Igroup Name: -\\r\n\\r\n             Vserver: SVM_VDF\\r\n   Logical Interface: VDF_test02\\r\n      Initiator WWPN: 10:00:00:00:c9:d5:b9:6e\\r\n        Port Address: 50400\\r\n      Initiator WWNN: 20:00:00:00:c9:d5:b9:6e\\r\nInitiator WWPN Alias: -\\r\n         Igroup Name: -\\r\n\\r\n             Vserver: xiejun_00295347\\r\n   Logical Interface: xiejun_port1\\r\n      Initiator WWPN: 10:00:00:00:c9:d5:b9:6e\\r\n        Port Address: 50400\\r\n      Initiator WWNN: 20:00:00:00:c9:d5:b9:6e\\r\nInitiator WWPN Alias: -\\r\n         Igroup Name: -\\r\n4 entries were displayed.\n\"\"\"\n\nISCSI_INITIATOR_INFO = \"\"\"      Vserver: svm3\\r\n                    Target Portal Group: zb_IPV602\\r\n                      Target 
Session ID: 357\\r\nInitiator Name: iqn.2006-08.com.huawei:21004447dcca426::01\\r\n                       Initiator Alias : -\\r\n                            TPGroup Tag: 1062 \\r\n                   Initiator Session ID: 80:12:34:58:78:9a\\r\n                            Igroup Name: -\"\"\"\n\nHOSTS_INFO = \"\"\"          Vserver Name: svm1\\r\n           Igroup Name: fcstart1\\r\n              Protocol: mixed\\r\n               OS Type: linux\\r\nPortset Binding Igroup: portgroup\\r\n           Igroup UUID: c5ca5750-121f-11ec-b66c-000c29bfc4d7\\r\n                  ALUA: true\\r\n            Initiators: 20:01:00:0c:29:bf:c4:d7 (not logged in)\\r\n10:00:00:00:c9:d5:b9:6e (not logged in)\\r\niqn.2006-08.com.huawei:21004447dcca426::0 (not logged in)\\r\n\\r\n          Vserver Name: svm3\\r\n           Igroup Name: svm3\\r\n              Protocol: mixed\\r\n               OS Type: windows\\r\nPortset Binding Igroup: portgroup2\\r\n           Igroup UUID: 9a6c2496-174b-11ec-b66c-000c29bfc4d7\\r\n                  ALUA: true\\r\nInitiators: iqn.2006-08.com.huawei:21004447dcca426::0 (not logged in)\\r\n10:00:00:00:c9:d5:b9:6e (not logged in)\"\"\"\n\nPORT_SET_INFO = \"\"\"        Vserver Name: svm1\\r\n    Portset Name: portgroup\\r\n LIF Or TPG Name: ontap-01_fc_lif_1, ontap-01_fcoe_lif_1, fc1\\r\n        Protocol: fcp\\r\n Number Of Ports: 3\\r\nBound To Igroups: fcstart1\\r\n\\r\n    Vserver Name: svm3\\r\n    Portset Name: portgroup2\\r\n LIF Or TPG Name: ontap-01_iscsi_lif_1\\r\n        Protocol: iscsi\\r\n Number Of Ports: 1\\r\nBound To Igroups: svm3\\r\n2 entries were displayed.\"\"\"\n\nLIF_INFO = \"\"\"                    Vserver Name: svm1\\r\n          Logical Interface Name: ontap-01_fc_lif_1\\r\n                            Role: data\\r\n                   Data Protocol: fcp\\r\n                       Home Node: ontap-01\\r\n                       Home Port: 0a\\r\n                    Current Node: ontap-01\\r\n                    Current Port: 0a\\r\n              Operational Status: down\\r\n                 Extended Status: Groovy, man!\\r\n                         Is Home: true\\r\n                 Network Address: -\\r\n                         Netmask: -\\r\n             Bits in the Netmask: -\\r\n                     Subnet Name: -\\r\n           Administrative Status: up\\r\n                 Failover Policy: disabled\\r\n                 Firewall Policy: -\\r\n                     Auto Revert: false\\r\n   Fully Qualified DNS Zone Name: none\\r\n         DNS Query Listen Enable: -\\r\n             Failover Group Name: -\\r\n                        FCP WWPN: 20:00:00:0c:29:bf:c4:d7\\r\n                  Address family: -\\r\n                         Comment: -\\r\n                  IPspace of LIF: -\\r\n  Is Dynamic DNS Update Enabled?: -\\r\n\\r\n                    Vserver Name: svm1\\r\n          Logical Interface Name: ontap-01_fcoe_lif_1\\r\n                            Role: data\\r\n                   Data Protocol: fcp\\r\n                       Home Node: ontap-01\\r\n                       Home Port: 0c\\r\n                    Current Node: ontap-01\\r\n                    Current Port: 0c\\r\n              Operational Status: down\\r\n                 Extended Status: Groovy, man!\\r\n                         Is Home: true\\r\n                 Network Address: -\\r\n                         Netmask: -\\r\n             Bits in the Netmask: -\\r\n                     Subnet Name: -\\r\n           Administrative Status: up\\r\n                 Failover Policy: 
disabled\\r\n                 Firewall Policy: -\\r\n                     Auto Revert: false\\r\n   Fully Qualified DNS Zone Name: none\\r\n         DNS Query Listen Enable: -\\r\n             Failover Group Name: -\\r\n                        FCP WWPN: 20:01:00:0c:29:bf:c4:d7\\r\n                  Address family: -\\r\n                         Comment: -\\r\n                  IPspace of LIF: -\\r\n  Is Dynamic DNS Update Enabled?: -\\r\n\\r\n                    Vserver Name: svm3\\r\n          Logical Interface Name: ontap-01_iscsi_lif_1\\r\n                            Role: data\\r\n                   Data Protocol: iscsi\\r\n                       Home Node: ontap-01\\r\n                       Home Port: e0a\\r\n                    Current Node: ontap-01\\r\n                    Current Port: e0a\\r\n              Operational Status: up\\r\n                 Extended Status: -\\r\n                         Is Home: true\\r\n                 Network Address: 192.168.159.140\\r\n                         Netmask: 255.255.255.0\\r\n             Bits in the Netmask: 24\\r\n                     Subnet Name: -\\r\n           Administrative Status: up\\r\n                 Failover Policy: disabled\\r\n                 Firewall Policy: data\\r\n                     Auto Revert: false\\r\n   Fully Qualified DNS Zone Name: none\\r\n         DNS Query Listen Enable: false\\r\n             Failover Group Name: -\\r\n                        FCP WWPN: -\\r\n                  Address family: ipv4\\r\n                         Comment: -\\r\n                  IPspace of LIF: Default\\r\n  Is Dynamic DNS Update Enabled?: false\"\"\"\n\nLUN_MAPPING_INFO = \"\"\"          Vserver Name: svm1\\r\n              LUN Path: /vol/lun_1_vol/lun_1\\r\n           Volume Name: lun_1_vol\\r\n            Qtree Name: \"\"\\r\n              LUN Name: lun_1\\r\n           Igroup Name: fcstart1\\r\n        Igroup OS Type: windows\\r\n  Igroup Protocol Type: fcp\\r\n                LUN ID: 123\\r\nPortset Binding Igroup: portgroup\\r\n                  ALUA: true\\r\n            Initiators: 20:00:00:0c:29:bf:c4:d7, 10:00:00:00:c9:d5:b9:6e\\r\n              LUN Node: ontap-01\\r\n       Reporting Nodes: ontap-01\\r\n\\r\n          Vserver Name: svm3\\r\n              LUN Path: /vol/svm3_lun/svm3_lun\\r\n           Volume Name: svm3_lun\\r\n            Qtree Name: \"\"\\r\n              LUN Name: svm3_lun\\r\n           Igroup Name: svm3\\r\n        Igroup OS Type: windows\\r\n  Igroup Protocol Type: iscsi\\r\n                LUN ID: 0\\r\nPortset Binding Igroup: portgroup2\\r\n                  ALUA: true\\r\n            Initiators: iqn.2006-08.com.huawei:21004447dcca426::0\\r\n              LUN Node: ontap-01\\r\n       Reporting Nodes: ontap-01\\r\n2 entries were displayed.\"\"\"\n\nMAPPING_LUN_INFO = \"\"\"              Vserver Name: svm1\\r\n                  LUN Path: /vol/lun_1_vol/lun_1\\r\n               Volume Name: lun_1_vol\\r\n                Qtree Name: \"\"\\r\n                  LUN Name: lun_1\\r\n                  LUN Size: 1.00GB\\r\n                   OS Type: windows_2008\\r\n         Space Reservation: enabled\\r\n             Serial Number: wpEzy]RQjLqN\\r\n       Serial Number (Hex): 7770457a795d52516a4c714e\\r\n                   Comment:\\r\nSpace Reservations Honored: true\\r\n          Space Allocation: disabled\\r\n                     State: online\\r\n                  LUN UUID: 2aa5a7ab-efbe-41f3-a4bf-dcd741e641a1\\r\n                    Mapped: mapped\\r\n          Device Legacy ID: -\\r\n          
Device Binary ID: -\\r\n            Device Text ID: -\\r\n                 Read Only: false\\r\n     Fenced Due to Restore: false\\r\n                 Used Size: 0\\r\n       Maximum Resize Size: 502.0GB\\r\n             Creation Time: 9/10/2021 09:57:47\\r\n                     Class: regular\\r\n      Node Hosting the LUN: ontap-01\\r\n          QoS Policy Group: -\\r\n       Caching Policy Name: -\\r\n                     Clone: false\\r\n  Clone Autodelete Enabled: false\\r\n       Inconsistent Import: false\\r\n\\r\n              Vserver Name: svm3\\r\n                  LUN Path: /vol/svm3_lun/svm3_lun\\r\n               Volume Name: svm3_lun\\r\n                Qtree Name: \"\"\\r\n                  LUN Name: svm3_lun\\r\n                  LUN Size: 1.00GB\\r\n                   OS Type: windows_2008\\r\n         Space Reservation: enabled\\r\n             Serial Number: wpEzy]RQjLqA\\r\n       Serial Number (Hex): 7770457a795d52516a4c714e\\r\n                   Comment:\\r\nSpace Reservations Honored: true\\r\n          Space Allocation: disabled\\r\n                     State: online\\r\n                  LUN UUID: 2aa5a7ab-efbe-41f3-a4bf-dcd741e624a1\\r\n                    Mapped: mapped\\r\n          Device Legacy ID: -\\r\n          Device Binary ID: -\\r\n            Device Text ID: -\\r\n                 Read Only: false\\r\n     Fenced Due to Restore: false\\r\n                 Used Size: 0\\r\n       Maximum Resize Size: 502.0GB\\r\n             Creation Time: 9/10/2021 09:57:47\\r\n                     Class: regular\\r\n      Node Hosting the LUN: ontap-01\\r\n          QoS Policy Group: -\\r\n       Caching Policy Name: -\\r\n                     Clone: false\\r\n  Clone Autodelete Enabled: false\\r\n       Inconsistent Import: false\"\"\"\n"
  },
  {
    "path": "delfin/tests/unit/drivers/netapp/netapp_ontap/test_netapp.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom unittest import TestCase, mock\n\nimport paramiko\n\nfrom delfin.tests.unit.drivers.netapp.netapp_ontap import test_constans\nfrom delfin import context\nfrom delfin.drivers.netapp.dataontap.netapp_handler import NetAppHandler\nfrom delfin.drivers.netapp.dataontap.cluster_mode import NetAppCmodeDriver\nfrom delfin.drivers.utils.ssh_client import SSHPool\n\n\nclass Request:\n    def __init__(self):\n        self.environ = {'delfin.context': context.RequestContext()}\n        pass\n\n\nclass TestNetAppCmodeDriver(TestCase):\n    SSHPool.get = mock.Mock(\n        return_value={paramiko.SSHClient()})\n\n    NetAppHandler.login = mock.Mock()\n    NetAppHandler.do_rest_call = mock.Mock()\n    netapp_client = NetAppCmodeDriver(**test_constans.ACCESS_INFO)\n\n    def test_reset_connection(self):\n        kwargs = test_constans.ACCESS_INFO\n        NetAppHandler.login = mock.Mock()\n        netapp_client = NetAppCmodeDriver(**kwargs)\n        netapp_client.reset_connection(context, **kwargs)\n        netapp_client.netapp_handler.do_rest_call = mock.Mock()\n        self.assertEqual(netapp_client.netapp_handler.ssh_pool.ssh_host,\n                         \"192.168.159.130\")\n        self.assertEqual(netapp_client.netapp_handler.ssh_pool.ssh_port, 22)\n\n    def test_get_storage(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.SYSTEM_INFO,\n                         test_constans.VERSION,\n                         test_constans.SYSTEM_STATUS,\n                         test_constans.CONTROLLER_INFO,\n                         test_constans.CONTROLLER_IP_INFO,\n                         test_constans.DISKS_INFO,\n                         test_constans.PHYSICAL_INFO,\n                         test_constans.ERROR_DISK_INFO,\n                         test_constans.POOLS_INFO,\n                         test_constans.AGGREGATE_DETAIL_INFO])\n        data = self.netapp_client.get_storage(context)\n        self.assertEqual(data['vendor'], 'NetApp')\n\n    def test_list_storage_pools(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.POOLS_INFO,\n                         test_constans.AGGREGATE_DETAIL_INFO])\n        data = self.netapp_client.list_storage_pools(context)\n        self.assertEqual(data[0]['name'], 'aggr0')\n\n    def test_list_volumes(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.LUN_INFO,\n                         test_constans.FS_INFO,\n                         test_constans.THIN_FS_INFO,\n                         test_constans.POOLS_INFO,\n                         test_constans.AGGREGATE_DETAIL_INFO])\n        data = self.netapp_client.list_volumes(context)\n        self.assertEqual(data[0]['name'], 'lun_0')\n\n    def test_list_alerts(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.ALERT_INFO])\n        data = 
self.netapp_client.list_alerts(context)\n        self.assertEqual(data[0]['alert_name'],\n                         'DualPathToDiskShelf_Alert')\n\n    def test_clear_alerts(self):\n        alert = {'alert_id': '123'}\n        SSHPool.do_exec = mock.Mock()\n        self.netapp_client.clear_alert(context, alert)\n\n    def test_parse_alert(self):\n        data = self.netapp_client.parse_alert(context, test_constans.TRAP_MAP)\n        self.assertEqual(data['alert_name'], 'DisabledInuseSASPort_Alert')\n\n    def test_list_controllers(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.CONTROLLER_INFO,\n                         test_constans.CONTROLLER_IP_INFO])\n        data = self.netapp_client.list_controllers(context)\n        self.assertEqual(data[0]['name'], 'cl-01')\n\n    def test_list_ports(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.FC_PORT_INFO,\n                         test_constans.PORTS_INFO])\n        data = self.netapp_client.list_ports(context)\n        self.assertEqual(data[0]['name'], 'cl-01:0a')\n\n    def test_list_disks(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.DISKS_INFO,\n                         test_constans.PHYSICAL_INFO,\n                         test_constans.ERROR_DISK_INFO])\n        data = self.netapp_client.list_disks(context)\n        self.assertEqual(data[0]['name'], 'NET-1.1')\n\n    def test_list_qtrees(self):\n        SSHPool.do_exec = mock.Mock(side_effect=[\n            test_constans.QTREES_INFO, test_constans.FS_INFO])\n        data = self.netapp_client.list_qtrees(context)\n        self.assertEqual(data[0]['security_mode'], 'ntfs')\n\n    def test_list_shares(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.QTREES_INFO,\n                         test_constans.FS_INFO,\n                         test_constans.SHARES_AGREEMENT_INFO,\n                         test_constans.SHARE_VSERVER_INFO,\n                         test_constans.SHARES_INFO,\n                         test_constans.NFS_SHARE_INFO])\n        data = self.netapp_client.list_shares(context)\n        self.assertEqual(data[0]['name'], 'admin$')\n\n    def test_list_filesystems(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.FS_INFO,\n                         test_constans.THIN_FS_INFO,\n                         test_constans.POOLS_INFO,\n                         test_constans.AGGREGATE_DETAIL_INFO])\n        data = self.netapp_client.list_filesystems(context)\n        self.assertEqual(data[0]['name'], 'vol0')\n\n    def test_list_quotas(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.QUOTAS_INFO])\n        data = self.netapp_client.list_quotas(context)\n        self.assertEqual(data[0]['file_soft_limit'], 1000)\n\n    def test_get_alert_sources(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.CLUSTER_IPS_INFO,\n                         test_constans.CONTROLLER_INFO,\n                         test_constans.CONTROLLER_IP_INFO])\n        data = self.netapp_client.get_alert_sources(context)\n        self.assertEqual(data[0]['host'], '8.44.162.245')\n\n    def test_get_storage_performance(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[\n                # storage\n                test_constans.SYSTEM_INFO,\n                # pool\n                test_constans.AGGREGATE_DETAIL_INFO,\n                # volume\n          
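      # resource listings come from the SSH mock; perf counters from the REST mock below\n          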
      test_constans.LUN_INFO,\n            ])\n        self.netapp_client.netapp_handler.do_rest_call = mock.Mock(\n            side_effect=[  # storage\n                test_constans.CLUSTER_PER_INFO,\n                # pool\n                test_constans.POOL_PER_INFO,\n                test_constans.POOL_PER_INFO,\n                test_constans.POOL_PER_INFO,\n                # volume\n                test_constans.LUN_PER_INFO,\n                # port\n                test_constans.PORT_REST_INFO,\n                test_constans.FC_PER_INFO,\n                test_constans.PORT_REST_INFO,\n                test_constans.ETH_PER_INFO,\n                # fs\n                test_constans.FS_REST_INFO,\n                test_constans.FS_PER_INFO,\n            ])\n        data = self.netapp_client.collect_perf_metrics(\n            context, test_constans.ACCESS_INFO['storage_id'],\n            test_constans.RESOURCE_METRICS,\n            start_time=str(1435214300000),\n            end_time=str(1495315500000))\n        self.assertEqual(data[0][2][1485343200000], 1000)\n\n    def test_get_capabilities_is_None(self):\n        data = self.netapp_client.get_capabilities(context, None)\n        self.assertEqual(data[9.8]['resource_metrics']['storage']\n                         ['throughput']['unit'], 'MB/s')\n\n    def test_get_capabilities(self):\n        data = self.netapp_client.\\\n            get_capabilities(context,\n                             {'firmware_version': 'NetApp Release 9.8R15'})\n        self.assertEqual(data['resource_metrics']['storage']\n                         ['throughput']['unit'], 'MB/s')\n\n    def test_list_storage_host_initiators(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.ISCSI_INITIATOR_INFO,\n                         test_constans.FC_INITIATOR_INFO,\n                         test_constans.HOSTS_INFO])\n        data = self.netapp_client.list_storage_host_initiators(context)\n        self.assertEqual(data[0]['name'], '20:01:00:0c:29:bf:c4:d7')\n\n    def test_list_port_groups(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.PORT_SET_INFO,\n                         test_constans.LIF_INFO])\n        data = self.netapp_client.list_port_groups(context)\n        self.assertEqual(data['port_groups'][0]['name'], 'portgroup')\n\n    def test_list_storage_hosts(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.HOSTS_INFO])\n        data = self.netapp_client.list_storage_hosts(context)\n        self.assertEqual(data[0]['name'], 'fcstart1')\n\n    def test_list_masking_views(self):\n        SSHPool.do_exec = mock.Mock(\n            side_effect=[test_constans.LUN_MAPPING_INFO,\n                         test_constans.MAPPING_LUN_INFO,\n                         test_constans.HOSTS_INFO])\n        data = self.netapp_client.list_masking_views(context)\n        self.assertEqual(data[0]['name'], 'fcstart1_lun_1')\n\n    def test_get_latest_perf_timestamp(self):\n        self.netapp_client.netapp_handler.do_rest_call = mock.Mock(\n            side_effect=[test_constans.CLUSTER_PER_INFO])\n        data = self.netapp_client.get_latest_perf_timestamp(context)\n        self.assertEqual(data, 1485343200000)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/pure/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/pure/flasharray/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/drivers/pure/flasharray/test_pure_flasharray.py",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\n# import time\nfrom unittest import TestCase, mock\n\nimport six\nfrom oslo_log import log\n# from oslo_utils import units\n\nfrom delfin.common import constants\nfrom delfin.drivers.pure.flasharray import consts\n\nsys.modules['delfin.cryptor'] = mock.Mock()\nfrom delfin import context\nfrom delfin.drivers.pure.flasharray.rest_handler import RestHandler\nfrom delfin.drivers.pure.flasharray.pure_flasharray import PureFlashArrayDriver\n\nLOG = log.getLogger(__name__)\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"rest\": {\n        \"host\": \"10.0.0.1\",\n        \"port\": 8443,\n        \"username\": \"user\",\n        \"password\": \"pass\"\n    }\n}\n\nvolumes_info = [\n    {\n        \"total\": 116272464547,\n        \"name\": \"oracl_ail\",\n        \"system\": \"\",\n        \"snapshots\": 0,\n        \"volumes\": 116272464547,\n        \"data_reduction\": 1.82656654775252,\n        \"size\": 2156324555567,\n        \"shared_space\": \"\",\n        \"thin_provisioning\": 0.9225557589632,\n        \"total_reduction\": 18.92245232244555\n    },\n    {\n        \"total\": 0,\n        \"name\": \"wxt1\",\n        \"system\": \"\",\n        \"snapshots\": 0,\n        \"volumes\": 0,\n        \"data_reduction\": 1,\n        \"size\": 1073741824,\n        \"shared_space\": \"\",\n        \"thin_provisioning\": 1,\n        \"total_reduction\": 1\n    }\n]\n\npool_info = [\n    {\n        \"name\": \"lktest\",\n        \"volumes\": [\n            \"oracl_ail\",\n            \"wxt1\",\n            \"lktest/lk301\",\n            \"lktest/lk401\",\n            \"lktest/lk501\",\n        ]\n    },\n    {\n        \"name\": \"ethanTestVG\",\n        \"volumes\": [\n\n        ]\n    }\n]\nvolume_info = {\n    \"created\": \"2016-05-02T20:36:20Z\",\n    \"name\": \"oracl_ail\",\n    \"serial\": \"Fedd3455666y\",\n    \"size\": 1073740124,\n    \"source\": \"\"\n}\nvolume_info_two = {\n    \"created\": \"2016-05-02T20:36:20Z\",\n    \"name\": \"wxt1\",\n    \"serial\": \"Fedd3475666y\",\n    \"size\": 1073740124,\n    \"source\": \"\"\n}\nstorage_info = [\n    {\n        \"parity\": \"0.996586544522471235\",\n        \"provisioned\": \"20869257625600\",\n        \"hostname\": \"FA-m20\",\n        \"system\": 0,\n        \"snapshots\": 0,\n        \"volumes\": 227546215656,\n        \"data_reduction\": 1,\n        \"capacity\": 122276719419392,\n        \"total\": 324829845504,\n        \"shared_space\": 97544451659,\n        \"thin_provisioning\": 0.9526445631455244,\n        \"total_reduction\": 64.152236458789225\n    }\n]\nstorage_id_info = {\n    \"array_name\": \"pure01\",\n    \"id\": \"dlmkk15xcfdf4v5\",\n    \"revision\": \"2016-20-29mfmkkk\",\n    \"version\": \"4.6.7\"\n}\nalerts_info = [\n    {\n        \"category\": \"array\",\n        \"code\": 42,\n        \"actual\": \"\",\n        \"opened\": \"2018-05-12T10:55:21Z\",\n        \"component_type\": \"hardware\",\n  
      \"event\": \"failure\",\n        \"current_severity\": \"warning\",\n        \"details\": \"\",\n        \"expected\": \"\",\n        \"id\": 135,\n        \"component_name\": \"ct1.eth0\"\n    },\n    {\n        \"category\": \"array\",\n        \"code\": 13,\n        \"actual\": \"\",\n        \"opened\": \"2018-05-12T10:55:21Z\",\n        \"component_type\": \"process\",\n        \"event\": \"server unreachable\",\n        \"current_severity\": \"warning\",\n        \"details\": \"\",\n        \"expected\": \"\",\n        \"id\": 10088786,\n        \"component_name\": \"ct1.ntpd\"\n    }\n]\nparse_alert_info = {\n    '1.3.6.1.2.1.1.3.0': '30007589',\n    '1.3.6.1.4.1.40482.3.7': '2',\n    '1.3.6.1.4.1.40482.3.6': 'server error',\n    '1.3.6.1.4.1.40482.3.3': 'cto',\n    '1.3.6.1.4.1.40482.3.5': 'cto.server error'\n}\ncontrollers_info = [\n    {\n        \"status\": \"ready\",\n        \"name\": \"CT0\",\n        \"version\": \"5.3.0\",\n        \"mode\": \"primary\",\n        \"model\": \"FA-m20r2\",\n        \"type\": \"array_controller\"\n    },\n    {\n        \"status\": \"ready\",\n        \"name\": \"CT1\",\n        \"version\": \"5.3.0\",\n        \"mode\": \"secondary\",\n        \"model\": \"FA-m20r2\",\n        \"type\": \"array_controller\"\n    }\n]\nhardware_info = [\n    {\n        \"details\": \"\",\n        \"identify\": \"off\",\n        \"index\": 0,\n        \"name\": \"CTO.FC1\",\n        \"slot\": \"\",\n        \"speed\": 0,\n        \"status\": \"ok\",\n        \"temperature\": \"\"\n    },\n    {\n        \"details\": \"\",\n        \"identify\": \"\",\n        \"index\": 0,\n        \"name\": \"CTO.ETH15\",\n        \"slot\": 0,\n        \"speed\": 1000000,\n        \"status\": \"ok\",\n        \"temperature\": \"\"\n    }\n]\ndrive_info = [\n    {\n        \"status\": \"healthy\",\n        \"protocol\": \"SAS\",\n        \"name\": \"CH0.BAY1\",\n        \"last_evac_completed\": \"1970-01-01T00:00:00Z\",\n        \"details\": \"\",\n        \"capacity\": 1027895542547,\n        \"type\": \"SSD\",\n        \"last_failure\": \"1970-01-01T00:00:00Z\"\n    },\n    {\n        \"status\": \"healthy\",\n        \"protocol\": \"SAS\",\n        \"name\": \"CH0.BAY2\",\n        \"last_evac_completed\": \"1970-01-01T00:00:00Z\",\n        \"details\": \"\",\n        \"capacity\": 1027895542547,\n        \"type\": \"SSD\",\n        \"last_failure\": \"1970-01-01T00:00:00Z\"\n    },\n    {\n        \"status\": \"healthy\",\n        \"protocol\": \"SAS\",\n        \"name\": \"CH0.BAY3\",\n        \"last_evac_completed\": \"1970-01-01T00:00:00Z\",\n        \"details\": \"\",\n        \"capacity\": 1027895542547,\n        \"type\": \"SSD\",\n        \"last_failure\": \"1970-01-01T00:00:00Z\"\n    }\n]\nport_info = [\n    {\n        \"name\": \"CTO.FC1\",\n        \"failover\": \"\",\n        \"iqn\": \"iqn.2016-11-01.com.pure\",\n        \"portal\": \"100.12.253.23:4563\",\n        \"wwn\": \"43ddff45ggg4rty\",\n        \"nqn\": \"\"\n    },\n    {\n        \"name\": \"CTO.ETH15\",\n        \"failover\": \"\",\n        \"iqn\": \"iqn.2016-11-01.com.pure\",\n        \"portal\": \"100.12.253.23:4563\",\n        \"wwn\": None,\n        \"nqn\": None\n    }\n]\nport_network_info = [\n    {\n        \"name\": \"CTO.FC1\",\n        \"address\": \"45233662jksndj\",\n        \"speed\": 12000,\n        \"netmask\": \"100.12.253.23:4563\",\n        \"wwn\": \"43ddff45ggg4rty\",\n        \"nqn\": None,\n        \"services\": [\n            \"management\"\n        ]\n    },\n    {\n   
     \"name\": \"CTO.ETH15\",\n        \"address\": \"45233662jksndj\",\n        \"speed\": 13000,\n        \"netmask\": \"100.12.253.23:4563\",\n        \"wwn\": None,\n        \"nqn\": None,\n        \"services\": [\n            \"management\"\n        ]\n    }\n]\npools_info = [\n    {\n        \"total\": \"\",\n        \"name\": \"lktest\",\n        \"snapshots\": \"\",\n        \"volumes\": 0,\n        \"data_reduction\": 1,\n        \"size\": 5632155322,\n        \"thin_provisioning\": 1,\n        \"total_reduction\": 1\n    },\n    {\n        \"total\": \"\",\n        \"name\": \"ethanTestVG\",\n        \"snapshots\": \"\",\n        \"volumes\": 0,\n        \"data_reduction\": 1,\n        \"size\": 5632155322,\n        \"thin_provisioning\": 1,\n        \"total_reduction\": 1\n    }\n]\nreset_connection_info = {\n    \"username\": \"username\",\n    \"status\": 200\n}\nhosts_info = [\n    {\n        \"iqn\": [\n            \"iqn.1996-04.de.suse:01:ca9f3bcaf47\"\n        ],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"host\",\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"wxth\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [\n            \"iqn.1991-05.com.microsoft:win3\"\n        ],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"huhuitest\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"testGroup\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [\n            \"21000024FF2C9524\",\n            \"21000024FF2C9525\"\n        ],\n        \"nqn\": [],\n        \"name\": \"windows223\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [\n            \"10000000C9D5BC06\",\n            \"10000000C9D5BC07\"\n        ],\n        \"nqn\": [],\n        \"name\": \"CL-B06-RH2288HV3-8-44-157-33\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [\n            \"21000024FF76D0CC\",\n            \"21000024FF76D0CD\"\n        ],\n        \"nqn\": [],\n        \"name\": \"CL-C21-RH5885HV3-8-44-165-22\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [\n            \"iqn.1996-04.de.suse:01:66bf70288332\"\n        ],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"test-1s\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"rhev125\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [\n            \"210034800D6E7ADE\",\n            \"210034800D6E7ADF\"\n        ],\n        \"nqn\": [],\n        \"name\": \"QIB\",\n        \"hgroup\": \"QIB\"\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [\n            \"20090002D2937E9F\",\n            \"20190002D2937E9F\"\n        ],\n        \"nqn\": [],\n        \"name\": \"v6-8-44-128-21\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [\n            \"iqn.1994-05.com.redhat:1a9eaa70b558\"\n        ],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"host135\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [\n            \"2200CC05777C3EDF\",\n            \"2210CC05777C3EDF\"\n        ],\n        \"nqn\": [],\n        \"name\": \"zty-doradoV6\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [\n            
\"iqn.1994-05.com.redhat:71cfb5b97df\"\n        ],\n        \"wwn\": [\n            \"21000024FF76D0CF\"\n        ],\n        \"nqn\": [],\n        \"name\": \"CL-Test1\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [\n            \"iqn.1994-05.com.redhat:80c412848b94\"\n        ],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"host137\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"hsesxi\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [\n            \"21000024FF40272A\",\n            \"21000024FF40272B\"\n        ],\n        \"nqn\": [],\n        \"name\": \"zty-windows\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"hosttest\",\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [\n            \"21000024FF5351F0\",\n            \"21000024FF5351F1\"\n        ],\n        \"nqn\": [],\n        \"name\": \"hswin41\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"ztj201\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"test123\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [],\n        \"nqn\": [],\n        \"name\": \"zsytest\",\n        \"hgroup\": None\n    },\n    {\n        \"iqn\": [],\n        \"wwn\": [],\n        \"nqn\": [\n            \"nqn.2021-12.org.nvmexpress.mytest\"\n        ],\n        \"name\": \"zhilong-host0000002130\",\n        \"hgroup\": None\n    }\n]\nHOSTS_PERSONALITY_INFO = [\n    {\n        \"name\": \"host\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"wxth\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"huhuitest\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"testGroup\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"windows223\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"CL-B06-RH2288HV3-8-44-157-33\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"CL-C21-RH5885HV3-8-44-165-22\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"test-1s\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"rhev125\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"QIB\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"v6-8-44-128-21\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"host135\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"zty-doradoV6\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"CL-Test1\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"host137\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"hsesxi\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"zty-windows\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"hosttest\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"hswin41\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"ztj201\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"test123\",\n        \"personality\": None\n    },\n    {\n        \"name\": \"zsytest\",\n        \"personality\": 
None\n    },\n    {\n        \"name\": \"zhilong-host0000002130\",\n        \"personality\": \"aix\"\n    }\n]\nHGROUP_INFO = [\n    {\n        \"hosts\": [],\n        \"name\": \"podgroup\"\n    },\n    {\n        \"hosts\": [],\n        \"name\": \"NewTest\"\n    },\n    {\n        \"hosts\": [\n            \"QIB\"\n        ],\n        \"name\": \"QIB\"\n    },\n    {\n        \"hosts\": [\n            \"host\",\n            \"hosttest\"\n        ],\n        \"name\": \"HGTest\"\n    }\n]\n\nVOLUME_GROUP_INFO = [\n    {\n        \"name\": \"vvol-pure-VM1-072e131e-vg\",\n        \"volumes\": []\n    },\n    {\n        \"name\": \"vvol-pure-vm2-e48a0ef8-vg\",\n        \"volumes\": []\n    },\n    {\n        \"name\": \"vvol-pure-vm3-65d42a4e-vg\",\n        \"volumes\": []\n    },\n    {\n        \"name\": \"vvol-pure-vm4-17c41971-vg\",\n        \"volumes\": []\n    },\n    {\n        \"name\": \"Volume-Group\",\n        \"volumes\": [\n            \"Volume-Group/voltest001\",\n            \"Volume-Group/voltest002\",\n            \"Volume-Group/voltest003\",\n            \"Volume-Group/voltest004\",\n            \"Volume-Group/voltest005\"\n        ]\n    },\n    {\n        \"name\": \"test1\",\n        \"volumes\": []\n    },\n    {\n        \"name\": \"tangxuan\",\n        \"volumes\": []\n    }\n]\nHOSTS_CONNECT_INFO = [\n    {\n        \"vol\": \"huhuitest\",\n        \"name\": \"huhuitest\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"test\",\n        \"name\": \"wxth\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"test\",\n        \"name\": \"testGroup\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"win2016_223\",\n        \"name\": \"windows223\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"pure-protocol-endpoint\",\n        \"name\": \"CL-C21-RH5885HV3-8-44-165-22\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"CL_VOLUME_1_remote\",\n        \"name\": \"CL-C21-RH5885HV3-8-44-165-22\",\n        \"lun\": 2,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"lun-test1s\",\n        \"name\": \"test-1s\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"QIB1\",\n        \"name\": \"QIB\",\n        \"lun\": 254,\n        \"hgroup\": \"QIB\"\n    },\n    {\n        \"vol\": \"QIB1\",\n        \"name\": \"zty-windows\",\n        \"lun\": 254,\n        \"hgroup\": \"QIB\"\n    },\n    {\n        \"vol\": \"QIB2\",\n        \"name\": \"zty-windows\",\n        \"lun\": 253,\n        \"hgroup\": \"QIB\"\n    },\n    {\n        \"vol\": \"QIB2\",\n        \"name\": \"QIB\",\n        \"lun\": 253,\n        \"hgroup\": \"QIB\"\n    },\n    {\n        \"vol\": \"yzw_iotest\",\n        \"name\": \"host135\",\n        \"lun\": 2,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000003\",\n        \"name\": \"host137\",\n        \"lun\": 3,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000009\",\n        \"name\": \"host135\",\n        \"lun\": 3,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000012\",\n        \"name\": \"host135\",\n        \"lun\": 6,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"v6-8-44-128-21\",\n        \"name\": \"v6-8-44-128-21\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"V6-8-44-128-21-002\",\n     
   \"name\": \"v6-8-44-128-21\",\n        \"lun\": 2,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000007\",\n        \"name\": \"host137\",\n        \"lun\": 7,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000010\",\n        \"name\": \"host135\",\n        \"lun\": 4,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000013\",\n        \"name\": \"host137\",\n        \"lun\": 2,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000000\",\n        \"name\": \"host135\",\n        \"lun\": 5,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000001\",\n        \"name\": \"host137\",\n        \"lun\": 4,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000016\",\n        \"name\": \"host137\",\n        \"lun\": 5,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000018\",\n        \"name\": \"host135\",\n        \"lun\": 7,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000015\",\n        \"name\": \"host135\",\n        \"lun\": 8,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000020\",\n        \"name\": \"host137\",\n        \"lun\": 6,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000021\",\n        \"name\": \"host135\",\n        \"lun\": 9,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000022\",\n        \"name\": \"host137\",\n        \"lun\": 8,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000019\",\n        \"name\": \"host135\",\n        \"lun\": 10,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000026\",\n        \"name\": \"host137\",\n        \"lun\": 9,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000028\",\n        \"name\": \"host135\",\n        \"lun\": 11,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000024\",\n        \"name\": \"host137\",\n        \"lun\": 10,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"hsboot\",\n        \"name\": \"hsesxi\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"hszdata\",\n        \"name\": \"hsesxi\",\n        \"lun\": 2,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun16\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun15\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 2,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun13\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 3,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun11\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 4,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun14\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 5,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun2\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 6,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun5\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 7,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun4\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 8,\n        \"hgroup\": None\n    },\n    {\n        
\"vol\": \"zty_lun1\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 9,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun3\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 10,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun6\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 11,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun12\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 12,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun10\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 13,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun8\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 14,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun7\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 15,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"zty_lun9\",\n        \"name\": \"zty-doradoV6\",\n        \"lun\": 16,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"Volume-Group/voltest005\",\n        \"name\": \"hosttest\",\n        \"lun\": 254,\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"vol\": \"Volume-Group/voltest005\",\n        \"name\": \"host\",\n        \"lun\": 254,\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"vol\": \"Volume-Group/voltest001\",\n        \"name\": \"host\",\n        \"lun\": 253,\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"vol\": \"Volume-Group/voltest001\",\n        \"name\": \"hosttest\",\n        \"lun\": 253,\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"vol\": \"Volume-Group/voltest002\",\n        \"name\": \"host\",\n        \"lun\": 252,\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"vol\": \"Volume-Group/voltest002\",\n        \"name\": \"hosttest\",\n        \"lun\": 252,\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"vol\": \"Volume-Group/voltest003\",\n        \"name\": \"host\",\n        \"lun\": 251,\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"vol\": \"Volume-Group/voltest003\",\n        \"name\": \"hosttest\",\n        \"lun\": 251,\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"vol\": \"Volume-Group/voltest004\",\n        \"name\": \"hosttest\",\n        \"lun\": 250,\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"vol\": \"Volume-Group/voltest004\",\n        \"name\": \"host\",\n        \"lun\": 250,\n        \"hgroup\": \"HGTest\"\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000001-u\",\n        \"name\": \"CL-B06-RH2288HV3-8-44-157-33\",\n        \"lun\": 4,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"Volume-Group/voltest001\",\n        \"name\": \"CL-B06-RH2288HV3-8-44-157-33\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"hswin4102\",\n        \"name\": \"zhilong-host0000002130\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"tangxuan/tt001\",\n        \"name\": \"host135\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"hswin\",\n        \"name\": \"CL-Test1\",\n        \"lun\": 1,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000000-u\",\n        \"name\": \"zhilong-host0000002130\",\n        \"lun\": 2,\n        \"hgroup\": None\n    },\n    {\n        \"vol\": \"nc::136_connect\",\n        \"name\": \"hosttest\",\n        \"lun\": 1,\n        \"hgroup\": None\n    
}\n]\nHGROUP_CONNECT_INFO = [\n    {\n        \"vol\": \"QIB1\",\n        \"name\": \"QIB\",\n        \"lun\": 254\n    },\n    {\n        \"vol\": \"QIB2\",\n        \"name\": \"QIB\",\n        \"lun\": 253\n    },\n    {\n        \"vol\": \"Volume-Group/voltest005\",\n        \"name\": \"HGTest\",\n        \"lun\": 254\n    },\n    {\n        \"vol\": \"Volume-Group/voltest001\",\n        \"name\": \"HGTest\",\n        \"lun\": 253\n    },\n    {\n        \"vol\": \"Volume-Group/voltest002\",\n        \"name\": \"HGTest\",\n        \"lun\": 252\n    },\n    {\n        \"vol\": \"Volume-Group/voltest003\",\n        \"name\": \"HGTest\",\n        \"lun\": 251\n    },\n    {\n        \"vol\": \"Volume-Group/voltest004\",\n        \"name\": \"HGTest\",\n        \"lun\": 250\n    },\n    {\n        \"vol\": \"homelab-pso-db_0000000002\",\n        \"name\": \"NewTest\",\n        \"lun\": 254\n    },\n    {\n        \"vol\": \"yzw_test0\",\n        \"name\": \"zhilong-hg\",\n        \"lun\": 254\n    }\n]\nvolume_data = [\n    {'native_volume_id': 'oracl_ail', 'name': 'oracl_ail',\n     'total_capacity': 2156324555567, 'used_capacity': 116272464547,\n     'free_capacity': 2040052091020, 'storage_id': '12345', 'status': 'normal',\n     'type': 'thin'},\n    {'native_volume_id': 'wxt1', 'name': 'wxt1', 'total_capacity': 1073741824,\n     'used_capacity': 0, 'free_capacity': 1073741824, 'storage_id': '12345',\n     'status': 'normal', 'type': 'thin'}]\nstorage_data = {\n    'model': 'FA-m20r2', 'total_capacity': 122276719419392,\n    'raw_capacity': 3083686627641, 'used_capacity': 324829845504,\n    'free_capacity': 121951889573888, 'vendor': 'PURE', 'name': 'pure01',\n    'serial_number': 'dlmkk15xcfdf4v5', 'firmware_version': '4.6.7',\n    'status': 'normal'}\nlist_alert_data = [\n    {'occur_time': 1526122521000, 'alert_id': 135, 'severity': 'Warning',\n     'category': 'Fault', 'location': 'ct1.eth0', 'type': 'EquipmentAlarm',\n     'resource_type': 'Storage', 'alert_name': 'failure',\n     'match_key': '7f1de29e6da19d22b51c68001e7e0e54',\n     'description': '(hardware:ct1.eth0): failure'},\n    {'occur_time': 1526122521000, 'alert_id': 10088786, 'severity': 'Warning',\n     'category': 'Fault', 'location': 'ct1.ntpd', 'type': 'EquipmentAlarm',\n     'resource_type': 'Storage', 'alert_name': 'server unreachable',\n     'match_key': 'b35a0c63d4cd82256b95f51522c6ba32',\n     'description': '(process:ct1.ntpd): server unreachable'}]\nparse_alert_data = {\n    'alert_id': '30007589', 'severity': 'Informational', 'category': 'Fault',\n    'occur_time': 1644833673861, 'description': '(None:cto): server error',\n    'location': 'cto', 'type': 'EquipmentAlarm', 'resource_type': 'Storage',\n    'alert_name': 'cto.server error', 'sequence_number': '30007589',\n    'match_key': '11214c87bb6efcf8dc2aed1095271774'}\ncontrollers_data = [\n    {'name': 'CT0', 'status': 'unknown', 'soft_version': '5.3.0',\n     'storage_id': '12345', 'native_controller_id': 'CT0', 'location': 'CT0'},\n    {'name': 'CT1', 'status': 'unknown', 'soft_version': '5.3.0',\n     'storage_id': '12345', 'native_controller_id': 'CT1', 'location': 'CT1'}]\ndisk_data = [\n    {'name': 'CH0.BAY1', 'physical_type': 'ssd', 'status': 'normal',\n     'storage_id': '12345', 'capacity': 1027895542547, 'speed': None,\n     'model': None, 'serial_number': None, 'native_disk_id': 'CH0.BAY1',\n     'location': 'CH0.BAY1', 'manufacturer': 'PURE', 'firmware': ''},\n    {'name': 'CH0.BAY2', 'physical_type': 'ssd', 'status': 'normal',\n     
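# drive_info carries no model/serial fields, so the expected rows keep them None\n     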
'storage_id': '12345', 'capacity': 1027895542547, 'speed': None,\n     'model': None, 'serial_number': None, 'native_disk_id': 'CH0.BAY2',\n     'location': 'CH0.BAY2', 'manufacturer': 'PURE', 'firmware': ''},\n    {'name': 'CH0.BAY3', 'physical_type': 'ssd', 'status': 'normal',\n     'storage_id': '12345', 'capacity': 1027895542547, 'speed': None,\n     'model': None, 'serial_number': None, 'native_disk_id': 'CH0.BAY3',\n     'location': 'CH0.BAY3', 'manufacturer': 'PURE', 'firmware': ''}]\nport_data = [\n    {'type': 'fc', 'name': 'CTO.FC1', 'native_port_id': 'CTO.FC1',\n     'storage_id': '12345', 'location': 'CTO.FC1',\n     'connection_status': 'disconnected', 'speed': 0,\n     'health_status': 'normal', 'wwn': '43:dd:ff:45:gg:g4:rt:y',\n     'mac_address': None, 'logical_type': 'management',\n     'ipv4_mask': '100.12.253.23:4563', 'ipv4': '45233662jksndj'},\n    {'type': 'eth', 'name': 'CTO.ETH15', 'native_port_id': 'CTO.ETH15',\n     'storage_id': '12345', 'location': 'CTO.ETH15',\n     'connection_status': 'connected', 'speed': 1000000,\n     'health_status': 'normal', 'wwn': 'iqn.2016-11-01.com.pure',\n     'mac_address': None, 'logical_type': 'management',\n     'ipv4_mask': '100.12.253.23:4563', 'ipv4': '45233662jksndj'}]\ninitiator_data = [\n    {'native_storage_host_initiator_id': 'iqn.1996-04.de.suse:01:ca9f3bcaf47',\n     'native_storage_host_id': 'host',\n     'name': 'iqn.1996-04.de.suse:01:ca9f3bcaf47', 'type': 'iscsi',\n     'status': 'unknown', 'wwn': 'iqn.1996-04.de.suse:01:ca9f3bcaf47',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': 'iqn.1991-05.com.microsoft:win3',\n     'native_storage_host_id': 'huhuitest',\n     'name': 'iqn.1991-05.com.microsoft:win3', 'type': 'iscsi',\n     'status': 'unknown', 'wwn': 'iqn.1991-05.com.microsoft:win3',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '21:00:00:24:FF:2C:95:24',\n     'native_storage_host_id': 'windows223', 'name': '21:00:00:24:FF:2C:95:24',\n     'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:2C:95:24',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '21:00:00:24:FF:2C:95:25',\n     'native_storage_host_id': 'windows223', 'name': '21:00:00:24:FF:2C:95:25',\n     'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:2C:95:25',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '10:00:00:00:C9:D5:BC:06',\n     'native_storage_host_id': 'CL-B06-RH2288HV3-8-44-157-33',\n     'name': '10:00:00:00:C9:D5:BC:06', 'type': 'fc', 'status': 'unknown',\n     'wwn': '10:00:00:00:C9:D5:BC:06', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '10:00:00:00:C9:D5:BC:07',\n     'native_storage_host_id': 'CL-B06-RH2288HV3-8-44-157-33',\n     'name': '10:00:00:00:C9:D5:BC:07', 'type': 'fc', 'status': 'unknown',\n     'wwn': '10:00:00:00:C9:D5:BC:07', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '21:00:00:24:FF:76:D0:CC',\n     'native_storage_host_id': 'CL-C21-RH5885HV3-8-44-165-22',\n     'name': '21:00:00:24:FF:76:D0:CC', 'type': 'fc', 'status': 'unknown',\n     'wwn': '21:00:00:24:FF:76:D0:CC', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '21:00:00:24:FF:76:D0:CD',\n     'native_storage_host_id': 'CL-C21-RH5885HV3-8-44-165-22',\n     'name': '21:00:00:24:FF:76:D0:CD', 'type': 'fc', 'status': 'unknown',\n     'wwn': '21:00:00:24:FF:76:D0:CD', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': 'iqn.1996-04.de.suse:01:66bf70288332',\n     
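# bare WWNs from hosts_info appear colon-formatted in these expected rows\n     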
'native_storage_host_id': 'test-1s',\n     'name': 'iqn.1996-04.de.suse:01:66bf70288332', 'type': 'iscsi',\n     'status': 'unknown', 'wwn': 'iqn.1996-04.de.suse:01:66bf70288332',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '21:00:34:80:0D:6E:7A:DE',\n     'native_storage_host_id': 'QIB', 'name': '21:00:34:80:0D:6E:7A:DE',\n     'type': 'fc', 'status': 'unknown', 'wwn': '21:00:34:80:0D:6E:7A:DE',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '21:00:34:80:0D:6E:7A:DF',\n     'native_storage_host_id': 'QIB', 'name': '21:00:34:80:0D:6E:7A:DF',\n     'type': 'fc', 'status': 'unknown', 'wwn': '21:00:34:80:0D:6E:7A:DF',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '20:09:00:02:D2:93:7E:9F',\n     'native_storage_host_id': 'v6-8-44-128-21',\n     'name': '20:09:00:02:D2:93:7E:9F', 'type': 'fc', 'status': 'unknown',\n     'wwn': '20:09:00:02:D2:93:7E:9F', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '20:19:00:02:D2:93:7E:9F',\n     'native_storage_host_id': 'v6-8-44-128-21',\n     'name': '20:19:00:02:D2:93:7E:9F', 'type': 'fc', 'status': 'unknown',\n     'wwn': '20:19:00:02:D2:93:7E:9F', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': 'iqn.1994-05.com.redhat:1a9eaa70b558',\n     'native_storage_host_id': 'host135',\n     'name': 'iqn.1994-05.com.redhat:1a9eaa70b558', 'type': 'iscsi',\n     'status': 'unknown', 'wwn': 'iqn.1994-05.com.redhat:1a9eaa70b558',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '22:00:CC:05:77:7C:3E:DF',\n     'native_storage_host_id': 'zty-doradoV6',\n     'name': '22:00:CC:05:77:7C:3E:DF', 'type': 'fc', 'status': 'unknown',\n     'wwn': '22:00:CC:05:77:7C:3E:DF', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '22:10:CC:05:77:7C:3E:DF',\n     'native_storage_host_id': 'zty-doradoV6',\n     'name': '22:10:CC:05:77:7C:3E:DF', 'type': 'fc', 'status': 'unknown',\n     'wwn': '22:10:CC:05:77:7C:3E:DF', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': 'iqn.1994-05.com.redhat:71cfb5b97df',\n     'native_storage_host_id': 'CL-Test1',\n     'name': 'iqn.1994-05.com.redhat:71cfb5b97df', 'type': 'iscsi',\n     'status': 'unknown', 'wwn': 'iqn.1994-05.com.redhat:71cfb5b97df',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '21:00:00:24:FF:76:D0:CF',\n     'native_storage_host_id': 'CL-Test1', 'name': '21:00:00:24:FF:76:D0:CF',\n     'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:76:D0:CF',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': 'iqn.1994-05.com.redhat:80c412848b94',\n     'native_storage_host_id': 'host137',\n     'name': 'iqn.1994-05.com.redhat:80c412848b94', 'type': 'iscsi',\n     'status': 'unknown', 'wwn': 'iqn.1994-05.com.redhat:80c412848b94',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '21:00:00:24:FF:40:27:2A',\n     'native_storage_host_id': 'zty-windows',\n     'name': '21:00:00:24:FF:40:27:2A', 'type': 'fc', 'status': 'unknown',\n     'wwn': '21:00:00:24:FF:40:27:2A', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '21:00:00:24:FF:40:27:2B',\n     'native_storage_host_id': 'zty-windows',\n     'name': '21:00:00:24:FF:40:27:2B', 'type': 'fc', 'status': 'unknown',\n     'wwn': '21:00:00:24:FF:40:27:2B', 'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '21:00:00:24:FF:53:51:F0',\n     'native_storage_host_id': 'hswin41', 'name': '21:00:00:24:FF:53:51:F0',\n     'type': 
'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:53:51:F0',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': '21:00:00:24:FF:53:51:F1',\n     'native_storage_host_id': 'hswin41', 'name': '21:00:00:24:FF:53:51:F1',\n     'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:53:51:F1',\n     'storage_id': '12345'},\n    {'native_storage_host_initiator_id': 'nqn.2021-12.org.nvmexpress.mytest',\n     'native_storage_host_id': 'zhilong-host0000002130',\n     'name': 'nqn.2021-12.org.nvmexpress.mytest', 'type': 'nvme-of',\n     'status': 'unknown', 'wwn': 'nqn.2021-12.org.nvmexpress.mytest',\n     'storage_id': '12345'}]\nhost_data = [\n    {'name': 'host', 'storage_id': '12345', 'native_storage_host_id': 'host',\n     'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'wxth', 'storage_id': '12345', 'native_storage_host_id': 'wxth',\n     'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'huhuitest', 'storage_id': '12345',\n     'native_storage_host_id': 'huhuitest', 'os_type': 'Unknown',\n     'status': 'normal'}, {'name': 'testGroup', 'storage_id': '12345',\n                           'native_storage_host_id': 'testGroup',\n                           'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'windows223', 'storage_id': '12345',\n     'native_storage_host_id': 'windows223', 'os_type': 'Unknown',\n     'status': 'normal'},\n    {'name': 'CL-B06-RH2288HV3-8-44-157-33', 'storage_id': '12345',\n     'native_storage_host_id': 'CL-B06-RH2288HV3-8-44-157-33',\n     'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'CL-C21-RH5885HV3-8-44-165-22', 'storage_id': '12345',\n     'native_storage_host_id': 'CL-C21-RH5885HV3-8-44-165-22',\n     'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'test-1s', 'storage_id': '12345',\n     'native_storage_host_id': 'test-1s', 'os_type': 'Unknown',\n     'status': 'normal'}, {'name': 'rhev125', 'storage_id': '12345',\n                           'native_storage_host_id': 'rhev125',\n                           'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'QIB', 'storage_id': '12345', 'native_storage_host_id': 'QIB',\n     'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'v6-8-44-128-21', 'storage_id': '12345',\n     'native_storage_host_id': 'v6-8-44-128-21', 'os_type': 'Unknown',\n     'status': 'normal'}, {'name': 'host135', 'storage_id': '12345',\n                           'native_storage_host_id': 'host135',\n                           'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'zty-doradoV6', 'storage_id': '12345',\n     'native_storage_host_id': 'zty-doradoV6', 'os_type': 'Unknown',\n     'status': 'normal'}, {'name': 'CL-Test1', 'storage_id': '12345',\n                           'native_storage_host_id': 'CL-Test1',\n                           'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'host137', 'storage_id': '12345',\n     'native_storage_host_id': 'host137', 'os_type': 'Unknown',\n     'status': 'normal'}, {'name': 'hsesxi', 'storage_id': '12345',\n                           'native_storage_host_id': 'hsesxi',\n                           'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'zty-windows', 'storage_id': '12345',\n     'native_storage_host_id': 'zty-windows', 'os_type': 'Unknown',\n     'status': 'normal'}, {'name': 'hosttest', 'storage_id': '12345',\n                           'native_storage_host_id': 'hosttest',\n                           'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'hswin41', 'storage_id': 
'12345',\n     'native_storage_host_id': 'hswin41', 'os_type': 'Unknown',\n     'status': 'normal'}, {'name': 'ztj201', 'storage_id': '12345',\n                           'native_storage_host_id': 'ztj201',\n                           'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'test123', 'storage_id': '12345',\n     'native_storage_host_id': 'test123', 'os_type': 'Unknown',\n     'status': 'normal'}, {'name': 'zsytest', 'storage_id': '12345',\n                           'native_storage_host_id': 'zsytest',\n                           'os_type': 'Unknown', 'status': 'normal'},\n    {'name': 'zhilong-host0000002130', 'storage_id': '12345',\n     'native_storage_host_id': 'zhilong-host0000002130', 'os_type': 'AIX',\n     'status': 'normal'}]\nhost_group_data = {\n    'storage_host_groups':\n        [\n            {'native_storage_host_group_id': 'podgroup', 'name': 'podgroup',\n             'storage_id': '12345'},\n            {'native_storage_host_group_id': 'NewTest', 'name': 'NewTest',\n             'storage_id': '12345'},\n            {'native_storage_host_group_id': 'QIB', 'name': 'QIB',\n             'storage_id': '12345'},\n            {'native_storage_host_group_id': 'HGTest', 'name': 'HGTest',\n             'storage_id': '12345'}],\n    'storage_host_grp_host_rels': [\n        {'native_storage_host_group_id': 'QIB', 'storage_id': '12345',\n         'native_storage_host_id': 'QIB'},\n        {'native_storage_host_group_id': 'HGTest', 'storage_id': '12345',\n         'native_storage_host_id': 'host'},\n        {'native_storage_host_group_id': 'HGTest', 'storage_id': '12345',\n         'native_storage_host_id': 'hosttest'}]\n}\nvolume_group_data = {\n    'volume_groups':\n        [\n            {'name': 'vvol-pure-VM1-072e131e-vg', 'storage_id': '12345',\n             'native_volume_group_id': 'vvol-pure-VM1-072e131e-vg'},\n            {'name': 'vvol-pure-vm2-e48a0ef8-vg', 'storage_id': '12345',\n             'native_volume_group_id': 'vvol-pure-vm2-e48a0ef8-vg'},\n            {'name': 'vvol-pure-vm3-65d42a4e-vg', 'storage_id': '12345',\n             'native_volume_group_id': 'vvol-pure-vm3-65d42a4e-vg'},\n            {'name': 'vvol-pure-vm4-17c41971-vg', 'storage_id': '12345',\n             'native_volume_group_id': 'vvol-pure-vm4-17c41971-vg'},\n            {'name': 'Volume-Group', 'storage_id': '12345',\n             'native_volume_group_id': 'Volume-Group'},\n            {'name': 'test1', 'storage_id': '12345',\n             'native_volume_group_id': 'test1'},\n            {'name': 'tangxuan', 'storage_id': '12345',\n             'native_volume_group_id': 'tangxuan'}\n        ],\n    'vol_grp_vol_rels': [\n        {'storage_id': '12345', 'native_volume_group_id': 'Volume-Group',\n         'native_volume_id': 'Volume-Group/voltest001'},\n        {'storage_id': '12345', 'native_volume_group_id': 'Volume-Group',\n         'native_volume_id': 'Volume-Group/voltest002'},\n        {'storage_id': '12345', 'native_volume_group_id': 'Volume-Group',\n         'native_volume_id': 'Volume-Group/voltest003'},\n        {'storage_id': '12345', 'native_volume_group_id': 'Volume-Group',\n         'native_volume_id': 'Volume-Group/voltest004'},\n        {'storage_id': '12345', 'native_volume_group_id': 'Volume-Group',\n         'native_volume_id': 'Volume-Group/voltest005'}]\n}\nviews_data = [\n    {'native_masking_view_id': 'QIBQIB1', 'name': 'QIBQIB1',\n     'native_storage_host_group_id': 'QIB', 'native_volume_id': 'QIB1',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 
'QIBQIB2', 'name': 'QIBQIB2',\n     'native_storage_host_group_id': 'QIB', 'native_volume_id': 'QIB2',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'HGTestVolume-Group/voltest005',\n     'name': 'HGTestVolume-Group/voltest005',\n     'native_storage_host_group_id': 'HGTest',\n     'native_volume_id': 'Volume-Group/voltest005', 'storage_id': '12345'},\n    {'native_masking_view_id': 'HGTestVolume-Group/voltest001',\n     'name': 'HGTestVolume-Group/voltest001',\n     'native_storage_host_group_id': 'HGTest',\n     'native_volume_id': 'Volume-Group/voltest001', 'storage_id': '12345'},\n    {'native_masking_view_id': 'HGTestVolume-Group/voltest002',\n     'name': 'HGTestVolume-Group/voltest002',\n     'native_storage_host_group_id': 'HGTest',\n     'native_volume_id': 'Volume-Group/voltest002', 'storage_id': '12345'},\n    {'native_masking_view_id': 'HGTestVolume-Group/voltest003',\n     'name': 'HGTestVolume-Group/voltest003',\n     'native_storage_host_group_id': 'HGTest',\n     'native_volume_id': 'Volume-Group/voltest003', 'storage_id': '12345'},\n    {'native_masking_view_id': 'HGTestVolume-Group/voltest004',\n     'name': 'HGTestVolume-Group/voltest004',\n     'native_storage_host_group_id': 'HGTest',\n     'native_volume_id': 'Volume-Group/voltest004', 'storage_id': '12345'},\n    {'native_masking_view_id': 'NewTesthomelab-pso-db_0000000002',\n     'name': 'NewTesthomelab-pso-db_0000000002',\n     'native_storage_host_group_id': 'NewTest',\n     'native_volume_id': 'homelab-pso-db_0000000002', 'storage_id': '12345'},\n    {'native_masking_view_id': 'zhilong-hgyzw_test0',\n     'name': 'zhilong-hgyzw_test0',\n     'native_storage_host_group_id': 'zhilong-hg',\n     'native_volume_id': 'yzw_test0', 'storage_id': '12345'},\n    {'native_masking_view_id': 'huhuitestNonehuhuitest',\n     'name': 'huhuitestNonehuhuitest', 'native_storage_host_id': 'huhuitest',\n     'native_volume_id': 'huhuitest', 'storage_id': '12345'},\n    {'native_masking_view_id': 'wxthNonetest', 'name': 'wxthNonetest',\n     'native_storage_host_id': 'wxth', 'native_volume_id': 'test',\n     'storage_id': '12345'}, {'native_masking_view_id': 'testGroupNonetest',\n                              'name': 'testGroupNonetest',\n                              'native_storage_host_id': 'testGroup',\n                              'native_volume_id': 'test',\n                              'storage_id': '12345'},\n    {'native_masking_view_id': 'windows223Nonewin2016_223',\n     'name': 'windows223Nonewin2016_223',\n     'native_storage_host_id': 'windows223', 'native_volume_id': 'win2016_223',\n     'storage_id': '12345'}, {\n        'native_masking_view_id':\n            'CL-C21-RH5885HV3-8-44-165-22Nonepure-protocol-endpoint',\n        'name': 'CL-C21-RH5885HV3-8-44-165-22Nonepure-protocol-endpoint',\n        'native_storage_host_id': 'CL-C21-RH5885HV3-8-44-165-22',\n        'native_volume_id': 'pure-protocol-endpoint', 'storage_id': '12345'}, {\n        'native_masking_view_id':\n            'CL-C21-RH5885HV3-8-44-165-22NoneCL_VOLUME_1_remote',\n        'name': 'CL-C21-RH5885HV3-8-44-165-22NoneCL_VOLUME_1_remote',\n        'native_storage_host_id': 'CL-C21-RH5885HV3-8-44-165-22',\n        'native_volume_id': 'CL_VOLUME_1_remote', 'storage_id': '12345'},\n    {'native_masking_view_id': 'test-1sNonelun-test1s',\n     'name': 'test-1sNonelun-test1s', 'native_storage_host_id': 'test-1s',\n     'native_volume_id': 'lun-test1s', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host135Noneyzw_iotest',\n     
'name': 'host135Noneyzw_iotest', 'native_storage_host_id': 'host135',\n     'native_volume_id': 'yzw_iotest', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000003',\n     'name': 'host137Nonehomelab-pso-db_0000000003',\n     'native_storage_host_id': 'host137',\n     'native_volume_id': 'homelab-pso-db_0000000003', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000009',\n     'name': 'host135Nonehomelab-pso-db_0000000009',\n     'native_storage_host_id': 'host135',\n     'native_volume_id': 'homelab-pso-db_0000000009', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000012',\n     'name': 'host135Nonehomelab-pso-db_0000000012',\n     'native_storage_host_id': 'host135',\n     'native_volume_id': 'homelab-pso-db_0000000012', 'storage_id': '12345'},\n    {'native_masking_view_id': 'v6-8-44-128-21Nonev6-8-44-128-21',\n     'name': 'v6-8-44-128-21Nonev6-8-44-128-21',\n     'native_storage_host_id': 'v6-8-44-128-21',\n     'native_volume_id': 'v6-8-44-128-21', 'storage_id': '12345'},\n    {'native_masking_view_id': 'v6-8-44-128-21NoneV6-8-44-128-21-002',\n     'name': 'v6-8-44-128-21NoneV6-8-44-128-21-002',\n     'native_storage_host_id': 'v6-8-44-128-21',\n     'native_volume_id': 'V6-8-44-128-21-002', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000007',\n     'name': 'host137Nonehomelab-pso-db_0000000007',\n     'native_storage_host_id': 'host137',\n     'native_volume_id': 'homelab-pso-db_0000000007', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000010',\n     'name': 'host135Nonehomelab-pso-db_0000000010',\n     'native_storage_host_id': 'host135',\n     'native_volume_id': 'homelab-pso-db_0000000010', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000013',\n     'name': 'host137Nonehomelab-pso-db_0000000013',\n     'native_storage_host_id': 'host137',\n     'native_volume_id': 'homelab-pso-db_0000000013', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000000',\n     'name': 'host135Nonehomelab-pso-db_0000000000',\n     'native_storage_host_id': 'host135',\n     'native_volume_id': 'homelab-pso-db_0000000000', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000001',\n     'name': 'host137Nonehomelab-pso-db_0000000001',\n     'native_storage_host_id': 'host137',\n     'native_volume_id': 'homelab-pso-db_0000000001', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000016',\n     'name': 'host137Nonehomelab-pso-db_0000000016',\n     'native_storage_host_id': 'host137',\n     'native_volume_id': 'homelab-pso-db_0000000016', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000018',\n     'name': 'host135Nonehomelab-pso-db_0000000018',\n     'native_storage_host_id': 'host135',\n     'native_volume_id': 'homelab-pso-db_0000000018', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000015',\n     'name': 'host135Nonehomelab-pso-db_0000000015',\n     'native_storage_host_id': 'host135',\n     'native_volume_id': 'homelab-pso-db_0000000015', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000020',\n     'name': 'host137Nonehomelab-pso-db_0000000020',\n     'native_storage_host_id': 'host137',\n     'native_volume_id': 
'homelab-pso-db_0000000020', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000021',\n     'name': 'host135Nonehomelab-pso-db_0000000021',\n     'native_storage_host_id': 'host135',\n     'native_volume_id': 'homelab-pso-db_0000000021', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000022',\n     'name': 'host137Nonehomelab-pso-db_0000000022',\n     'native_storage_host_id': 'host137',\n     'native_volume_id': 'homelab-pso-db_0000000022', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000019',\n     'name': 'host135Nonehomelab-pso-db_0000000019',\n     'native_storage_host_id': 'host135',\n     'native_volume_id': 'homelab-pso-db_0000000019', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000026',\n     'name': 'host137Nonehomelab-pso-db_0000000026',\n     'native_storage_host_id': 'host137',\n     'native_volume_id': 'homelab-pso-db_0000000026', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000028',\n     'name': 'host135Nonehomelab-pso-db_0000000028',\n     'native_storage_host_id': 'host135',\n     'native_volume_id': 'homelab-pso-db_0000000028', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000024',\n     'name': 'host137Nonehomelab-pso-db_0000000024',\n     'native_storage_host_id': 'host137',\n     'native_volume_id': 'homelab-pso-db_0000000024', 'storage_id': '12345'},\n    {'native_masking_view_id': 'hsesxiNonehsboot', 'name': 'hsesxiNonehsboot',\n     'native_storage_host_id': 'hsesxi', 'native_volume_id': 'hsboot',\n     'storage_id': '12345'}, {'native_masking_view_id': 'hsesxiNonehszdata',\n                              'name': 'hsesxiNonehszdata',\n                              'native_storage_host_id': 'hsesxi',\n                              'native_volume_id': 'hszdata',\n                              'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun16',\n     'name': 'zty-doradoV6Nonezty_lun16',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun16',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun15',\n     'name': 'zty-doradoV6Nonezty_lun15',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun15',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun13',\n     'name': 'zty-doradoV6Nonezty_lun13',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun13',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun11',\n     'name': 'zty-doradoV6Nonezty_lun11',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun11',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun14',\n     'name': 'zty-doradoV6Nonezty_lun14',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun14',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun2',\n     'name': 'zty-doradoV6Nonezty_lun2',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun2',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun5',\n     'name': 'zty-doradoV6Nonezty_lun5',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun5',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 
'zty-doradoV6Nonezty_lun4',\n     'name': 'zty-doradoV6Nonezty_lun4',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun4',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun1',\n     'name': 'zty-doradoV6Nonezty_lun1',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun1',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun3',\n     'name': 'zty-doradoV6Nonezty_lun3',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun3',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun6',\n     'name': 'zty-doradoV6Nonezty_lun6',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun6',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun12',\n     'name': 'zty-doradoV6Nonezty_lun12',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun12',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun10',\n     'name': 'zty-doradoV6Nonezty_lun10',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun10',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun8',\n     'name': 'zty-doradoV6Nonezty_lun8',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun8',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun7',\n     'name': 'zty-doradoV6Nonezty_lun7',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun7',\n     'storage_id': '12345'},\n    {'native_masking_view_id': 'zty-doradoV6Nonezty_lun9',\n     'name': 'zty-doradoV6Nonezty_lun9',\n     'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun9',\n     'storage_id': '12345'}, {\n        'native_masking_view_id':\n            'CL-B06-RH2288HV3-8-44-157-33Nonehomelab-pso-db_0000000001-u',\n        'name': 'CL-B06-RH2288HV3-8-44-157-33Nonehomelab-pso-db_0000000001-u',\n        'native_storage_host_id': 'CL-B06-RH2288HV3-8-44-157-33',\n        'native_volume_id': 'homelab-pso-db_0000000001-u',\n        'storage_id': '12345'}, {\n        'native_masking_view_id':\n            'CL-B06-RH2288HV3-8-44-157-33NoneVolume-Group/voltest001',\n        'name': 'CL-B06-RH2288HV3-8-44-157-33NoneVolume-Group/voltest001',\n        'native_storage_host_id': 'CL-B06-RH2288HV3-8-44-157-33',\n        'native_volume_id': 'Volume-Group/voltest001', 'storage_id': '12345'},\n    {'native_masking_view_id': 'zhilong-host0000002130Nonehswin4102',\n     'name': 'zhilong-host0000002130Nonehswin4102',\n     'native_storage_host_id': 'zhilong-host0000002130',\n     'native_volume_id': 'hswin4102', 'storage_id': '12345'},\n    {'native_masking_view_id': 'host135Nonetangxuan/tt001',\n     'name': 'host135Nonetangxuan/tt001', 'native_storage_host_id': 'host135',\n     'native_volume_id': 'tangxuan/tt001', 'storage_id': '12345'},\n    {'native_masking_view_id': 'CL-Test1Nonehswin',\n     'name': 'CL-Test1Nonehswin', 'native_storage_host_id': 'CL-Test1',\n     'native_volume_id': 'hswin', 'storage_id': '12345'}, {\n        'native_masking_view_id':\n            'zhilong-host0000002130Nonehomelab-pso-db_0000000000-u',\n        'name': 'zhilong-host0000002130Nonehomelab-pso-db_0000000000-u',\n        'native_storage_host_id': 'zhilong-host0000002130',\n        'native_volume_id': 'homelab-pso-db_0000000000-u',\n        'storage_id': '12345'},\n    
{'native_masking_view_id': 'hosttestNonenc::136_connect',\n     'name': 'hosttestNonenc::136_connect',\n     'native_storage_host_id': 'hosttest',\n     'native_volume_id': 'nc::136_connect', 'storage_id': '12345'}]\nstorage_resource_metrics = {\n    constants.ResourceType.STORAGE: consts.STORAGE_CAP,\n}\nvolume_resource_metrics = {\n    constants.ResourceType.VOLUME: consts.VOLUME_CAP\n}\ndrive_metrics = [\n    {\n        \"writes_per_sec\": 0,\n        \"output_per_sec\": 0,\n        \"usec_per_write_op\": 0,\n        \"local_queue_usec_per_op\": 0,\n        \"time\": \"2022-04-25T02:24:46Z\",\n        \"reads_per_sec\": 0,\n        \"input_per_sec\": 0,\n        \"usec_per_read_op\": 0,\n        \"queue_depth\": 0\n    }, {\n        \"writes_per_sec\": 1856,\n        \"output_per_sec\": 0,\n        \"usec_per_write_op\": 653021.569741,\n        \"local_queue_usec_per_op\": 43158,\n        \"time\": \"2022-04-25T02:25:46Z\",\n        \"reads_per_sec\": 0,\n        \"input_per_sec\": 0,\n        \"usec_per_read_op\": 5360,\n        \"queue_depth\": 0\n    }]\nvolume_metrics_info = [{\n    \"writes_per_sec\": 1864,\n    \"name\": \"136_connect\",\n    \"usec_per_write_op\": 46200000,\n    \"output_per_sec\": 0,\n    \"reads_per_sec\": 0,\n    \"input_per_sec\": 5620302,\n    \"time\": \"2022-04-12T02:12:16Z\",\n    \"usec_per_read_op\": 0\n}, {\n    \"writes_per_sec\": 1864,\n    \"name\": \"136_connect\",\n    \"usec_per_write_op\": 46200000,\n    \"output_per_sec\": 0,\n    \"reads_per_sec\": 0,\n    \"input_per_sec\": 5620302,\n    \"time\": \"2022-04-12T02:13:16Z\",\n    \"usec_per_read_op\": 0\n}]\n\n\ndef create_driver():\n    RestHandler.login = mock.Mock(\n        return_value={None})\n    return PureFlashArrayDriver(**ACCESS_INFO)\n\n\nclass test_PureFlashArrayDriver(TestCase):\n    driver = create_driver()\n\n    def test_init(self):\n        RestHandler.login = mock.Mock(\n            return_value={\"\"})\n        PureFlashArrayDriver(**ACCESS_INFO)\n\n    def test_list_volumes(self):\n        RestHandler.get_volumes = mock.Mock(\n            side_effect=[volumes_info])\n        volume = self.driver.list_volumes(context)\n        self.assertEqual(volume, volume_data)\n\n    def test_get_storage(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[storage_info, hardware_info, drive_info,\n                         storage_id_info, controllers_info])\n        storage_object = self.driver.get_storage(context)\n        self.assertEqual(storage_object, storage_data)\n\n    def test_list_alerts(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[alerts_info])\n        list_alerts = self.driver.list_alerts(context)\n        self.assertEqual(list_alerts, list_alert_data)\n\n    def test_parse_alert(self):\n        parse_alert = self.driver.parse_alert(context, parse_alert_info)\n        parse_alert_data['occur_time'] = parse_alert.get('occur_time')\n        self.assertDictEqual(parse_alert, parse_alert_data)\n\n    def test_list_controllers(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[controllers_info, hardware_info])\n        list_controllers = self.driver.list_controllers(context)\n        self.assertListEqual(list_controllers, controllers_data)\n\n    def test_list_disks(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[hardware_info, drive_info])\n        list_disks = self.driver.list_disks(context)\n        self.assertListEqual(list_disks, disk_data)\n\n    def 
test_list_ports(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[port_network_info, port_info, hardware_info])\n        list_ports = self.driver.list_ports(context)\n        self.assertListEqual(list_ports, port_data)\n\n    def test_list_storage_pools(self):\n        list_storage_pools = self.driver.list_storage_pools(context)\n        self.assertEqual(list_storage_pools, [])\n\n    def test_reset_connection(self):\n        RestHandler.logout = mock.Mock(side_effect=None)\n        RestHandler.login = mock.Mock(side_effect=None)\n        username = None\n        try:\n            self.driver.reset_connection(context)\n        except Exception as e:\n            LOG.error(\"test_reset_connection error: %s\", six.text_type(e))\n            username = reset_connection_info.get('username')\n        self.assertEqual(username, None)\n\n    def test_list_storage_host_initiators(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[hosts_info])\n        hosts = self.driver.list_storage_host_initiators(context)\n        self.assertEqual(hosts, initiator_data)\n\n    def test_list_storage_hosts(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[HOSTS_PERSONALITY_INFO])\n        hosts = self.driver.list_storage_hosts(context)\n        self.assertListEqual(hosts, host_data)\n\n    def test_list_storage_host_groups(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[HGROUP_INFO])\n        hgroup = self.driver.list_storage_host_groups(context)\n        self.assertDictEqual(hgroup, host_group_data)\n\n    def test_list_volume_groups(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[VOLUME_GROUP_INFO])\n        v_group = self.driver.list_volume_groups(context)\n        self.assertDictEqual(v_group, volume_group_data)\n\n    def test_list_masking_views(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[HGROUP_CONNECT_INFO, HOSTS_CONNECT_INFO])\n        views = self.driver.list_masking_views(context)\n        self.assertListEqual(views, views_data)\n\n    # def test_collect_perf_metrics(self):\n    #     RestHandler.rest_call = mock.Mock(\n    #         side_effect=[storage_id_info, drive_metrics])\n    #     localtime = time.mktime(time.localtime()) * units.k\n    #     storage_id = 12345\n    #     start_time = localtime - 1000 * 60 * 60 * 24 * 364\n    #     end_time = localtime\n    #     metrics = self.driver.collect_perf_metrics(\n    #         context, storage_id, storage_resource_metrics, start_time,\n    #         end_time)\n    #     storage_metrics = [\n    #         constants.metric_struct(\n    #             name='iops',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'storage',\n    #                 'resource_id': 'dlmkk15xcfdf4v5',\n    #                 'resource_name': 'pure01',\n    #                 'type': 'RAW',\n    #                 'unit': 'IOPS'},\n    #             values={1650853440000: 0, 1650853500000: 1856}\n    #         ),\n    #         constants.metric_struct(\n    #             name='readIops',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'storage',\n    #                 'resource_id': 'dlmkk15xcfdf4v5',\n    #                 'resource_name': 'pure01',\n    #                 'type': 'RAW',\n    #                 'unit': 'IOPS'},\n    #             values={1650853440000: 0, 1650853500000: 
0}\n    #         ),\n    #         constants.metric_struct(\n    #             name='writeIops',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'storage',\n    #                 'resource_id': 'dlmkk15xcfdf4v5',\n    #                 'resource_name': 'pure01',\n    #                 'type': 'RAW',\n    #                 'unit': 'IOPS'},\n    #             values={1650853440000: 0, 1650853500000: 1856}\n    #         ),\n    #         constants.metric_struct(\n    #             name='throughput',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'storage',\n    #                 'resource_id': 'dlmkk15xcfdf4v5',\n    #                 'resource_name': 'pure01',\n    #                 'type': 'RAW',\n    #                 'unit': 'MB/s'},\n    #             values={1650853440000: 0.0, 1650853500000: 0.0}\n    #         ),\n    #         constants.metric_struct(\n    #             name='readThroughput',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'storage',\n    #                 'resource_id': 'dlmkk15xcfdf4v5',\n    #                 'resource_name': 'pure01',\n    #                 'type': 'RAW',\n    #                 'unit': 'MB/s'},\n    #             values={1650853440000: 0.0, 1650853500000: 0.0}\n    #         ),\n    #         constants.metric_struct(\n    #             name='writeThroughput',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'storage',\n    #                 'resource_id': 'dlmkk15xcfdf4v5',\n    #                 'resource_name': 'pure01',\n    #                 'type': 'RAW',\n    #                 'unit': 'MB/s'},\n    #             values={1650853440000: 0.0, 1650853500000: 0.0}\n    #         ),\n    #         constants.metric_struct(\n    #             name='readResponseTime',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'storage',\n    #                 'resource_id': 'dlmkk15xcfdf4v5',\n    #                 'resource_name': 'pure01',\n    #                 'type': 'RAW',\n    #                 'unit': 'ms'},\n    #             values={1650853440000: 0.0, 1650853500000: 5.36}\n    #         ),\n    #         constants.metric_struct(\n    #             name='writeResponseTime',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'storage',\n    #                 'resource_id': 'dlmkk15xcfdf4v5',\n    #                 'resource_name': 'pure01',\n    #                 'type': 'RAW',\n    #                 'unit': 'ms'},\n    #             values={1650853440000: 0.0, 1650853500000: 653.022}\n    #         )\n    #     ]\n    #     self.assertListEqual(metrics, storage_metrics)\n    #     volume_metrics = [\n    #         constants.metric_struct(\n    #             name='iops',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'volume',\n    #                 'resource_id': '136_connect',\n    #                 'resource_name': '136_connect',\n    #                 'type': 'RAW',\n    #                 'unit': 'IOPS'},\n    #             values={1649729520000: 1864, 1649729580000: 1864}\n    #         ),\n    #         constants.metric_struct(\n    #             name='readIops',\n    #             labels={\n    #          
       'storage_id': 12345,\n    #                 'resource_type': 'volume',\n    #                 'resource_id': '136_connect',\n    #                 'resource_name': '136_connect',\n    #                 'type': 'RAW',\n    #                 'unit': 'IOPS'},\n    #             values={1649729520000: 0, 1649729580000: 0}\n    #         ),\n    #         constants.metric_struct(\n    #             name='writeIops',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'volume',\n    #                 'resource_id': '136_connect',\n    #                 'resource_name': '136_connect',\n    #                 'type': 'RAW',\n    #                 'unit': 'IOPS'},\n    #             values={1649729520000: 1864, 1649729580000: 1864}\n    #         ),\n    #         constants.metric_struct(\n    #             name='throughput',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'volume',\n    #                 'resource_id': '136_connect',\n    #                 'resource_name': '136_connect',\n    #                 'type': 'RAW',\n    #                 'unit': 'MB/s'},\n    #             values={1649729520000: 5.36, 1649729580000: 5.36}\n    #         ),\n    #         constants.metric_struct(\n    #             name='readThroughput',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'volume',\n    #                 'resource_id': '136_connect',\n    #                 'resource_name': '136_connect',\n    #                 'type': 'RAW',\n    #                 'unit': 'MB/s'},\n    #             values={1649729520000: 0.0, 1649729580000: 0.0}\n    #         ),\n    #         constants.metric_struct(\n    #             name='writeThroughput',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'volume',\n    #                 'resource_id': '136_connect',\n    #                 'resource_name': '136_connect',\n    #                 'type': 'RAW',\n    #                 'unit': 'MB/s'},\n    #             values={1649729520000: 5.36, 1649729580000: 5.36}\n    #         ),\n    #         constants.metric_struct(\n    #             name='readResponseTime',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'volume',\n    #                 'resource_id': '136_connect',\n    #                 'resource_name': '136_connect',\n    #                 'type': 'RAW',\n    #                 'unit': 'ms'},\n    #             values={1649729520000: 0.0, 1649729580000: 0.0}\n    #         ),\n    #         constants.metric_struct(\n    #             name='writeResponseTime',\n    #             labels={\n    #                 'storage_id': 12345,\n    #                 'resource_type': 'volume',\n    #                 'resource_id': '136_connect',\n    #                 'resource_name': '136_connect',\n    #                 'type': 'RAW',\n    #                 'unit': 'ms'},\n    #             values={1649729520000: 46200.0, 1649729580000: 46200.0}\n    #         )\n    #     ]\n    #     RestHandler.rest_call = mock.Mock(\n    #         side_effect=[volume_metrics_info])\n    #     metrics = self.driver.collect_perf_metrics(\n    #         context, storage_id, volume_resource_metrics, start_time,\n    #         end_time)\n    #     self.assertListEqual(metrics, volume_metrics)\n\n    def 
test_get_capabilities(self):\n        err = None\n        try:\n            self.driver.get_capabilities(context)\n        except Exception as e:\n            err = six.text_type(e)\n            LOG.error(\"test_get_capabilities error: %s\", err)\n        self.assertEqual(err, None)\n\n    def test_get_latest_perf_timestamp(self):\n        RestHandler.rest_call = mock.Mock(\n            side_effect=[drive_metrics])\n        timestamp = self.driver.get_latest_perf_timestamp(context)\n        times = 1650853500000\n        self.assertEqual(timestamp, times)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/test_api.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport copy\nfrom unittest import TestCase, mock\n\nimport sys\n\nfrom delfin import context\n# from delfin import exception\nfrom delfin.common import config, constants  # noqa\nfrom delfin.drivers.api import API\nfrom delfin.drivers.fake_storage import FakeStorageDriver\n\nsys.modules['delfin.cryptor'] = mock.Mock()\n\n\nclass Request:\n    def __init__(self):\n        self.environ = {'delfin.context': context.RequestContext()}\n        pass\n\n\nACCESS_INFO = {\n    \"storage_id\": \"12345\",\n    \"vendor\": \"fake_storage\",\n    \"model\": \"fake_driver\",\n    \"rest\": {\n        \"host\": \"10.0.0.1\",\n        \"port\": \"8443\",\n        \"username\": \"user\",\n        \"password\": \"pass\"\n    },\n    \"extra_attributes\": {\n        \"array_id\": \"00112233\"\n    }\n}\n\nSTORAGE = {\n    'name': 'fake_driver',\n    'description': 'it is a fake driver.',\n    'vendor': 'fake_vendor',\n    'model': 'fake_model',\n    'status': 'normal',\n    'serial_number': '2102453JPN12KA000011',\n    'firmware_version': '1.0.0',\n    'location': 'HK',\n    'total_capacity': 1024 * 1024,\n    'used_capacity': 3126,\n    'free_capacity': 1045449,\n}\n\n\nclass TestDriverAPI(TestCase):\n\n    def test_init(self):\n        api = API()\n        self.assertIsNotNone(api.driver_manager)\n\n    # @mock.patch('delfin.db.storage_get')\n    # @mock.patch('delfin.db.storage_create')\n    # @mock.patch('delfin.db.access_info_create')\n    # @mock.patch('delfin.db.storage_get_all')\n    # def test_discover_storage(self, mock_storage, mock_access_info,\n    #                           mock_storage_create, mock_get_storage):\n    #     # Case: Positive scenario for fake driver discovery\n    #     storage = copy.deepcopy(STORAGE)\n    #     storage['id'] = '12345'\n    #     mock_storage.return_value = None\n    #     mock_access_info.return_value = ACCESS_INFO\n    #     mock_storage_create.return_value = storage\n    #     api = API()\n    #     api.discover_storage(context, ACCESS_INFO)\n    #     mock_storage.assert_called()\n    #     mock_access_info.assert_called_with(context, ACCESS_INFO)\n    #     mock_storage_create.assert_called()\n    #     mock_get_storage.return_value = None\n\n    #     # Case: Register already existing storage\n    #     with self.assertRaises(exception.StorageAlreadyExists) as exc:\n    #         mock_storage.return_value = storage\n    #         api.discover_storage(context, ACCESS_INFO)\n    #     self.assertIn('Storage already exists', str(exc.exception))\n    #     mock_storage.return_value = None\n\n    #     # Case: Storage without serial number\n    #     wrong_storage = copy.deepcopy(STORAGE)\n    #     wrong_storage.pop('serial_number')\n    #     wrong_storage['id'] = '12345'\n    #     m = mock.Mock()\n    #     with mock.patch.object(FakeStorageDriver, 'get_storage') as m:\n    #         with self.assertRaises(exception.InvalidResults) as exc:\n    #   
          m.return_value = wrong_storage\n    #             api.discover_storage(context, ACCESS_INFO)\n    #         self.assertIn('Serial number should be provided by storage',\n    #                       str(exc.exception))\n\n    #         # Case: No Storage found\n    #         with self.assertRaises(exception.StorageBackendNotFound) as exc:\n    #             m.return_value = None\n    #             api.discover_storage(context, ACCESS_INFO)\n    #         self.assertIn('Storage backend could not be found',\n    #                       str(exc.exception))\n\n    #     # Case: Test access info without 'storage_id' for driver\n    #     test_access_info = copy.deepcopy(ACCESS_INFO)\n    #     test_access_info.pop('storage_id')\n\n    #     s = api.discover_storage(context, ACCESS_INFO)\n    #     self.assertDictEqual(s, storage)\n\n    #     # Case: Wrong access info (model) for driver\n    #     wrong_access_info = copy.deepcopy(ACCESS_INFO)\n    #     wrong_access_info['model'] = 'wrong_model'\n    #     with self.assertRaises(exception.StorageDriverNotFound) as exc:\n    #         api.discover_storage(context, wrong_access_info)\n\n    #     msg = \"Storage driver 'fake_storage wrong_model'could not be found\"\n    #     self.assertIn(msg, str(exc.exception))\n\n    # @mock.patch.object(FakeStorageDriver, 'get_storage')\n    # @mock.patch('delfin.db.storage_update')\n    # @mock.patch('delfin.db.access_info_update')\n    # @mock.patch('delfin.db.storage_get')\n    # def test_update_access_info(self, mock_storage_get,\n    #                             mock_access_info_update,\n    #                             mock_storage_update,\n    #                             mock_storage):\n    #     storage = copy.deepcopy(STORAGE)\n    #     access_info_new = copy.deepcopy(ACCESS_INFO)\n    #     access_info_new['rest']['username'] = 'new_user'\n\n    #     mock_storage_get.return_value = storage\n    #     mock_access_info_update.return_value = access_info_new\n    #     mock_storage_update.return_value = None\n    #     mock_storage.return_value = storage\n    #     api = API()\n    #     updated = api.update_access_info(context, access_info_new)\n\n    #     storage_id = '12345'\n    #     mock_storage_get.assert_called_with(\n    #         context, storage_id)\n\n    #     mock_access_info_update.assert_called_with(\n    #         context, storage_id, access_info_new)\n\n    #     mock_storage_update.assert_called_with(\n    #         context, storage_id, storage)\n\n    #     access_info_new['rest']['password'] = \"cGFzc3dvcmQ=\"\n    #     self.assertDictEqual(access_info_new, updated)\n\n    #     # Case: Wrong storage serial number\n    #     wrong_storage = copy.deepcopy(STORAGE)\n    #     wrong_storage['serial_number'] = '00000'\n    #     mock_storage.return_value = wrong_storage\n    #     with self.assertRaises(exception.StorageSerialNumberMismatch) as exc:\n    #         api.update_access_info(context, access_info_new)\n\n    #     msg = \"Serial number 00000 does not match \" \\\n    #           \"the existing storage serial number\"\n    #     self.assertIn(msg, str(exc.exception))\n\n    #     # Case: No storage serial number\n    #     wrong_storage.pop('serial_number')\n    #     mock_storage.return_value = wrong_storage\n    #     with self.assertRaises(exception.InvalidResults) as exc:\n    #         api.update_access_info(context, access_info_new)\n\n    #     msg = \"Serial number should be provided by storage\"\n    #     self.assertIn(msg, str(exc.exception))\n\n  
  #     # Case: No storage\n    #     mock_storage.return_value = None\n    #     with self.assertRaises(exception.StorageBackendNotFound) as exc:\n    #         api.update_access_info(context, access_info_new)\n\n    #     msg = \"Storage backend could not be found\"\n    #     self.assertIn(msg, str(exc.exception))\n\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    @mock.patch('delfin.db.storage_get')\n    @mock.patch('delfin.db.storage_create')\n    @mock.patch('delfin.db.access_info_create')\n    @mock.patch('delfin.db.storage_get_all')\n    def test_remove_storage(self, mock_storage, mock_access_info,\n                            mock_storage_create, mock_get_storage,\n                            mock_dm):\n        storage = copy.deepcopy(STORAGE)\n        storage['id'] = '12345'\n        mock_storage.return_value = None\n        mock_access_info.return_value = ACCESS_INFO\n        mock_storage_create.return_value = storage\n        api = API()\n        api.discover_storage(context, ACCESS_INFO)\n        mock_get_storage.return_value = None\n        mock_dm.return_value = FakeStorageDriver()\n\n        storage_id = '12345'\n\n        # Verify that driver instance not added to factory\n        driver = api.driver_manager.driver_factory.get(storage_id, None)\n        self.assertIsNone(driver)\n\n        api.remove_storage(context, storage_id)\n\n        driver = api.driver_manager.driver_factory.get(storage_id, None)\n        self.assertIsNone(driver)\n\n    @mock.patch.object(FakeStorageDriver, 'get_storage')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_get_storage(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        storage = copy.deepcopy(STORAGE)\n        storage['id'] = '12345'\n        mock_fake.return_value = storage\n        api = API()\n\n        storage_id = '12345'\n\n        api.get_storage(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called()\n\n    @mock.patch.object(FakeStorageDriver, 'list_storage_pools')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_storage_pools(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n\n        storage_id = '12345'\n\n        api.list_storage_pools(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_volumes')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_volumes(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_volumes(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_controllers')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_controllers(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_controllers(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_disks')\n  
  @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_disks(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_disks(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_ports')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_ports(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_ports(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_filesystems')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_filesystems(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_filesystems(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_qtrees')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_qtrees(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_qtrees(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_shares')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_shares(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_shares(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'parse_alert')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    @mock.patch('delfin.db.access_info_get')\n    def test_parse_alert(self, mock_access_info,\n                         driver_manager, mock_fake):\n        mock_access_info.return_value = ACCESS_INFO\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n\n        storage_id = '12345'\n\n        api.parse_alert(context, storage_id, 'alert')\n        mock_access_info.assert_called_once()\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_get_capabilities(self, driver_manager):\n        driver_manager.return_value = FakeStorageDriver()\n        storage_id = '12345'\n        capabilities = API().get_capabilities(context, storage_id)\n\n        self.assertTrue('resource_metrics' in capabilities)\n        driver_manager.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_storage_host_initiators')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_storage_host_initiators(self, driver_manager, 
mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_storage_host_initiators(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_storage_hosts')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_storage_hosts(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_storage_hosts(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_storage_host_groups')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_storage_host_groups(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_storage_host_groups(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_port_groups')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_port_groups(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_port_groups(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_volume_groups')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_volume_groups(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_volume_groups(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch.object(FakeStorageDriver, 'list_masking_views')\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_list_masking_views(self, driver_manager, mock_fake):\n        driver_manager.return_value = FakeStorageDriver()\n        mock_fake.return_value = []\n        api = API()\n        storage_id = '12345'\n\n        api.list_masking_views(context, storage_id)\n        driver_manager.assert_called_once()\n        mock_fake.assert_called_once()\n\n    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')\n    def test_collect_perf_metrics(self, driver_manager):\n        driver_manager.return_value = FakeStorageDriver()\n        storage_id = '12345'\n        capabilities = API().get_capabilities(context, storage_id)\n\n        metrics = API().collect_perf_metrics(context, storage_id,\n                                             capabilities['resource_metrics'],\n                                             1622808000000, 1622808000001)\n        self.assertTrue('resource_metrics' in capabilities)\n        # Assert on the metric type itself; assertTrue(True, ...) would\n        # always pass because the second argument is only a failure message.\n        self.assertIsInstance(metrics[0], constants.metric_struct)\n        self.assertEqual(driver_manager.call_count, 2)\n"
  },
  {
    "path": "delfin/tests/unit/drivers/test_manager.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nfrom unittest import TestCase, mock\nfrom delfin.common import config # noqa\nfrom delfin.drivers.manager import DriverManager\n\nsys.modules['delfin.cryptor'] = mock.Mock()\n\n\nclass TestDriverManager(TestCase):\n\n    def test_init(self):\n        manager = DriverManager()\n        self.assertIsNotNone(manager.driver_factory)\n"
  },
  {
    "path": "delfin/tests/unit/exporter/prometheus/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/exporter/prometheus/test_prometheus.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nfrom unittest import TestCase\n\nfrom delfin.exporter.prometheus import prometheus\nfrom delfin.common.constants import metric_struct\n\nfake_metrics = [metric_struct(name='throughput',\n                              labels={'storage_id': '12345',\n                                      'resource_type': 'storage',\n                                      'resource_id': 'storage0',\n                                      'type': 'RAW', 'unit': 'MB/s'},\n                              values={1622808000000: 61.9388895680357})]\n\n\nclass TestPrometheusExporter(TestCase):\n\n    def test_push_to_prometheus(self):\n        prometheus_obj = prometheus.PrometheusExporter()\n        prometheus_obj.metrics_dir = os.getcwd()\n        prometheus_obj.push_to_prometheus(fake_metrics)\n        self.assertTrue(glob.glob(prometheus_obj.metrics_dir + '/' + '*.prom'))\n"
  },
  {
    "path": "delfin/tests/unit/fake_data.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom delfin.db.sqlalchemy import models\n\n\ndef fake_storage_pool_create():\n    fake_storage_pools = [models.StoragePool(), models.StoragePool()]\n\n    fake_storage_pools[0] = {'id': '14155a1f-f053-4ccb-a846-ed67e4387428',\n                             'storage_id': '12c2d52f-01bc-41f5-b73f'\n                                           '-7abf6f38a2a6',\n                             'name': 'SRP_1',\n                             'status': 'normal',\n                             'created_at': '2020-06-10T07:17:08.707356',\n                             'updated_at': '2020-06-10T07:17:08.707356',\n                             'native_storage_pool_id': 'SRP_1',\n                             'storage_type': 'block',\n                             'total_capacity': 26300318136401,\n                             'used_capacity': 19054536509358,\n                             'free_capacity': 7245781627043,\n                             'subscribed_capacity': 219902325555200,\n                             \"description\": \"fake storage Pool\", }\n\n    fake_storage_pools[1] = {'id': \"95f7b7ed-bd7f-426e-b05f-f1ffeb4f09df\",\n                             'storage_id': \"84d0c5f7-2349-401c-8672\"\n                                           \"-f76214d13cab\",\n                             'name': \"SRP_2\",\n                             'status': \"normal\",\n                             'created_at': \"2020-06-10T07:17:08.707356\",\n                             'updated_at': \"2020-06-10T07:17:08.707356\",\n                             'native_storage_pool_id': \"SRP_2\",\n                             'extra': \"extra attrib\",  # invalid key\n                             'storage_type': \"block\",\n                             'total_capacity': 26300318136401,\n                             'used_capacity': 19054536509358,\n                             'free_capacity': 7245781627043,\n                             'subscribed_capacity': 219902325555200,\n                             'description': \"fake storage Pool\", }\n\n    return fake_storage_pools\n\n\ndef fake_expected_storage_pool_create():\n    expected = [\n        {\n            \"created_at\": \"2020-06-10T07:17:08.707356\",\n            \"updated_at\": \"2020-06-10T07:17:08.707356\",\n            \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n            \"name\": \"SRP_1\",\n            \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n            \"native_storage_pool_id\": \"SRP_1\",\n            \"description\": \"fake storage Pool\",\n            \"status\": \"normal\",\n            \"storage_type\": \"block\",\n            \"total_capacity\": 26300318136401,\n            \"used_capacity\": 19054536509358,\n            \"free_capacity\": 7245781627043,\n            'subscribed_capacity': 219902325555200\n        },\n        {\n            \"created_at\": \"2020-06-10T07:17:08.707359\",\n            \"updated_at\": 
\"2020-06-10T07:17:08.707356\",\n            \"id\": \"95f7b7ed-bd7f-426e-b05f-f1ffeb4f09df\",\n            \"name\": \"SRP_2\",\n            \"storage_id\": '84d0c5f7-2349-401c-8672-f76214d13cab',\n            \"native_storage_pool_id\": \"SRP_2\",\n            \"description\": \"fake storage Pool\",\n            \"status\": \"normal\",\n            \"storage_type\": \"block\",\n            \"total_capacity\": 26300318136401,\n            \"used_capacity\": 19054536509358,\n            \"free_capacity\": 7245781627043,\n            'subscribed_capacity': 219902325555200,\n        }\n    ]\n    return expected\n\n\ndef fake_storage_host_initiator_create():\n    fake_storage_host_initiators = [models.StorageHostInitiator()]\n\n    fake_storage_host_initiators[0] = {\n        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',\n        'storage_id': '12c2d52f-01bc-41f5-b73f'\n                      '-7abf6f38a2a6',\n        'name': 'storage_host_initiator_1',\n        'description': 'storage_host_initiator_1',\n        'alias': 'storage_host_initiator_1',\n        'status': 'normal',\n        'created_at': '2020-06-10T07:17:08.707356',\n        'updated_at': '2020-06-10T07:17:08.707356',\n        'native_storage_host_initiator_id': 'storage_host_initiator_1',\n        'native_storage_host_id': 'storage_host_1',\n        'wwn': 'wwn1', 'type': 'fc'}\n    return fake_storage_host_initiators\n\n\ndef fake_expected_storage_host_initiator_create():\n    expected = [\n        {\n            \"created_at\": \"2020-06-10T07:17:08.707356\",\n            \"updated_at\": \"2020-06-10T07:17:08.707356\",\n            \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n            \"name\": \"storage_host_initiator_1\",\n            \"description\": \"storage_host_initiator_1\",\n            \"alias\": \"storage_host_initiator_1\",\n            \"status\": \"normal\",\n            \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n            \"native_storage_host_initiator_id\": \"storage_host_initiator_1\",\n            \"native_storage_host_id\": \"storage_host_1\",\n            \"wwn\": \"wwn1\",\n            \"type\": \"fc\"\n\n        },\n    ]\n    return expected\n\n\ndef fake_storage_host_create():\n    fake_storage_hosts = [models.StorageHost()]\n\n    fake_storage_hosts[0] = {\n        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',\n        'storage_id': '12c2d52f-01bc-41f5-b73f'\n                      '-7abf6f38a2a6',\n        'name': 'storage_host_1',\n        'description': 'storage_host_1',\n        'ip_address': '1.2.3.4',\n        'status': 'normal',\n        'os_type': 'linux',\n        'created_at': '2020-06-10T07:17:08.707356',\n        'updated_at': '2020-06-10T07:17:08.707356',\n        'native_storage_host_id': 'storage_host_1', }\n    return fake_storage_hosts\n\n\ndef fake_expected_storage_host_create():\n    expected = [\n        {\n            \"created_at\": \"2020-06-10T07:17:08.707356\",\n            \"updated_at\": \"2020-06-10T07:17:08.707356\",\n            \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n            \"name\": \"storage_host_1\",\n            \"description\": \"storage_host_1\",\n            \"ip_address\": \"1.2.3.4\",\n            \"os_type\": \"linux\",\n            \"status\": \"normal\",\n            \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n            \"native_storage_host_id\": \"storage_host_1\",\n        },\n    ]\n    return expected\n\n\ndef fake_storage_host_group_create():\n    fake_storage_host_groups = 
[models.StorageHostGroup()]\n\n    fake_storage_host_groups[0] = {\n        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',\n        'storage_id': '12c2d52f-01bc-41f5-b73f'\n                      '-7abf6f38a2a6',\n        'name': 'storage_host_group_1',\n        'description': 'storage_host_group_1',\n        'created_at': '2020-06-10T07:17:08.707356',\n        'updated_at': '2020-06-10T07:17:08.707356',\n        'native_storage_host_group_id': 'storage_host_group_1', }\n    return fake_storage_host_groups\n\n\ndef fake_expected_storage_host_group_create():\n    expected = [\n        {\n            \"created_at\": \"2020-06-10T07:17:08.707356\",\n            \"updated_at\": \"2020-06-10T07:17:08.707356\",\n            \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n            \"name\": \"storage_host_group_1\",\n            \"description\": \"storage_host_group_1\",\n            \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n            \"native_storage_host_group_id\": \"storage_host_group_1\",\n        },\n    ]\n    return expected\n\n\ndef fake_port_group_create():\n    fake_port_groups = [models.PortGroup()]\n\n    fake_port_groups[0] = {\n        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',\n        'storage_id': '12c2d52f-01bc-41f5-b73f'\n                      '-7abf6f38a2a6',\n        'name': 'port_group_1',\n        'description': 'port_group_1',\n        'created_at': '2020-06-10T07:17:08.707356',\n        'updated_at': '2020-06-10T07:17:08.707356',\n        'native_port_group_id': 'port_group_1', }\n    return fake_port_groups\n\n\ndef fake_expected_port_group_create():\n    expected = [\n        {\n            \"created_at\": \"2020-06-10T07:17:08.707356\",\n            \"updated_at\": \"2020-06-10T07:17:08.707356\",\n            \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n            \"name\": \"port_group_1\",\n            \"description\": \"port_group_1\",\n            \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n            \"native_port_group_id\": \"port_group_1\",\n        },\n    ]\n    return expected\n\n\ndef fake_volume_group_create():\n    fake_volume_groups = [models.VolumeGroup()]\n\n    fake_volume_groups[0] = {\n        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',\n        'storage_id': '12c2d52f-01bc-41f5-b73f'\n                      '-7abf6f38a2a6',\n        'name': 'volume_group_1',\n        'description': 'volume_group_1',\n        'created_at': '2020-06-10T07:17:08.707356',\n        'updated_at': '2020-06-10T07:17:08.707356',\n        'native_volume_group_id': 'volume_group_1', }\n    return fake_volume_groups\n\n\ndef fake_expected_volume_groups_create():\n    expected = [\n        {\n            \"created_at\": \"2020-06-10T07:17:08.707356\",\n            \"updated_at\": \"2020-06-10T07:17:08.707356\",\n            \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n            \"name\": \"volume_group_1\",\n            \"description\": \"volume_group_1\",\n            \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n            \"native_volume_group_id\": \"volume_group_1\",\n        },\n    ]\n    return expected\n\n\ndef fake_masking_view_create():\n    fake_masking_views = [models.MaskingView()]\n\n    fake_masking_views[0] = {\n        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',\n        'storage_id': '12c2d52f-01bc-41f5-b73f'\n                      '-7abf6f38a2a6',\n        'name': 'masking_view_1',\n        'description': 'masking_view_1',\n        'created_at': '2020-06-10T07:17:08.707356',\n        
'updated_at': '2020-06-10T07:17:08.707356',\n        'native_storage_host_id': 'storage_host_1',\n        'native_volume_id': 'volume_1',\n        'native_masking_view_id': 'masking_view_1', }\n    return fake_masking_views\n\n\ndef fake_expected_masking_views_create():\n    expected = [\n        {\n            \"created_at\": \"2020-06-10T07:17:08.707356\",\n            \"updated_at\": \"2020-06-10T07:17:08.707356\",\n            \"id\": \"14155a1f-f053-4ccb-a846-ed67e4387428\",\n            \"name\": \"masking_view_1\",\n            \"description\": \"masking_view_1\",\n            \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n            \"native_storage_host_id\": \"storage_host_1\",\n            \"native_volume_id\": \"volume_1\",\n            \"native_storage_host_group_id\": None,\n            \"native_port_group_id\": None,\n            \"native_volume_group_id\": None,\n            \"native_masking_view_id\": \"masking_view_1\",\n        },\n    ]\n    return expected\n"
  },
  {
    "path": "delfin/tests/unit/fake_notifier.py",
    "content": "# Copyright 2014 Red Hat, Inc.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport collections\nimport functools\n\nimport oslo_messaging as messaging\nfrom oslo_serialization import jsonutils\n\nfrom delfin import rpc\n\nNOTIFICATIONS = []\n\n\ndef reset():\n    del NOTIFICATIONS[:]\n\n\nFakeMessage = collections.namedtuple(\n    'Message',\n    ['publisher_id', 'priority', 'event_type', 'payload'],\n)\n\n\nclass FakeNotifier(object):\n\n    def __init__(self, transport, publisher_id=None, serializer=None):\n        self.transport = transport\n        self.publisher_id = publisher_id\n        for priority in ['debug', 'info', 'warn', 'error', 'critical']:\n            setattr(self, priority,\n                    functools.partial(self._notify, priority.upper()))\n        self._serializer = serializer or messaging.serializer.NoOpSerializer()\n\n    def prepare(self, publisher_id=None):\n        if publisher_id is None:\n            publisher_id = self.publisher_id\n        return self.__class__(self.transport, publisher_id, self._serializer)\n\n    def _notify(self, priority, ctxt, event_type, payload):\n        payload = self._serializer.serialize_entity(ctxt, payload)\n        # NOTE(sileht): simulate the kombu serializer\n        # this permit to raise an exception if something have not\n        # been serialized correctly\n        jsonutils.to_primitive(payload)\n        msg = dict(publisher_id=self.publisher_id,\n                   priority=priority,\n                   event_type=event_type,\n                   payload=payload)\n        NOTIFICATIONS.append(msg)\n\n\ndef stub_notifier(testcase):\n    testcase.mock_object(messaging, 'Notifier', FakeNotifier)\n    if rpc.NOTIFIER:\n        serializer = getattr(rpc.NOTIFIER, '_serializer', None)\n        testcase.mock_object(rpc, 'NOTIFIER',\n                             FakeNotifier(rpc.NOTIFIER.transport,\n                                          rpc.NOTIFIER.publisher_id,\n                                          serializer=serializer))\n"
  },
  {
    "path": "delfin/tests/unit/leader_election/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/leader_election/distributor/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/leader_election/distributor/test_task_distributor.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\n\nfrom oslo_utils import uuidutils\n\nfrom delfin import context\nfrom delfin import db\nfrom delfin import test\nfrom delfin.common import constants\nfrom delfin.db.sqlalchemy.models import Task\nfrom delfin.leader_election.distributor.task_distributor import TaskDistributor\n\nfake_telemetry_job = {\n    Task.id.name: 2,\n    Task.storage_id.name: uuidutils.generate_uuid(),\n    Task.args.name: {},\n    Task.interval.name: 10,\n    Task.method.name: constants.TelemetryCollection.PERFORMANCE_TASK_METHOD,\n    Task.last_run_time.name: None,\n    Task.deleted.name: 0,\n}\n\nfake_telemetry_jobs = [\n    fake_telemetry_job,\n]\n\n\nclass TestTaskDistributor(test.TestCase):\n\n    @mock.patch('delfin.coordination.ConsistentHashing.get_task_executor')\n    @mock.patch('delfin.coordination.ConsistentHashing.start')\n    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.assign_job')\n    @mock.patch.object(db, 'task_update')\n    @mock.patch('delfin.coordination.ConsistentHashing.__init__',\n                mock.Mock(return_value=None))\n    def test_distribute_new_job(self, mock_task_update, mock_assign_job,\n                                mock_partitioner_start,\n                                mock_get_task_executor):\n        ctx = context.get_admin_context()\n        task_distributor = TaskDistributor(ctx)\n        task_distributor.distribute_new_job('fake_task_id')\n        self.assertEqual(mock_assign_job.call_count, 1)\n        self.assertEqual(mock_task_update.call_count, 1)\n        self.assertEqual(mock_partitioner_start.call_count, 1)\n        self.assertEqual(mock_get_task_executor.call_count, 1)\n"
  },
  {
    "path": "delfin/tests/unit/task_manager/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/task_manager/scheduler/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/task_manager/scheduler/schedulers/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/task_manager/scheduler/schedulers/telemetry/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/task_manager/scheduler/schedulers/telemetry/test_failed_performance_collection_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import datetime\nfrom unittest import mock\n\nfrom oslo_utils import uuidutils\n\nfrom delfin import context\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import test\nfrom delfin.common.constants import TelemetryCollection\nfrom delfin.common.constants import TelemetryTaskStatus, TelemetryJobStatus\nfrom delfin.db.sqlalchemy.models import FailedTask\nfrom delfin.db.sqlalchemy.models import Task\nfrom delfin.task_manager.scheduler.schedulers.telemetry. \\\n    failed_performance_collection_handler import \\\n    FailedPerformanceCollectionHandler\n\nfake_failed_job_id = 43\n\nfake_failed_job = {\n    FailedTask.id.name: fake_failed_job_id,\n    FailedTask.retry_count.name: 0,\n    FailedTask.result.name: \"Init\",\n    FailedTask.job_id.name: uuidutils.generate_uuid(),\n    FailedTask.task_id.name: uuidutils.generate_uuid(),\n    FailedTask.method.name: FailedPerformanceCollectionHandler.__module__ +\n                            '.' +\n                            FailedPerformanceCollectionHandler.__name__,\n    FailedTask.start_time.name: int(datetime.now().timestamp()),\n    FailedTask.end_time.name: int(datetime.now().timestamp()) + 20,\n    FailedTask.interval.name: 20,\n    FailedTask.deleted.name: False,\n    FailedTask.executor.name: 'node1',\n}\n\nfake_deleted_storage_failed_job = {\n    FailedTask.id.name: fake_failed_job_id,\n    FailedTask.retry_count.name: 0,\n    FailedTask.result.name: \"Init\",\n    FailedTask.job_id.name: uuidutils.generate_uuid(),\n    FailedTask.task_id.name: uuidutils.generate_uuid(),\n    FailedTask.method.name: FailedPerformanceCollectionHandler.__module__ +\n                            '.' 
+\n                            FailedPerformanceCollectionHandler.__name__,\n    FailedTask.start_time.name: int(datetime.now().timestamp()),\n    FailedTask.end_time.name: int(datetime.now().timestamp()) + 20,\n    FailedTask.interval.name: 20,\n    FailedTask.deleted.name: True,\n    FailedTask.executor.name: 'node1',\n}\n\nfake_telemetry_job = {\n    Task.id.name: 2,\n    Task.storage_id.name: uuidutils.generate_uuid(),\n    Task.args.name: {},\n    Task.executor.name: 'node1',\n}\n\n\ndef failed_task_not_found_exception(ctx, failed_task_id):\n    raise exception.FailedTaskNotFound(\"Failed Task not found.\")\n\n\nclass TestFailedPerformanceCollectionHandler(test.TestCase):\n\n    @mock.patch.object(db, 'task_get',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch.object(db, 'failed_task_get',\n                       mock.Mock(return_value=fake_failed_job))\n    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.remove_failed_job')\n    @mock.patch('delfin.db.failed_task_update')\n    @mock.patch('delfin.task_manager.tasks.telemetry'\n                '.PerformanceCollectionTask.collect')\n    def test_failed_job_success(self, mock_collect_telemetry,\n                                mock_failed_task_update, mock_failed_job):\n        mock_collect_telemetry.return_value = TelemetryTaskStatus. \\\n            TASK_EXEC_STATUS_SUCCESS\n        ctx = context.get_admin_context()\n        failed_job_handler = FailedPerformanceCollectionHandler.get_instance(\n            ctx, fake_failed_job_id)\n        # call failed job\n        failed_job_handler()\n\n        self.assertEqual(mock_failed_job.call_count, 1)\n        mock_failed_task_update.assert_called_once_with(\n            ctx,\n            fake_failed_job_id,\n            {\n                FailedTask.retry_count.name: 1,\n                FailedTask.result.name:\n                    TelemetryJobStatus.FAILED_JOB_STATUS_SUCCESS})\n\n    @mock.patch.object(db, 'task_get',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch.object(db, 'failed_task_get',\n                       mock.Mock(return_value=fake_failed_job))\n    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.remove_failed_job')\n    @mock.patch('delfin.db.failed_task_update')\n    @mock.patch('delfin.task_manager.rpcapi.TaskAPI.collect_telemetry')\n    def test_failed_job_failure(self, mock_collect_telemetry,\n                                mock_failed_task_update, mock_failed_job):\n        mock_collect_telemetry.return_value = TelemetryTaskStatus. 
\\\n            TASK_EXEC_STATUS_FAILURE\n        ctx = context.get_admin_context()\n        failed_job_handler = FailedPerformanceCollectionHandler.get_instance(\n            ctx, fake_failed_job_id)\n        # retry\n        # call failed job\n        failed_job_handler()\n\n        self.assertEqual(mock_failed_job.call_count, 0)\n        mock_failed_task_update.assert_called_once_with(\n            ctx,\n            fake_failed_job_id,\n            {\n                FailedTask.retry_count.name: 1,\n                FailedTask.result.name:\n                    TelemetryJobStatus.FAILED_JOB_STATUS_RETRYING})\n\n    @mock.patch.object(db, 'task_get',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch.object(db, 'failed_task_get')\n    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.remove_failed_job')\n    @mock.patch('delfin.db.failed_task_update')\n    @mock.patch('delfin.task_manager.rpcapi.TaskAPI.collect_telemetry')\n    def test_failed_job_fail_max_times(self, mock_collect_telemetry,\n                                       mock_failed_task_update,\n                                       mock_remove_job,\n                                       mock_failed_task_get):\n        mock_collect_telemetry.return_value = TelemetryTaskStatus. \\\n            TASK_EXEC_STATUS_FAILURE\n\n        failed_job = fake_failed_job.copy()\n        failed_job[\n            FailedTask.retry_count.name] = \\\n            TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT - 1\n        # return with maximum retry count\n        mock_failed_task_get.return_value = failed_job\n\n        ctx = context.get_admin_context()\n        failed_job_handler = FailedPerformanceCollectionHandler.get_instance(\n            ctx, fake_failed_job_id)\n        # call failed job\n        failed_job_handler()\n\n        self.assertEqual(mock_remove_job.call_count, 1)\n        mock_failed_task_update.assert_called_once_with(\n            ctx,\n            fake_failed_job_id,\n            {\n                FailedTask.retry_count.name:\n                    TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT,\n                FailedTask.result.name:\n                    TelemetryJobStatus.FAILED_JOB_STATUS_INIT})\n\n    @mock.patch.object(db, 'task_get',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch.object(db, 'failed_task_get',\n                       mock.Mock(return_value=fake_deleted_storage_failed_job))\n    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.remove_failed_job')\n    @mock.patch('delfin.db.failed_task_update')\n    @mock.patch('delfin.task_manager.rpcapi.TaskAPI.collect_telemetry')\n    def test_failed_job_deleted_storage(self, mock_collect_telemetry,\n                                        mock_failed_task_update,\n                                        mock_pause_job):\n        ctx = context.get_admin_context()\n        failed_job_handler = FailedPerformanceCollectionHandler.get_instance(\n            ctx, fake_failed_job_id)\n        failed_job_handler()\n\n        # Verify that no action performed for deleted storage failed tasks\n        self.assertEqual(mock_collect_telemetry.call_count, 0)\n        self.assertEqual(mock_failed_task_update.call_count, 0)\n\n    @mock.patch.object(db, 'task_get',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch.object(db, 'failed_task_get', failed_task_not_found_exception)\n    @mock.patch(\n        
'delfin.task_manager.metrics_rpcapi.TaskAPI.remove_failed_job',\n        mock.Mock())\n    @mock.patch('delfin.db.failed_task_update')\n    @mock.patch('delfin.task_manager.rpcapi.TaskAPI.collect_telemetry')\n    def test_deleted_storage_exception(self, mock_collect_telemetry,\n                                       mock_failed_task_update):\n        ctx = context.get_admin_context()\n        failed_job_handler = FailedPerformanceCollectionHandler(\n            ctx, 1122, '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6', '',\n            1234, 2, 1122334400, 1122334800, 'node1')\n        failed_job_handler()\n\n        # Verify that no action performed for deleted storage failed tasks\n        self.assertEqual(mock_collect_telemetry.call_count, 0)\n        self.assertEqual(mock_failed_task_update.call_count, 0)\n"
  },
  {
    "path": "delfin/tests/unit/task_manager/scheduler/schedulers/telemetry/test_job_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\nfrom datetime import datetime\nfrom oslo_utils import uuidutils\n\nfrom delfin import context\nfrom delfin import db\nfrom delfin import test\nfrom delfin.common import constants\nfrom delfin.db.sqlalchemy.models import Task\nfrom delfin.task_manager.scheduler.schedulers.telemetry.job_handler import \\\n    JobHandler\nfrom delfin.task_manager.scheduler.schedulers.telemetry.job_handler import \\\n    FailedJobHandler\nfrom delfin.db.sqlalchemy.models import FailedTask\nfrom delfin.task_manager.scheduler.schedulers.telemetry. \\\n    failed_performance_collection_handler import \\\n    FailedPerformanceCollectionHandler\nfrom delfin.common.constants import TelemetryCollection\n\nfake_executor = 'node1'\nfake_telemetry_job = {\n    Task.id.name: 2,\n    Task.storage_id.name: uuidutils.generate_uuid(),\n    Task.args.name: {},\n    Task.interval.name: 10,\n    Task.job_id.name: None,\n    Task.method.name: constants.TelemetryCollection.PERFORMANCE_TASK_METHOD,\n    Task.last_run_time.name: None,\n    Task.executor.name: fake_executor,\n    Task.deleted.name: False,\n}\n\nfake_telemetry_jobs = [\n    fake_telemetry_job,\n]\n\nfake_telemetry_job_deleted = {\n    Task.id.name: 2,\n    Task.storage_id.name: uuidutils.generate_uuid(),\n    Task.args.name: {},\n    Task.interval.name: 10,\n    Task.method.name: constants.TelemetryCollection.PERFORMANCE_TASK_METHOD,\n    Task.last_run_time.name: None,\n    Task.deleted.name: True,\n    Task.executor.name: fake_executor,\n}\n\nfake_telemetry_jobs_deleted = [\n    fake_telemetry_job_deleted,\n]\n# With method name as None\nIncorrect_telemetry_job = {\n    Task.id.name: 2,\n    Task.storage_id.name: uuidutils.generate_uuid(),\n    Task.args.name: {},\n    Task.interval.name: 10,\n    Task.method.name: None,\n    Task.last_run_time.name: None,\n    Task.executor.name: None,\n}\n\nIncorrect_telemetry_jobs = [\n    Incorrect_telemetry_job,\n]\nfake_failed_job = {\n    FailedTask.id.name: 43,\n    FailedTask.retry_count.name: 0,\n    FailedTask.result.name: \"Init\",\n    FailedTask.job_id.name: \"fake_job_id\",\n    FailedTask.task_id.name: uuidutils.generate_uuid(),\n    FailedTask.method.name: FailedPerformanceCollectionHandler.__module__ +\n                            '.' 
+\n                            FailedPerformanceCollectionHandler.__name__,\n    FailedTask.start_time.name: int(datetime.now().timestamp()),\n    FailedTask.end_time.name: int(datetime.now().timestamp()) + 20,\n    FailedTask.interval.name: 20,\n    FailedTask.deleted.name: False,\n    FailedTask.executor.name: fake_executor,\n}\n\nfake_failed_jobs = [\n    fake_failed_job,\n]\n\n\nclass TestTelemetryJob(test.TestCase):\n\n    @mock.patch.object(db, 'task_get_all',\n                       mock.Mock(return_value=fake_telemetry_jobs))\n    @mock.patch.object(db, 'task_update',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch.object(db, 'task_get',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch(\n        'apscheduler.schedulers.background.BackgroundScheduler.add_job')\n    def test_telemetry_job_scheduling(self, mock_add_job):\n        ctx = context.get_admin_context()\n        telemetry_job = JobHandler(ctx, fake_telemetry_job['id'],\n                                   fake_telemetry_job['storage_id'],\n                                   fake_telemetry_job['args'],\n                                   fake_telemetry_job['interval'])\n        # call telemetry job scheduling\n        telemetry_job.schedule_job(fake_telemetry_job['id'])\n        self.assertEqual(mock_add_job.call_count, 1)\n\n    @mock.patch.object(db, 'task_delete',\n                       mock.Mock())\n    @mock.patch.object(db, 'task_get_all',\n                       mock.Mock(return_value=fake_telemetry_jobs_deleted))\n    @mock.patch.object(db, 'task_update',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch.object(db, 'task_get',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch(\n        'apscheduler.schedulers.background.BackgroundScheduler.add_job',\n        mock.Mock())\n    @mock.patch('logging.LoggerAdapter.error')\n    def test_telemetry_removal_success(self, mock_log_error):\n        ctx = context.get_admin_context()\n        telemetry_job = JobHandler(ctx, fake_telemetry_job['id'],\n                                   fake_telemetry_job['storage_id'],\n                                   fake_telemetry_job['args'],\n                                   fake_telemetry_job['interval'])\n        # call telemetry job scheduling\n        telemetry_job.remove_job(fake_telemetry_job['id'])\n        self.assertEqual(mock_log_error.call_count, 0)\n\n\nclass TestFailedTelemetryJob(test.TestCase):\n\n    @mock.patch.object(db, 'failed_task_get_all',\n                       mock.Mock(return_value=fake_failed_jobs))\n    @mock.patch.object(db, 'failed_task_update',\n                       mock.Mock(return_value=fake_failed_job))\n    @mock.patch.object(db, 'task_get',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch.object(db, 'failed_task_get',\n                       mock.Mock(return_value=fake_failed_job))\n    @mock.patch(\n        'apscheduler.schedulers.background.BackgroundScheduler.add_job')\n    def test_failed_job_scheduling(self, mock_add_job):\n        failed_job = FailedJobHandler(context.get_admin_context())\n        # call failed job scheduling\n        failed_job.schedule_failed_job(fake_failed_job['id'])\n        self.assertEqual(mock_add_job.call_count, 1)\n\n    @mock.patch.object(db, 'failed_task_get',\n                       mock.Mock(return_value=fake_failed_job))\n    @mock.patch(\n        
'apscheduler.schedulers.background.BackgroundScheduler.remove_job')\n    @mock.patch(\n        'apscheduler.schedulers.background.BackgroundScheduler.get_job')\n    @mock.patch.object(db, 'failed_task_delete')\n    @mock.patch.object(db, 'failed_task_get_all')\n    def test_failed_job_with_max_retry(self, mock_failed_get_all,\n                                       mock_failed_task_delete,\n                                       mock_get_job,\n                                       mock_remove_job):\n        # configure to return entry with max retry count\n        failed_jobs = fake_failed_jobs.copy()\n        failed_jobs[0][FailedTask.retry_count.name] = \\\n            TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT\n        mock_failed_get_all.return_value = failed_jobs\n\n        failed_job = FailedJobHandler(context.get_admin_context())\n        # call failed job scheduling\n        failed_job.schedule_failed_job(failed_jobs[0])\n\n        mock_get_job.return_value = True\n\n        # entry get deleted and job get removed\n        self.assertEqual(mock_failed_task_delete.call_count, 1)\n        self.assertEqual(mock_remove_job.call_count, 1)\n\n    @mock.patch(\n        'apscheduler.schedulers.background.BackgroundScheduler.get_job')\n    @mock.patch(\n        'apscheduler.schedulers.background.BackgroundScheduler.add_job')\n    @mock.patch.object(db, 'failed_task_get_all')\n    def test_failed_job_with_job_already_scheduled(self, mock_failed_get_all,\n                                                   mock_add_job,\n                                                   mock_get_job):\n        # configure to return entry with job id\n        failed_jobs = fake_failed_jobs.copy()\n        failed_jobs[0][FailedTask.job_id.name] = uuidutils.generate_uuid()\n        mock_failed_get_all.return_value = failed_jobs\n        # configure to have job in scheduler\n        mock_get_job.return_value = failed_jobs\n\n        failed_job = FailedJobHandler(context.get_admin_context())\n        # call failed job scheduling\n        failed_job.remove_failed_job(fake_failed_job['id'])\n\n        # the job will not be scheduled\n        self.assertEqual(mock_add_job.call_count, 0)\n\n    @mock.patch.object(db, 'failed_task_get',\n                       mock.Mock(return_value=fake_failed_job))\n    @mock.patch(\n        'apscheduler.schedulers.background.BackgroundScheduler.remove_job')\n    @mock.patch.object(db, 'failed_task_delete')\n    @mock.patch.object(db, 'failed_task_get_all')\n    def test_failed_job_scheduling_with_no_task(self, mock_failed_get_all,\n                                                mock_failed_task_delete,\n                                                mock_remove_job):\n        # configure to return entry with max retry count\n        failed_jobs = fake_failed_jobs.copy()\n        failed_jobs[0][FailedTask.job_id.name] = uuidutils.generate_uuid()\n        mock_failed_get_all.return_value = failed_jobs\n\n        failed_job = FailedJobHandler(context.get_admin_context())\n        # call failed job scheduling\n        failed_job.remove_failed_job(fake_failed_job)\n\n        # entry get deleted and job get removed\n        self.assertEqual(mock_failed_task_delete.call_count, 1)\n        self.assertEqual(mock_remove_job.call_count, 0)\n"
  },
  {
    "path": "delfin/tests/unit/task_manager/scheduler/schedulers/telemetry/test_performance_collection_handler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\n\nfrom oslo_utils import uuidutils\n\nfrom delfin import context\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import test\nfrom delfin.common import constants\nfrom delfin.common.constants import TelemetryTaskStatus\nfrom delfin.db.sqlalchemy.models import Task\nfrom delfin.task_manager.scheduler.schedulers.telemetry. \\\n    performance_collection_handler import \\\n    PerformanceCollectionHandler\n\nfake_task_id = 43\nfake_executor = 'node1'\nfake_storage_id = '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6'\nfake_telemetry_job = {\n    Task.id.name: 2,\n    Task.storage_id.name: uuidutils.generate_uuid(),\n    Task.args.name: {},\n    Task.interval.name: 10,\n    Task.deleted.name: False,\n    Task.method.name: constants.TelemetryCollection.PERFORMANCE_TASK_METHOD,\n    Task.executor.name: fake_executor\n}\n\nfake_deleted_telemetry_job = {\n    Task.id.name: 2,\n    Task.storage_id.name: uuidutils.generate_uuid(),\n    Task.args.name: {},\n    Task.interval.name: 10,\n    Task.deleted.name: True,\n    Task.method.name: constants.TelemetryCollection.PERFORMANCE_TASK_METHOD,\n    Task.executor.name: fake_executor\n}\n\n\ndef task_not_found_exception(ctx, task_id):\n    raise exception.TaskNotFound(\"Task not found.\")\n\n\nclass TestPerformanceCollectionHandler(test.TestCase):\n\n    @mock.patch.object(db, 'task_get',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch('delfin.db.task_update')\n    @mock.patch('delfin.task_manager.tasks.telemetry'\n                '.PerformanceCollectionTask.collect')\n    def test_performance_collection_success(self, mock_collect_telemetry,\n                                            mock_task_update):\n        mock_collect_telemetry.return_value = TelemetryTaskStatus. 
\\\n            TASK_EXEC_STATUS_SUCCESS\n        ctx = context.get_admin_context()\n        perf_collection_handler = PerformanceCollectionHandler.get_instance(\n            ctx, fake_task_id)\n        # call performance collection handler\n        perf_collection_handler()\n\n        self.assertEqual(mock_collect_telemetry.call_count, 1)\n        self.assertEqual(mock_task_update.call_count, 1)\n\n    @mock.patch('delfin.db.task_update')\n    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.assign_failed_job')\n    @mock.patch.object(db, 'task_get',\n                       mock.Mock(return_value=fake_telemetry_job))\n    @mock.patch('delfin.db.failed_task_create')\n    @mock.patch('delfin.task_manager.tasks.telemetry'\n                '.PerformanceCollectionTask.collect')\n    @mock.patch('delfin.drivers.api.API.get_capabilities')\n    def test_performance_collection_failure(self, mock_get_capabilities,\n                                            mock_collect_telemetry,\n                                            mock_failed_task_create,\n                                            mock_assign_failed_job,\n                                            mock_task_update):\n\n        mock_get_capabilities.return_value = {}\n        mock_collect_telemetry.return_value = TelemetryTaskStatus. \\\n            TASK_EXEC_STATUS_FAILURE\n        ctx = context.get_admin_context()\n        perf_collection_handler = PerformanceCollectionHandler.get_instance(\n            ctx, fake_task_id)\n        # call performance collection handler\n        perf_collection_handler()\n\n        # Verify that failed task create is called if collect telemetry fails\n        self.assertEqual(mock_failed_task_create.call_count, 1)\n        self.assertEqual(mock_assign_failed_job.call_count, 1)\n        self.assertEqual(mock_task_update.call_count, 1)\n\n    @mock.patch.object(db, 'task_get',\n                       mock.Mock(return_value=fake_deleted_telemetry_job))\n    @mock.patch('delfin.db.task_update')\n    @mock.patch('delfin.task_manager.tasks.telemetry'\n                '.PerformanceCollectionTask.collect')\n    def test_performance_collection_deleted_storage(self,\n                                                    mock_collect_telemetry,\n                                                    mock_task_update):\n        mock_collect_telemetry.return_value = TelemetryTaskStatus. 
\\\n            TASK_EXEC_STATUS_SUCCESS\n        ctx = context.get_admin_context()\n        perf_collection_handler = PerformanceCollectionHandler.get_instance(\n            ctx, fake_task_id)\n        perf_collection_handler()\n\n        # Verify that collect telemetry and db updated is not called\n        # for deleted storage\n        self.assertEqual(mock_collect_telemetry.call_count, 0)\n        self.assertEqual(mock_task_update.call_count, 0)\n\n    @mock.patch('delfin.db.task_get', task_not_found_exception)\n    @mock.patch('delfin.task_manager.tasks.telemetry'\n                '.PerformanceCollectionTask.collect')\n    def test_deleted_storage_exception(self,\n                                       mock_collect_telemetry):\n        ctx = context.get_admin_context()\n        perf_collection_handler = PerformanceCollectionHandler(ctx,\n                                                               fake_task_id,\n                                                               fake_storage_id,\n                                                               \"\", 100,\n                                                               fake_executor)\n        perf_collection_handler()\n\n        # Verify that collect telemetry for deleted storage\n        self.assertEqual(mock_collect_telemetry.call_count, 0)\n"
  },
  {
    "path": "delfin/tests/unit/task_manager/scheduler/test_scheduler.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\n\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\nfrom delfin import db\nfrom delfin import test\nfrom delfin.coordination import ConsistentHashing\nfrom delfin.leader_election.distributor.task_distributor \\\n    import TaskDistributor\nfrom delfin.task_manager.metrics_rpcapi import TaskAPI\nfrom delfin.task_manager.scheduler import schedule_manager\n\nFAKE_TASKS = [\n    {\n        'id': 1,\n        'executor': 'node1'\n    },\n    {\n        'id': 2,\n        'executor': 'node2'\n    },\n    {\n        'id': 3,\n        'executor': 'node1'\n    }\n]\n\n\nclass TestScheduler(test.TestCase):\n\n    def test_scheduler_manager_singleton(self):\n        first_instance = schedule_manager.SchedulerManager().get_scheduler()\n        self.assertIsInstance(first_instance, BackgroundScheduler)\n\n        second_instance = schedule_manager.SchedulerManager().get_scheduler()\n        self.assertIsInstance(second_instance, BackgroundScheduler)\n\n        self.assertEqual(first_instance, second_instance)\n\n    @mock.patch.object(BackgroundScheduler, 'start')\n    def test_start(self, mock_scheduler_start):\n        manager = schedule_manager.SchedulerManager()\n        manager.start()\n        self.assertEqual(mock_scheduler_start.call_count, 1)\n        manager.start()\n        self.assertEqual(mock_scheduler_start.call_count, 1)\n\n    @mock.patch('tooz.coordination.get_coordinator', mock.Mock())\n    @mock.patch.object(ConsistentHashing, 'get_task_executor')\n    @mock.patch.object(TaskAPI, 'remove_job')\n    @mock.patch.object(TaskDistributor, 'distribute_new_job')\n    @mock.patch.object(db, 'task_get_all')\n    def test_on_node_join(self, mock_task_get_all, mock_distribute_new_job,\n                          mock_remove_job, mock_get_task_executor):\n        node1_job_count = 0\n        node2_job_count = 0\n        for job in FAKE_TASKS:\n            if job['executor'] == 'node1':\n                node1_job_count += 1\n            elif job['executor'] == 'node2':\n                node2_job_count += 1\n        mock_task_get_all.return_value = FAKE_TASKS\n        mock_get_task_executor.return_value = 'node1'\n        manager = schedule_manager.SchedulerManager()\n        manager.on_node_join(mock.Mock(member_id=b'fake_member_id',\n                                       group_id='node1'))\n        self.assertEqual(mock_task_get_all.call_count, 1)\n        self.assertEqual(mock_distribute_new_job.call_count,\n                         node1_job_count + node2_job_count)\n        self.assertEqual(mock_remove_job.call_count, node2_job_count)\n        self.assertEqual(mock_get_task_executor.call_count,\n                         node1_job_count + node2_job_count)\n\n    @mock.patch.object(TaskDistributor, 'distribute_new_job')\n    @mock.patch.object(db, 'task_get_all')\n    def test_on_node_leave(self, mock_task_get_all, mock_distribute_new_job):\n     
   mock_task_get_all.return_value = FAKE_TASKS\n        manager = schedule_manager.SchedulerManager()\n        manager.on_node_leave(mock.Mock(member_id=b'fake_member_id',\n                                        group_id='fake_group_id'))\n        self.assertEqual(mock_task_get_all.call_count, 1)\n        self.assertEqual(mock_distribute_new_job.call_count, len(FAKE_TASKS))\n\n    @mock.patch.object(TaskDistributor, 'distribute_new_job')\n    @mock.patch.object(db, 'task_get_all')\n    def test_recover_job(self, mock_task_get_all, mock_distribute_new_job):\n        mock_task_get_all.return_value = FAKE_TASKS\n        manager = schedule_manager.SchedulerManager()\n        manager.recover_job()\n        self.assertEqual(mock_task_get_all.call_count, 1)\n        self.assertEqual(mock_distribute_new_job.call_count, len(FAKE_TASKS))\n"
  },
  {
    "path": "delfin/tests/unit/task_manager/test_alert_task.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom unittest import mock\n\nfrom delfin import context\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import test\nfrom delfin.common import constants\nfrom delfin.task_manager.tasks import alerts\n\nfake_storage = {\n    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n    'name': 'fake_driver',\n    'description': 'it is a fake driver.',\n    'vendor': 'fake_vendor',\n    'model': 'fake_model',\n    'status': 'normal',\n    'serial_number': '2102453JPN12KA000011',\n    'firmware_version': '1.0.0',\n    'location': 'HK',\n    'total_capacity': 1024 * 1024,\n    'used_capacity': 3126,\n    'free_capacity': 1045449,\n}\n\nfake_alerts = [\n    {\n        'alert_id': '1050',\n        'alert_name': 'SAMPLE_ALERT_NAME',\n        'severity': constants.Severity.WARNING,\n        'category': constants.Category.NOT_SPECIFIED,\n        'type': constants.EventType.EQUIPMENT_ALARM,\n        'sequence_number': 79,\n        'description': 'Diagnostic event trace triggered.',\n        'recovery_advice': 'NA',\n        'resource_type': constants.DEFAULT_RESOURCE_TYPE,\n        'location': 'Array id=000192601409,Component type=location1 '\n                    'Group,Component name=comp1,Event source=symmetrix',\n    },\n    {\n        'alert_id': '2000',\n        'alert_name': 'SAMPLE_ALERT_NAME_2',\n        'severity': constants.Severity.CRITICAL,\n        'category': constants.Category.RECOVERY,\n        'type': constants.EventType.PROCESSING_ERROR_ALARM,\n        'sequence_number': 50,\n        'description': 'This is a fake alert.',\n        'recovery_advice': 'NA',\n        'resource_type': constants.DEFAULT_RESOURCE_TYPE,\n        'location': 'Array id=000192601409,Component type=location1 '\n                    'Group,Component name=comp1,Event source=symmetrix',\n    },\n]\n\n\nclass TestAlertTask(test.TestCase):\n\n    @mock.patch.object(db, 'storage_get',\n                       mock.Mock(return_value=fake_storage))\n    @mock.patch('delfin.exporter.base_exporter.AlertExporterManager.dispatch')\n    @mock.patch('delfin.common.alert_util.fill_storage_attributes')\n    @mock.patch('delfin.drivers.api.API.list_alerts')\n    def test_sync_alerts(self, mock_list_alerts,\n                         mock_fill_storage_attributes, mock_dispatch):\n        task = alerts.AlertSyncTask()\n        storage_id = fake_storage['id']\n        # No alert\n        mock_list_alerts.return_value = []\n        task.sync_alerts(context, storage_id, None)\n        self.assertEqual(db.storage_get.call_count, 1)\n        self.assertEqual(mock_list_alerts.call_count, 1)\n        self.assertEqual(mock_dispatch.call_count, 0)\n        self.assertEqual(mock_fill_storage_attributes.call_count, 0)\n        # Has alert\n        mock_list_alerts.return_value = fake_alerts\n        task.sync_alerts(context, storage_id, None)\n        self.assertEqual(db.storage_get.call_count, 2)\n        
self.assertEqual(mock_list_alerts.call_count, 2)\n        self.assertEqual(mock_dispatch.call_count, 1)\n        self.assertEqual(mock_fill_storage_attributes.call_count,\n                         len(fake_alerts))\n\n    @mock.patch('delfin.drivers.api.API.clear_alert')\n    def test_clear_alerts(self, mock_clear_alert):\n        task = alerts.AlertSyncTask()\n        storage_id = fake_storage['id']\n        task.clear_alerts(context, storage_id, [])\n        self.assertEqual(mock_clear_alert.call_count, 0)\n\n        sequence_number_list = ['sequence_number_1', 'sequence_number_2']\n        task.clear_alerts(context, storage_id, sequence_number_list)\n        self.assertEqual(mock_clear_alert.call_count,\n                         len(sequence_number_list))\n\n        mock_clear_alert.side_effect = \\\n            exception.AccessInfoNotFound(storage_id)\n        ret = task.clear_alerts(context, storage_id, sequence_number_list)\n        self.assertEqual(ret, [])\n\n        mock_clear_alert.side_effect = \\\n            exception.Invalid('Fake exception')\n        ret = task.clear_alerts(context, storage_id, sequence_number_list)\n        self.assertEqual(ret, sequence_number_list)\n"
  },
  {
    "path": "delfin/tests/unit/task_manager/test_resources.py",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom unittest import mock\nfrom delfin.common import config # noqa\nfrom delfin.drivers import fake_storage\nfrom delfin.task_manager.tasks import resources\nfrom delfin.task_manager.tasks.resources import StorageDeviceTask\n\nfrom delfin import test, context, coordination\n\nstorage = {\n    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n    'name': 'fake_driver',\n    'description': 'it is a fake driver.',\n    'vendor': 'fake_vendor',\n    'model': 'fake_model',\n    'status': 'normal',\n    'serial_number': '2102453JPN12KA000011',\n    'firmware_version': '1.0.0',\n    'location': 'HK',\n    'total_capacity': 1024 * 1024,\n    'used_capacity': 3126,\n    'free_capacity': 1045449,\n}\n\npools_list = [{\n    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n    \"name\": \"fake_pool_\" + str(id),\n    \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n    \"native_storage_pool_id\": \"fake_original_id_\" + str(id),\n    \"description\": \"Fake Pool\",\n    \"status\": \"normal\",\n    \"total_capacity\": 1024 * 1024,\n    \"used_capacity\": 3126,\n    \"free_capacity\": 1045449,\n}\n]\n\nvols_list = [{\n    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a340',\n    \"name\": \"fake_vol_\" + str(id),\n    \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n    \"description\": \"Fake Volume\",\n    \"status\": \"normal\",\n    \"native_volume_id\": \"fake_original_id_\" + str(id),\n    \"wwn\": \"fake_wwn_\" + str(id),\n    \"total_capacity\": 1024 * 1024,\n    \"used_capacity\": 3126,\n    \"free_capacity\": 1045449,\n}\n]\n\nports_list = [{\n    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n    \"name\": \"fake_pool_\" + str(id),\n    \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n    \"native_port_id\": \"fake_original_id_\" + str(id),\n    \"location\": \"location_25\",\n    \"connection_status\": \"disconnected\",\n    \"health_status\": \"normal\",\n    \"type\": \"iscsi\",\n    \"logical_type\": \"service\",\n    \"speed\": 1000,\n    \"max_speed\": 7200,\n    \"native_parent_id\": \"parent_id\",\n    \"wwn\": \"wwn\",\n    \"mac_address\": \"mac_352\",\n    \"ipv4\": \"127.0.0.1\",\n    \"ipv4_mask\": \"255.255.255.0\",\n    \"ipv6\": \"\",\n    \"ipv6_mask\": \"\"\n}\n]\n\ncontrollers_list = [{\n    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a222',\n    \"name\": \"fake_controller_\" + str(id),\n    \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n    \"native_controller_id\": \"fake_original_id_\" + str(id),\n    \"status\": \"normal\",\n    \"location\": \"loc_100\",\n    \"soft_version\": \"ver_321\",\n    \"cpu_info\": \"Intel Xenon\",\n    \"memory_size\": 200000,\n}\n]\n\ndisks_list = [{\n    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n    \"name\": \"fake_pool_\" + str(id),\n    \"storage_id\": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n    \"native_disk_id\": \"fake_original_id_\" + str(id),\n    \"serial_number\": \"serial_3299\",\n    
\"manufacturer\": \"Intel\",\n    \"model\": \"model_4565\",\n    \"firmware\": \"firmware_9541\",\n    \"speed\": 751,\n    \"capacity\": 1074,\n    \"status\": \"offline\",\n    \"physical_type\": \"sata\",\n    \"logical_type\": \"cache\",\n    \"health_score\": 34,\n    \"native_disk_group_id\": \"\",\n}\n]\n\n\nquotas_list = [{\n    \"id\": \"251594c5-aac4-46ad-842f-3daca9176938\",\n    \"native_quota_id\": \"fake_original_id_\" + str(id),\n    \"name\": \"fake_qutoa_\" + str(id),\n    \"storage_id\": \"793b26f9-6f16-4fd5-a6a2-d7453f050a41\",\n    \"native_filesystem_id\": \"fake_filesystem_id_\" + str(id),\n    \"native_qtree_id\": \"fake_qtree_id_\" + str(id),\n    \"capacity_hard_limit\": 1000,\n    \"capacity_soft_limit\": 100,\n    \"file_hard_limit\": 1000,\n    \"file_soft_limit\": 100,\n    \"file_count\": 10000,\n    \"used_capacity\": 10000,\n    \"type\": \"user\"\n}\n]\n\n\nfilesystems_list = [{\n    \"id\": \"fe760f5c-7b4c-42b2-b1ed-ecb4f0b6d6bc\",\n    \"name\": \"fake_filesystem_\" + str(id),\n    \"storage_id\": \"793b26f9-6f16-4fd5-a6a2-d7453f050a41\",\n    \"native_filesystem_id\": \"fake_original_id_\" + str(id),\n    \"status\": \"normal\",\n    \"type\": \"thin\",\n    \"security_mode\": \"unix\",\n    \"total_capacity\": 1055,\n    \"used_capacity\": 812,\n    \"free_capacity\": 243,\n    \"compressed\": True,\n    \"deduplicated\": False,\n    \"worm\": \"non_worm\"\n}\n]\n\n\nqtrees_list = [{\n    \"id\": \"251594c5-aac4-46ad-842f-3daca9176938\",\n    \"name\": \"fake_qtree_\" + str(id),\n    \"storage_id\": \"793b26f9-6f16-4fd5-a6a2-d7453f050a41\",\n    \"native_qtree_id\": \"fake_original_id_\" + str(id),\n    \"native_filesystem_id\": \"fake_filesystem_id_\" + str(id),\n    \"path\": \"/\",\n    \"security_mode\": \"native\"\n}\n]\n\n\nshares_list = [{\n    \"id\": \"4e62c66a-39ef-43f2-9690-e936ca876574\",\n    \"name\": \"fake_share_\" + str(id),\n    \"storage_id\": \"793b26f9-6f16-4fd5-a6a2-d7453f050a41\",\n    \"native_share_id\": \"fake_original_id_\" + str(id),\n    \"native_filesystem_id\": \"fake_filesystem_id_\" + str(id),\n    \"native_qtree_id\": \"859\",\n    \"protocol\": \"nfs\",\n    \"path\": \"/\"\n}\n]\n\nstorage_host_initiators_list = [{\n    \"id\": \"4e62c66a-39ef-43f2-9690-e936ca876574\",\n    \"name\": \"storage_host_initiator_\" + str(id),\n    \"description\": \"storage_host_initiator_\" + str(id),\n    \"alias\": \"storage_host_initiator_\" + str(id),\n    \"storage_id\": \"c5c91c98-91aa-40e6-85ac-37a1d3b32bda\",\n    \"native_storage_host_initiator_id\": \"storage_host_initiator_\" + str(id),\n    \"wwn\": \"wwn_\" + str(id),\n    \"status\": \"Normal\",\n    \"native_storage_host_id\": \"storage_host_\" + str(id),\n}\n]\n\nstorage_hosts_list = [{\n    \"id\": \"4e62c66a-39ef-43f2-9690-e936ca876574\",\n    \"name\": \"storage_host_\" + str(id),\n    \"description\": \"storage_host_\" + str(id),\n    \"storage_id\": \"c5c91c98-91aa-40e6-85ac-37a1d3b32bda\",\n    \"native_storage_host_id\": \"storage_host_\" + str(id),\n    \"os_type\": \"linux\",\n    \"status\": \"Normal\",\n    \"ip_address\": \"1.2.3.4\"\n}\n]\n\nstorage_hg_list = [{\n    \"id\": \"4e62c66a-39ef-43f2-9690-e936ca876574\",\n    \"name\": \"storage_host_group_\" + str(id),\n    \"description\": \"storage_host_group_\" + str(id),\n    \"storage_id\": \"c5c91c98-91aa-40e6-85ac-37a1d3b32bda\",\n    \"native_storage_host_group_id\": \"storage_host_group_\" + str(id),\n}\n]\n\nstorage_host_groups_list = {\n    'storage_host_groups': storage_hg_list,\n    
'storage_host_grp_host_rels': ''\n}\nempty_shgs_list = {\n    'storage_host_groups': list(),\n    'storage_host_grp_host_rels': ''\n}\n\npg_list = [{\n    \"id\": \"4e62c66a-39ef-43f2-9690-e936ca876574\",\n    \"name\": \"port_group_\" + str(id),\n    \"description\": \"port_group_\" + str(id),\n    \"storage_id\": \"c5c91c98-91aa-40e6-85ac-37a1d3b32bda\",\n    \"native_port_group_id\": \"port_group_\" + str(id),\n}\n]\n\nport_groups_list = {\n    \"port_groups\": pg_list,\n    \"port_grp_port_rels\": '',\n}\n\nempty_port_groups_list = {\n    \"port_groups\": list(),\n    \"port_grp_port_rels\": '',\n}\n\nvg_list = [{\n    \"id\": \"4e62c66a-39ef-43f2-9690-e936ca876574\",\n    \"name\": \"volume_group_\" + str(id),\n    \"description\": \"volume_group_\" + str(id),\n    \"storage_id\": \"c5c91c98-91aa-40e6-85ac-37a1d3b32bda\",\n    \"native_volume_group_id\": \"volume_group_\" + str(id),\n}\n]\n\nvolume_groups_list = {\n    'volume_groups': vg_list,\n    'vol_grp_vol_rels': ''\n}\n\nempty_volume_groups_list = {\n    'volume_groups': list(),\n    'vol_grp_vol_rels': ''\n}\n\nmasking_views_list = [{\n    \"id\": \"4e62c66a-39ef-43f2-9690-e936ca876574\",\n    \"name\": \"masking_view_\" + str(id),\n    \"description\": \"masking_view_\" + str(id),\n    \"storage_id\": \"c5c91c98-91aa-40e6-85ac-37a1d3b32bda\",\n    \"native_masking_view_id\": \"masking_view_\" + str(id),\n}\n]\n\n\nclass TestStorageDeviceTask(test.TestCase):\n    def setUp(self):\n        super(TestStorageDeviceTask, self).setUp()\n        self.driver_api = mock.Mock()\n        self.task_manager = StorageDeviceTask(\n            context, \"12c2d52f-01bc-41f5-b73f-7abf6f38a2a6\")\n        self.mock_object(self.task_manager, 'driver_api', self.driver_api)\n\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.get_storage')\n    @mock.patch('delfin.db.storage_update')\n    @mock.patch('delfin.db.storage_get')\n    @mock.patch('delfin.db.storage_delete')\n    @mock.patch('delfin.db.access_info_delete')\n    @mock.patch('delfin.db.alert_source_delete')\n    def test_sync_successful(self, alert_source_delete, access_info_delete,\n                             mock_storage_delete, mock_storage_get,\n                             mock_storage_update, mock_get_storage, get_lock):\n        storage_obj = resources.StorageDeviceTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n\n        storage_obj.sync()\n        self.assertTrue(get_lock.called)\n        self.assertTrue(mock_storage_get.called)\n        self.assertTrue(mock_storage_delete.called)\n        self.assertTrue(access_info_delete.called)\n        self.assertTrue(alert_source_delete.called)\n        self.assertTrue(mock_storage_update.called)\n        mock_get_storage.assert_called_with(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n        mock_get_storage.return_value = fake_storage_obj.get_storage(context)\n        storage_obj.sync()\n\n    @mock.patch('delfin.db.storage_delete')\n    @mock.patch('delfin.db.alert_source_delete')\n    def test_successful_remove(self, mock_alert_del, mock_strg_del):\n        storage_obj = resources.StorageDeviceTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        storage_obj.remove()\n\n        mock_strg_del.assert_called_with(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        mock_alert_del.assert_called_with(\n            context, 
'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n\n\nclass TestStoragePoolTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_storage_pools')\n    @mock.patch('delfin.db.storage_pool_get_all')\n    @mock.patch('delfin.db.storage_pools_delete')\n    @mock.patch('delfin.db.storage_pools_update')\n    @mock.patch('delfin.db.storage_pools_create')\n    def test_sync_successful(self, mock_pool_create, mock_pool_update,\n                             mock_pool_del, mock_pool_get_all,\n                             mock_list_pools, get_lock):\n        pool_obj = resources.StoragePoolTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        pool_obj.sync()\n\n        self.assertTrue(mock_list_pools.called)\n        self.assertTrue(mock_pool_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # collect the pools from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # add the new pool to DB\n        mock_list_pools.return_value = fake_storage_obj.list_storage_pools(\n            context)\n        mock_pool_get_all.return_value = list()\n        pool_obj.sync()\n        self.assertTrue(mock_pool_create.called)\n\n        # update the new pool of DB\n        mock_list_pools.return_value = pools_list\n        mock_pool_get_all.return_value = pools_list\n        pool_obj.sync()\n        self.assertTrue(mock_pool_update.called)\n\n        # delete the new pool to DB\n        mock_list_pools.return_value = list()\n        mock_pool_get_all.return_value = pools_list\n        pool_obj.sync()\n        self.assertTrue(mock_pool_del.called)\n\n    @mock.patch('delfin.db.storage_pool_delete_by_storage')\n    def test_remove(self, mock_pool_del):\n        pool_obj = resources.StoragePoolTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        pool_obj.remove()\n        self.assertTrue(mock_pool_del.called)\n\n\nclass TestStorageVolumeTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_volumes')\n    @mock.patch('delfin.db.volume_get_all')\n    @mock.patch('delfin.db.volumes_delete')\n    @mock.patch('delfin.db.volumes_update')\n    @mock.patch('delfin.db.volumes_create')\n    def test_sync_successful(self, mock_vol_create, mock_vol_update,\n                             mock_vol_del, mock_vol_get_all, mock_list_vols,\n                             get_lock):\n        vol_obj = resources.StorageVolumeTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        vol_obj.sync()\n        self.assertTrue(mock_list_vols.called)\n        self.assertTrue(mock_vol_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # collect the volumes from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # add the volumes to DB\n        mock_list_vols.return_value = fake_storage_obj.list_volumes(context)\n        mock_vol_get_all.return_value = list()\n        vol_obj.sync()\n        self.assertTrue(mock_vol_create.called)\n\n        # update the volumes to DB\n        mock_list_vols.return_value = vols_list\n        mock_vol_get_all.return_value = vols_list\n        vol_obj.sync()\n        self.assertTrue(mock_vol_update.called)\n\n        # delete the volumes to DB\n        mock_list_vols.return_value = list()\n        mock_vol_get_all.return_value = vols_list\n        vol_obj.sync()\n        
self.assertTrue(mock_vol_del.called)\n\n    @mock.patch('delfin.db.volume_delete_by_storage')\n    def test_remove(self, mock_vol_del):\n        vol_obj = resources.StorageVolumeTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        vol_obj.remove()\n        self.assertTrue(mock_vol_del.called)\n\n\nclass TestStoragecontrollerTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_controllers')\n    @mock.patch('delfin.db.controller_get_all')\n    @mock.patch('delfin.db.controllers_delete')\n    @mock.patch('delfin.db.controllers_update')\n    @mock.patch('delfin.db.controllers_create')\n    def test_sync_successful(self,\n                             mock_controller_create, mock_controller_update,\n                             mock_controller_del, mock_controller_get_all,\n                             mock_list_controllers, get_lock):\n        controller_obj = resources.StorageControllerTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        controller_obj.sync()\n\n        self.assertTrue(mock_list_controllers.called)\n        self.assertTrue(mock_controller_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # collect the controllers from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # add the new controller to DB\n        mock_list_controllers.return_value = \\\n            fake_storage_obj.list_controllers(context)\n        mock_controller_get_all.return_value = list()\n        controller_obj.sync()\n        self.assertTrue(mock_controller_create.called)\n\n        # update the new controller of DB\n        mock_list_controllers.return_value = controllers_list\n        mock_controller_get_all.return_value = controllers_list\n        controller_obj.sync()\n        self.assertTrue(mock_controller_update.called)\n\n        # delete the new controller to DB\n        mock_list_controllers.return_value = list()\n        mock_controller_get_all.return_value = controllers_list\n        controller_obj.sync()\n        self.assertTrue(mock_controller_del.called)\n\n    @mock.patch('delfin.db.controller_delete_by_storage')\n    def test_remove(self, mock_controller_del):\n        controller_obj = resources.StorageControllerTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        controller_obj.remove()\n        self.assertTrue(mock_controller_del.called)\n\n\nclass TestStoragePortTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_ports')\n    @mock.patch('delfin.db.port_get_all')\n    @mock.patch('delfin.db.ports_delete')\n    @mock.patch('delfin.db.ports_update')\n    @mock.patch('delfin.db.ports_create')\n    def test_sync_successful(self, mock_port_create, mock_port_update,\n                             mock_port_del, mock_port_get_all, mock_list_ports,\n                             get_lock):\n        port_obj = resources.StoragePortTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        port_obj.sync()\n        self.assertTrue(mock_list_ports.called)\n        self.assertTrue(mock_port_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # collect the ports from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # add the ports to DB\n        mock_list_ports.return_value = fake_storage_obj.list_ports(context)\n        mock_port_get_all.return_value 
= list()\n        port_obj.sync()\n        self.assertTrue(mock_port_create.called)\n\n        # update the ports to DB\n        mock_list_ports.return_value = ports_list\n        mock_port_get_all.return_value = ports_list\n        port_obj.sync()\n        self.assertTrue(mock_port_update.called)\n\n        # delete the ports to DB\n        mock_list_ports.return_value = list()\n        mock_port_get_all.return_value = ports_list\n        port_obj.sync()\n        self.assertTrue(mock_port_del.called)\n\n    @mock.patch('delfin.db.port_delete_by_storage')\n    def test_remove(self, mock_port_del):\n        port_obj = resources.StoragePortTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        port_obj.remove()\n        self.assertTrue(mock_port_del.called)\n\n\nclass TestStorageDiskTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_disks')\n    @mock.patch('delfin.db.disk_get_all')\n    @mock.patch('delfin.db.disks_delete')\n    @mock.patch('delfin.db.disks_update')\n    @mock.patch('delfin.db.disks_create')\n    def test_sync_successful(self, mock_disk_create, mock_disk_update,\n                             mock_disk_del, mock_disk_get_all, mock_list_disks,\n                             get_lock):\n        disk_obj = resources.StorageDiskTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        disk_obj.sync()\n        self.assertTrue(mock_list_disks.called)\n        self.assertTrue(mock_disk_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # collect the disks from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # add the disks to DB\n        mock_list_disks.return_value = fake_storage_obj.list_disks(context)\n        mock_disk_get_all.return_value = list()\n        disk_obj.sync()\n        self.assertTrue(mock_disk_create.called)\n\n        # update the disks to DB\n        mock_list_disks.return_value = disks_list\n        mock_disk_get_all.return_value = disks_list\n        disk_obj.sync()\n        self.assertTrue(mock_disk_update.called)\n\n        # delete the disks to DB\n        mock_list_disks.return_value = list()\n        mock_disk_get_all.return_value = disks_list\n        disk_obj.sync()\n        self.assertTrue(mock_disk_del.called)\n\n    @mock.patch('delfin.db.disk_delete_by_storage')\n    def test_remove(self, mock_disk_del):\n        disk_obj = resources.StorageDiskTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        disk_obj.remove()\n        self.assertTrue(mock_disk_del.called)\n\n\nclass TestStorageQuotaTask(test.TestCase):\n    # @mock.patch('delfin.drivers.api.API.list_quotas', 'get_lock')\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_quotas')\n    @mock.patch('delfin.db.quota_get_all')\n    @mock.patch('delfin.db.quotas_delete')\n    @mock.patch('delfin.db.quotas_update')\n    @mock.patch('delfin.db.quotas_create')\n    def test_sync_successful(self, mock_quota_create,\n                             mock_quota_update,\n                             mock_quota_del, mock_quota_get_all,\n                             mock_list_quotas,\n                             get_lock):\n        quota_obj = resources.StorageQuotaTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        quota_obj.sync()\n        self.assertTrue(mock_list_quotas.called)\n        
self.assertTrue(mock_quota_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # collect the quotas from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # add the quotas to DB\n        mock_list_quotas.return_value =\\\n            fake_storage_obj.list_quotas(context)\n        mock_quota_get_all.return_value = list()\n        quota_obj.sync()\n        self.assertTrue(mock_quota_create.called)\n\n        # update the quotas to DB\n        mock_list_quotas.return_value = quotas_list\n        mock_quota_get_all.return_value = quotas_list\n        quota_obj.sync()\n        self.assertTrue(mock_quota_update.called)\n\n        # delete the quotas to DB\n        mock_list_quotas.return_value = list()\n        mock_quota_get_all.return_value = quotas_list\n        quota_obj.sync()\n        self.assertTrue(mock_quota_del.called)\n\n    @mock.patch('delfin.db.quota_delete_by_storage')\n    def test_remove(self, mock_quota_del):\n        quota_obj = resources.StorageQuotaTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        quota_obj.remove()\n        self.assertTrue(mock_quota_del.called)\n\n\nclass TestStorageFilesystemTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_filesystems')\n    @mock.patch('delfin.db.filesystem_get_all')\n    @mock.patch('delfin.db.filesystems_delete')\n    @mock.patch('delfin.db.filesystems_update')\n    @mock.patch('delfin.db.filesystems_create')\n    def test_sync_successful(self, mock_filesystem_create,\n                             mock_filesystem_update,\n                             mock_filesystem_del, mock_filesystem_get_all,\n                             mock_list_filesystems,\n                             get_lock):\n        filesystem_obj = resources.StorageFilesystemTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        filesystem_obj.sync()\n        self.assertTrue(mock_list_filesystems.called)\n        self.assertTrue(mock_filesystem_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # collect the filesystems from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # add the filesystems to DB\n        mock_list_filesystems.return_value =\\\n            fake_storage_obj.list_filesystems(context)\n        mock_filesystem_get_all.return_value = list()\n        filesystem_obj.sync()\n        self.assertTrue(mock_filesystem_create.called)\n\n        # update the filesystems to DB\n        mock_list_filesystems.return_value = filesystems_list\n        mock_filesystem_get_all.return_value = filesystems_list\n        filesystem_obj.sync()\n        self.assertTrue(mock_filesystem_update.called)\n\n        # delete the filesystems to DB\n        mock_list_filesystems.return_value = list()\n        mock_filesystem_get_all.return_value = filesystems_list\n        filesystem_obj.sync()\n        self.assertTrue(mock_filesystem_del.called)\n\n    @mock.patch('delfin.db.filesystem_delete_by_storage')\n    def test_remove(self, mock_filesystem_del):\n        filesystem_obj = resources.StorageFilesystemTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        filesystem_obj.remove()\n        self.assertTrue(mock_filesystem_del.called)\n\n\nclass TestStorageQtreeTask(test.TestCase):\n    # @mock.patch('delfin.drivers.api.API.list_qtrees', 'get_lock')\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    
@mock.patch('delfin.drivers.api.API.list_qtrees')\n    @mock.patch('delfin.db.qtree_get_all')\n    @mock.patch('delfin.db.qtrees_delete')\n    @mock.patch('delfin.db.qtrees_update')\n    @mock.patch('delfin.db.qtrees_create')\n    def test_sync_successful(self, mock_qtree_create,\n                             mock_qtree_update,\n                             mock_qtree_del, mock_qtree_get_all,\n                             mock_list_qtrees,\n                             get_lock):\n        qtree_obj = resources.StorageQtreeTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        qtree_obj.sync()\n        self.assertTrue(mock_list_qtrees.called)\n        self.assertTrue(mock_qtree_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # collect the qtrees from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # add the qtrees to the DB\n        mock_list_qtrees.return_value =\\\n            fake_storage_obj.list_qtrees(context)\n        mock_qtree_get_all.return_value = list()\n        qtree_obj.sync()\n        self.assertTrue(mock_qtree_create.called)\n\n        # update the qtrees in the DB\n        mock_list_qtrees.return_value = qtrees_list\n        mock_qtree_get_all.return_value = qtrees_list\n        qtree_obj.sync()\n        self.assertTrue(mock_qtree_update.called)\n\n        # delete the qtrees from the DB\n        mock_list_qtrees.return_value = list()\n        mock_qtree_get_all.return_value = qtrees_list\n        qtree_obj.sync()\n        self.assertTrue(mock_qtree_del.called)\n\n    @mock.patch('delfin.db.qtree_delete_by_storage')\n    def test_remove(self, mock_qtree_del):\n        qtree_obj = resources.StorageQtreeTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        qtree_obj.remove()\n        self.assertTrue(mock_qtree_del.called)\n\n\nclass TestStorageShareTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_shares')\n    @mock.patch('delfin.db.share_get_all')\n    @mock.patch('delfin.db.shares_delete')\n    @mock.patch('delfin.db.shares_update')\n    @mock.patch('delfin.db.shares_create')\n    def test_sync_successful(self, mock_share_create, mock_share_update,\n                             mock_share_del, mock_share_get_all,\n                             mock_list_shares, get_lock):\n        share_obj = resources.StorageShareTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        share_obj.sync()\n        self.assertTrue(mock_list_shares.called)\n        self.assertTrue(mock_share_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # collect the shares from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # add the shares to the DB\n        mock_list_shares.return_value = fake_storage_obj.list_shares(context)\n        mock_share_get_all.return_value = list()\n        share_obj.sync()\n        self.assertTrue(mock_share_create.called)\n\n        # update the shares in the DB\n        mock_list_shares.return_value = shares_list\n        mock_share_get_all.return_value = shares_list\n        share_obj.sync()\n        self.assertTrue(mock_share_update.called)\n\n        # delete the shares from the DB\n        mock_list_shares.return_value = list()\n        mock_share_get_all.return_value = shares_list\n        share_obj.sync()\n        self.assertTrue(mock_share_del.called)\n\n    @mock.patch('delfin.db.share_delete_by_storage')\n    def test_remove(self, 
mock_share_del):\n        share_obj = resources.StorageShareTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        share_obj.remove()\n        self.assertTrue(mock_share_del.called)\n\n\nclass TestStorageHostInitiatorTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_storage_host_initiators')\n    @mock.patch('delfin.db.storage_host_initiators_delete_by_storage')\n    @mock.patch('delfin.db.storage_host_initiators_create')\n    def test_sync_successful(self, mock_storage_host_initiator_create,\n                             mock_storage_host_initiator_delete_by_storage,\n                             mock_list_storage_host_initiators, get_lock):\n        storage_host_initiator_obj = resources.StorageHostInitiatorTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n\n        # Collect the storage host initiators from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # Add the storage host initiators to the DB\n        mock_list_storage_host_initiators.return_value \\\n            = fake_storage_obj.list_storage_host_initiators(context)\n        storage_host_initiator_obj.sync()\n        self.assertTrue(mock_storage_host_initiator_delete_by_storage.called)\n        self.assertTrue(mock_storage_host_initiator_create.called)\n\n    @mock.patch('delfin.db.storage_host_initiators_delete_by_storage')\n    def test_remove(self, mock_storage_host_initiators_del):\n        storage_host_initiator_obj = resources.StorageHostInitiatorTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        storage_host_initiator_obj.remove()\n        self.assertTrue(mock_storage_host_initiators_del.called)\n\n\nclass TestStorageHostTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_storage_hosts')\n    @mock.patch('delfin.db.storage_hosts_get_all')\n    @mock.patch('delfin.db.storage_hosts_delete')\n    @mock.patch('delfin.db.storage_hosts_update')\n    @mock.patch('delfin.db.storage_hosts_create')\n    def test_sync_successful(self, mock_storage_host_create,\n                             mock_storage_host_update,\n                             mock_storage_host_del,\n                             mock_storage_hosts_get_all,\n                             mock_list_storage_hosts, get_lock):\n        storage_host_obj = resources.StorageHostTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        storage_host_obj.sync()\n        self.assertTrue(mock_list_storage_hosts.called)\n        self.assertTrue(mock_storage_hosts_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # Collect the storage hosts from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # Add the storage hosts to the DB\n        mock_list_storage_hosts.return_value \\\n            = fake_storage_obj.list_storage_hosts(context)\n        mock_storage_hosts_get_all.return_value = list()\n        storage_host_obj.sync()\n        self.assertTrue(mock_storage_host_create.called)\n\n        # Update the storage hosts in the DB\n        mock_list_storage_hosts.return_value \\\n            = storage_hosts_list\n        mock_storage_hosts_get_all.return_value \\\n            = storage_hosts_list\n        storage_host_obj.sync()\n        self.assertTrue(mock_storage_host_update.called)\n\n        # Delete the storage hosts from the DB\n        
mock_list_storage_hosts.return_value = list()\n        mock_storage_hosts_get_all.return_value \\\n            = storage_hosts_list\n        storage_host_obj.sync()\n        self.assertTrue(mock_storage_host_del.called)\n\n    @mock.patch('delfin.db.storage_hosts_delete_by_storage')\n    def test_remove(self, mock_storage_hosts_del):\n        storage_host_obj = resources.StorageHostTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        storage_host_obj.remove()\n        self.assertTrue(mock_storage_hosts_del.called)\n\n\nclass TestStorageHostGroupTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_storage_host_groups')\n    @mock.patch('delfin.db.storage_host_groups_get_all')\n    @mock.patch('delfin.db.storage_host_groups_delete')\n    @mock.patch('delfin.db.storage_host_groups_update')\n    @mock.patch('delfin.db.storage_host_groups_create')\n    def test_sync_successful(self, mock_storage_host_group_create,\n                             mock_storage_host_group_update,\n                             mock_storage_host_group_del,\n                             mock_storage_host_groups_get_all,\n                             mock_list_storage_host_groups, get_lock):\n        storage_host_group_obj = resources.StorageHostGroupTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        storage_host_group_obj.sync()\n        self.assertTrue(mock_list_storage_host_groups.called)\n        self.assertTrue(mock_storage_host_groups_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # Collect the storage host groups from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # Add the storage host groups to the DB\n        mock_list_storage_host_groups.return_value \\\n            = fake_storage_obj.list_storage_host_groups(context)\n        mock_storage_host_groups_get_all.return_value = list()\n        storage_host_group_obj.sync()\n        self.assertTrue(mock_storage_host_group_create.called)\n\n        # Update the storage host groups in the DB\n        mock_list_storage_host_groups.return_value \\\n            = storage_host_groups_list\n        mock_storage_host_groups_get_all.return_value \\\n            = storage_hg_list\n        storage_host_group_obj.sync()\n        self.assertTrue(mock_storage_host_group_update.called)\n\n        # Delete the storage host groups from the DB\n        mock_list_storage_host_groups.return_value = empty_shgs_list\n        mock_storage_host_groups_get_all.return_value \\\n            = storage_hg_list\n        storage_host_group_obj.sync()\n        self.assertTrue(mock_storage_host_group_del.called)\n\n    @mock.patch('delfin.db.storage_host_groups_delete_by_storage')\n    def test_remove(self, mock_storage_host_groups_del):\n        storage_host_group_obj = resources.StorageHostGroupTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        storage_host_group_obj.remove()\n        self.assertTrue(mock_storage_host_groups_del.called)\n\n\nclass TestVolumeGroupTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_volume_groups')\n    @mock.patch('delfin.db.volume_groups_get_all')\n    @mock.patch('delfin.db.volume_groups_delete')\n    @mock.patch('delfin.db.volume_groups_update')\n    @mock.patch('delfin.db.volume_groups_create')\n    def test_sync_successful(self, mock_volume_group_create,\n                        
     mock_volume_group_update,\n                             mock_volume_group_del,\n                             mock_volume_groups_get_all,\n                             mock_list_volume_groups, get_lock):\n        volume_group_obj = resources.VolumeGroupTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        volume_group_obj.sync()\n        self.assertTrue(mock_list_volume_groups.called)\n        self.assertTrue(mock_volume_groups_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # Collect the volume groups from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # Add the volume groups to the DB\n        mock_list_volume_groups.return_value \\\n            = fake_storage_obj.list_volume_groups(context)\n        mock_volume_groups_get_all.return_value = list()\n        volume_group_obj.sync()\n        self.assertTrue(mock_volume_group_create.called)\n\n        # Update the volume groups in the DB\n        mock_list_volume_groups.return_value \\\n            = volume_groups_list\n        mock_volume_groups_get_all.return_value \\\n            = vg_list\n        volume_group_obj.sync()\n        self.assertTrue(mock_volume_group_update.called)\n\n        # Delete the volume groups from the DB\n        mock_list_volume_groups.return_value = empty_volume_groups_list\n        mock_volume_groups_get_all.return_value \\\n            = vg_list\n        volume_group_obj.sync()\n        self.assertTrue(mock_volume_group_del.called)\n\n    @mock.patch('delfin.db.volume_groups_delete_by_storage')\n    def test_remove(self, mock_volume_groups_del):\n        volume_group_obj = resources.VolumeGroupTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        volume_group_obj.remove()\n        self.assertTrue(mock_volume_groups_del.called)\n\n\nclass TestPortGroupTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_port_groups')\n    @mock.patch('delfin.db.port_groups_get_all')\n    @mock.patch('delfin.db.port_groups_delete')\n    @mock.patch('delfin.db.port_groups_update')\n    @mock.patch('delfin.db.port_groups_create')\n    def test_sync_successful(self, mock_port_group_create,\n                             mock_port_group_update,\n                             mock_port_group_del,\n                             mock_port_groups_get_all,\n                             mock_list_port_groups, get_lock):\n        ctxt = context.get_admin_context()\n        port_group_obj = resources.PortGroupTask(\n            ctxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        port_group_obj.sync()\n        self.assertTrue(mock_list_port_groups.called)\n        self.assertTrue(mock_port_groups_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # Collect the port groups from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # Add the port groups to the DB\n        mock_list_port_groups.return_value \\\n            = fake_storage_obj.list_port_groups(context)\n        mock_port_groups_get_all.return_value = list()\n        port_group_obj.sync()\n        self.assertTrue(mock_port_group_create.called)\n\n        # Update the port groups in the DB\n        mock_list_port_groups.return_value \\\n            = port_groups_list\n        mock_port_groups_get_all.return_value \\\n            = pg_list\n        port_group_obj.sync()\n        self.assertTrue(mock_port_group_update.called)\n\n        # Delete 
the port groups from the DB\n        mock_list_port_groups.return_value = empty_port_groups_list\n        mock_port_groups_get_all.return_value \\\n            = pg_list\n        port_group_obj.sync()\n        self.assertTrue(mock_port_group_del.called)\n\n    @mock.patch('delfin.db.port_groups_delete_by_storage')\n    def test_remove(self, mock_port_groups_del):\n        port_group_obj = resources.PortGroupTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        port_group_obj.remove()\n        self.assertTrue(mock_port_groups_del.called)\n\n\nclass TestMaskingViewTask(test.TestCase):\n    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\n    @mock.patch('delfin.drivers.api.API.list_masking_views')\n    @mock.patch('delfin.db.masking_views_get_all')\n    @mock.patch('delfin.db.masking_views_delete')\n    @mock.patch('delfin.db.masking_views_update')\n    @mock.patch('delfin.db.masking_views_create')\n    def test_sync_successful(self, mock_masking_view_create,\n                             mock_masking_view_update,\n                             mock_masking_view_del,\n                             mock_masking_views_get_all,\n                             mock_list_masking_views, get_lock):\n        cntxt = context.get_admin_context()\n        masking_view_obj = resources.MaskingViewTask(\n            cntxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        masking_view_obj.sync()\n        self.assertTrue(mock_list_masking_views.called)\n        self.assertTrue(mock_masking_views_get_all.called)\n        self.assertTrue(get_lock.called)\n\n        # Collect the masking views from fake_storage\n        fake_storage_obj = fake_storage.FakeStorageDriver()\n\n        # Add the masking views to the DB\n        mock_list_masking_views.return_value \\\n            = fake_storage_obj.list_masking_views(context)\n        mock_masking_views_get_all.return_value = list()\n        masking_view_obj.sync()\n        self.assertTrue(mock_masking_view_create.called)\n\n        # Update the masking views in the DB\n        mock_list_masking_views.return_value \\\n            = masking_views_list\n        mock_masking_views_get_all.return_value \\\n            = masking_views_list\n        masking_view_obj.sync()\n        self.assertTrue(mock_masking_view_update.called)\n\n        # Delete the masking views from the DB\n        mock_list_masking_views.return_value = list()\n        mock_masking_views_get_all.return_value \\\n            = masking_views_list\n        masking_view_obj.sync()\n        self.assertTrue(mock_masking_view_del.called)\n\n    @mock.patch('delfin.db.masking_views_delete_by_storage')\n    def test_remove(self, mock_masking_views_del):\n        masking_view_obj = resources.MaskingViewTask(\n            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')\n        masking_view_obj.remove()\n        self.assertTrue(mock_masking_views_del.called)\n"
  },
  {
    "path": "delfin/tests/unit/task_manager/test_telemetry.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom unittest import mock\n\nfrom delfin import context\nfrom delfin import db\nfrom delfin import exception\nfrom delfin import test\nfrom delfin.task_manager.tasks import telemetry\nfrom delfin.task_manager.metrics_manager import MetricsTaskManager\nfrom delfin.task_manager.scheduler.schedulers.telemetry.job_handler \\\n    import JobHandler, FailedJobHandler\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom delfin.task_manager.subprocess_rpcapi import SubprocessAPI\n\n\nfake_storage = {\n    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',\n    'name': 'fake_driver',\n    'description': 'it is a fake driver.',\n    'vendor': 'fake_vendor',\n    'model': 'fake_model',\n    'status': 'normal',\n    'serial_number': '2102453JPN12KA000011',\n    'firmware_version': '1.0.0',\n    'location': 'HK',\n    'total_capacity': 1024 * 1024,\n    'used_capacity': 3126,\n    'free_capacity': 1045449,\n}\n\n\nclass TestPerformanceCollectionTask(test.TestCase):\n\n    @mock.patch.object(db, 'storage_get',\n                       mock.Mock(return_value=fake_storage))\n    @mock.patch('delfin.exporter.base_exporter.PerformanceExporterManager'\n                '.dispatch')\n    @mock.patch('delfin.drivers.api.API.collect_perf_metrics')\n    def test_performance_collection_success(self, mock_collect_perf_metrics,\n                                            mock_dispatch):\n        perf_task = telemetry.PerformanceCollectionTask()\n        storage_id = fake_storage['id']\n        mock_collect_perf_metrics.return_value = []\n        perf_task.collect(context, storage_id, [], 100800, 100900)\n        self.assertEqual(mock_collect_perf_metrics.call_count, 1)\n        self.assertEqual(mock_dispatch.call_count, 1)\n\n    @mock.patch.object(db, 'storage_get',\n                       mock.Mock(return_value=fake_storage))\n    @mock.patch('logging.LoggerAdapter.error')\n    @mock.patch('delfin.exporter.base_exporter.PerformanceExporterManager'\n                '.dispatch')\n    @mock.patch('delfin.drivers.api.API.collect_perf_metrics')\n    def test_performance_collection_failure(self, mock_collect_perf_metrics,\n                                            mock_dispatch, mock_log_error):\n        perf_task = telemetry.PerformanceCollectionTask()\n        storage_id = fake_storage['id']\n        # No alert\n        mock_collect_perf_metrics.return_value = []\n        mock_collect_perf_metrics.side_effect = \\\n            exception.Invalid('Fake exception')\n        perf_task.collect(context, storage_id, [], 100800, 100900)\n        # Verify that dispatch is not done and error is logged\n        # when collect metric fails\n        self.assertEqual(mock_dispatch.call_count, 0)\n        self.assertEqual(mock_log_error.call_count, 1)\n\n    @mock.patch.object(SubprocessAPI, 'assign_job_local')\n    @mock.patch.object(db, 'task_get')\n    @mock.patch.object(JobHandler, 'schedule_job')\n    
@mock.patch.object(MetricsTaskManager, 'schedule_boot_jobs')\n    @mock.patch.object(MetricsTaskManager, 'create_process')\n    def test_metric_manager_assign_job(self, mock_create, mock_boot_job,\n                                       mock_job_schedule, mock_db,\n                                       mock_subprocess_api):\n        mock_db.return_value = {\n            'storage_id': 'storage_id1',\n            'args': 'args',\n            'interval': 10,\n        }\n        mock_create.return_value = None\n        mock_boot_job.return_value = None\n        mock_job_schedule.return_value = None\n        mock_subprocess_api.return_value = None\n\n        mgr = MetricsTaskManager()\n        mgr.enable_sub_process = False\n\n        mgr.assign_job('context', 'task_id1', 'host1')\n        self.assertEqual(mock_job_schedule.call_count, 1)\n\n        mgr.enable_sub_process = True\n        mgr.scheduler = BackgroundScheduler()\n        mgr.scheduler.start()\n        mgr.assign_job('context', 'task_id1', 'host1')\n        self.assertEqual(mock_job_schedule.call_count, 1)\n        self.assertEqual(mock_subprocess_api.call_count, 1)\n\n    @mock.patch.object(SubprocessAPI, 'remove_job_local')\n    @mock.patch.object(db, 'task_get')\n    @mock.patch.object(JobHandler, 'remove_job')\n    @mock.patch.object(MetricsTaskManager, 'schedule_boot_jobs')\n    @mock.patch.object(MetricsTaskManager, 'create_process')\n    def test_metric_manager_remove_job(self, mock_create, mock_boot_job,\n                                       mock_job_schedule, mock_db,\n                                       mock_subprocess_api):\n        mock_db.return_value = {\n            'storage_id': 'storage_id1',\n            'args': 'args',\n            'interval': 10,\n        }\n        mock_create.return_value = None\n        mock_boot_job.return_value = None\n        mock_job_schedule.return_value = None\n        mock_subprocess_api.return_value = None\n\n        mgr = MetricsTaskManager()\n        mgr.enable_sub_process = False\n\n        mgr.remove_job('context', 'task_id1', 'host1')\n        self.assertEqual(mock_job_schedule.call_count, 1)\n\n        mgr.enable_sub_process = True\n        mgr.executor_map = {\n            'host1': {\n                \"storages\": ['storage_id1'],\n            }\n        }\n        mgr.scheduler = BackgroundScheduler()\n        mgr.scheduler.start()\n        mgr.remove_job('context', 'task_id1', 'host1')\n        self.assertEqual(mock_job_schedule.call_count, 1)\n        self.assertEqual(mock_subprocess_api.call_count, 1)\n\n    @mock.patch.object(SubprocessAPI, 'assign_failed_job_local')\n    @mock.patch.object(db, 'failed_task_get')\n    @mock.patch.object(FailedJobHandler, 'schedule_failed_job')\n    @mock.patch.object(MetricsTaskManager, 'schedule_boot_jobs')\n    @mock.patch.object(MetricsTaskManager, 'create_process')\n    @mock.patch.object(MetricsTaskManager, 'get_local_executor')\n    def test_metric_manager_assign_failed_job(self, mock_executor,\n                                              mock_create,\n                                              mock_boot_job,\n                                              mock_job_schedule, mock_db,\n                                              mock_subprocess_api):\n        mock_db.return_value = {\n            'storage_id': 'storage_id1',\n            'args': 'args',\n            'interval': 10,\n        }\n        mock_create.return_value = None\n        mock_boot_job.return_value = None\n        mock_job_schedule.return_value = None\n  
      mock_subprocess_api.return_value = None\n        mock_executor.return_value = None\n\n        mgr = MetricsTaskManager()\n        mgr.enable_sub_process = False\n\n        mgr.assign_failed_job('context', 'task_id1', 'host1')\n        self.assertEqual(mock_job_schedule.call_count, 1)\n\n        mgr.enable_sub_process = True\n        mgr.scheduler = BackgroundScheduler()\n        mgr.scheduler.start()\n        mgr.assign_failed_job('context', 'task_id1', 'host1')\n        self.assertEqual(mock_job_schedule.call_count, 1)\n        self.assertEqual(mock_subprocess_api.call_count, 1)\n\n    @mock.patch.object(SubprocessAPI, 'remove_failed_job_local')\n    @mock.patch.object(db, 'failed_task_get')\n    @mock.patch.object(FailedJobHandler, 'remove_failed_job')\n    @mock.patch.object(MetricsTaskManager, 'schedule_boot_jobs')\n    @mock.patch.object(MetricsTaskManager, 'create_process')\n    def test_metric_manager_remove_failed_job(self, mock_create,\n                                              mock_boot_job,\n                                              mock_job_schedule, mock_db,\n                                              mock_subprocess_api):\n        mock_db.return_value = {\n            'storage_id': 'storage_id1',\n            'args': 'args',\n            'interval': 10,\n        }\n        mock_create.return_value = None\n        mock_boot_job.return_value = None\n        mock_job_schedule.return_value = None\n        mock_subprocess_api.return_value = None\n\n        mgr = MetricsTaskManager()\n        mgr.enable_sub_process = False\n\n        mgr.remove_failed_job('context', 'task_id1', 'host1')\n        self.assertEqual(mock_job_schedule.call_count, 1)\n\n        mgr.enable_sub_process = True\n        mgr.executor_map = {\n            'host1': {\n                \"storages\": ['storage_id1'],\n            }\n        }\n        mgr.scheduler = BackgroundScheduler()\n        mgr.scheduler.start()\n        mgr.remove_failed_job('context', 'task_id1', 'host1')\n        self.assertEqual(mock_job_schedule.call_count, 1)\n        self.assertEqual(mock_subprocess_api.call_count, 1)\n"
  },
  {
    "path": "delfin/tests/unit/test_context.py",
    "content": "#    Copyright 2020 The SODA Authors.\n#    Copyright 2011 OpenStack LLC\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nfrom delfin import context\nfrom delfin import test\n\n\nclass ContextTestCase(test.TestCase):\n\n    def test_request_context_elevated(self):\n        user_context = context.RequestContext(\n            'fake_user', 'fake_project', is_admin=False)\n        self.assertFalse(user_context.is_admin)\n        self.assertEqual([], user_context.roles)\n        admin_context = user_context.elevated()\n        self.assertFalse(user_context.is_admin)\n        self.assertTrue(admin_context.is_admin)\n        self.assertNotIn('admin', user_context.roles)\n        self.assertIn('admin', admin_context.roles)\n\n    def test_request_context_read_deleted(self):\n        ctxt = context.RequestContext('111',\n                                      '222',\n                                      read_deleted='yes')\n        self.assertEqual('yes', ctxt.read_deleted)\n\n        ctxt.read_deleted = 'no'\n        self.assertEqual('no', ctxt.read_deleted)\n\n    def test_request_context_read_deleted_invalid(self):\n        self.assertRaises(ValueError,\n                          context.RequestContext,\n                          '111',\n                          '222',\n                          read_deleted=True)\n\n        ctxt = context.RequestContext('111', '222')\n        self.assertRaises(ValueError,\n                          setattr,\n                          ctxt,\n                          'read_deleted',\n                          True)\n"
  },
  {
    "path": "delfin/tests/unit/test_coordination.py",
    "content": "# Copyright 2015 Intel\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport ddt\nfrom unittest import mock\nfrom tooz import coordination as tooz_coordination\nfrom tooz import locking as tooz_locking\n\nfrom delfin import coordination\nfrom delfin import test\n\n\nclass Locked(Exception):\n    pass\n\n\nclass MockToozLock(tooz_locking.Lock):\n    active_locks = set()\n\n    def acquire(self, blocking=True):\n        if self.name not in self.active_locks:\n            self.active_locks.add(self.name)\n            return True\n        elif not blocking:\n            return False\n        else:\n            raise Locked\n\n    def release(self):\n        self.active_locks.remove(self.name)\n\n\n@ddt.ddt\nclass CoordinatorTestCase(test.TestCase):\n\n    def setUp(self):\n        super(CoordinatorTestCase, self).setUp()\n        self.get_coordinator = self.mock_object(tooz_coordination,\n                                                'get_coordinator')\n\n    def test_coordinator_start(self):\n        crd = self.get_coordinator.return_value\n\n        agent = coordination.Coordinator()\n        agent.start()\n\n        self.assertTrue(self.get_coordinator.called)\n        self.assertTrue(crd.start.called)\n        self.assertTrue(agent.started)\n\n    def test_coordinator_stop(self):\n        crd = self.get_coordinator.return_value\n\n        agent = coordination.Coordinator()\n        agent.start()\n\n        self.assertIsNotNone(agent.coordinator)\n        agent.stop()\n\n        self.assertTrue(crd.stop.called)\n        self.assertIsNone(agent.coordinator)\n        self.assertFalse(agent.started)\n\n    def test_coordinator_lock(self):\n        crd = self.get_coordinator.return_value\n        crd.get_lock.side_effect = lambda n: MockToozLock(n)\n\n        agent1 = coordination.Coordinator()\n        agent1.start()\n        agent2 = coordination.Coordinator()\n        agent2.start()\n\n        lock_string = 'lock'\n        expected_lock = lock_string.encode('ascii')\n\n        self.assertNotIn(expected_lock, MockToozLock.active_locks)\n        with agent1.get_lock(lock_string):\n            self.assertIn(expected_lock, MockToozLock.active_locks)\n            self.assertRaises(Locked, agent1.get_lock(lock_string).acquire)\n            self.assertRaises(Locked, agent2.get_lock(lock_string).acquire)\n        self.assertNotIn(expected_lock, MockToozLock.active_locks)\n\n    def test_coordinator_offline(self):\n        crd = self.get_coordinator.return_value\n        crd.start.side_effect = tooz_coordination.ToozConnectionError('err')\n\n        agent = coordination.Coordinator()\n        self.assertRaises(tooz_coordination.ToozError, agent.start)\n        self.assertFalse(agent.started)\n\n\n@mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')\nclass CoordinationTestCase(test.TestCase):\n    def test_lock(self, get_lock):\n        with coordination.Lock('lock'):\n            
self.assertTrue(get_lock.called)\n\n    def test_synchronized(self, get_lock):\n        @coordination.synchronized('lock-{f_name}-{foo.val}-{bar[val]}')\n        def func(foo, bar):\n            pass\n\n        foo = mock.Mock()\n        foo.val = 7\n        bar = mock.MagicMock()\n        bar.__getitem__.return_value = 8\n        func(foo, bar)\n        get_lock.assert_called_with('lock-func-7-8')\n\n\nclass ConsistentHashingTestCase(test.TestCase):\n\n    def setUp(self):\n        super(ConsistentHashingTestCase, self).setUp()\n        self.get_coordinator = self.mock_object(tooz_coordination,\n                                                'get_coordinator')\n\n    def test_join_group(self):\n        crd = self.get_coordinator.return_value\n        part = coordination.ConsistentHashing()\n        part.start()\n        part.join_group()\n        self.assertTrue(crd.join_partitioned_group.called)\n\n    def test_register_watcher_func(self):\n        crd = self.get_coordinator.return_value\n        part = coordination.ConsistentHashing()\n        part.start()\n        part.register_watcher_func(mock.Mock(), mock.Mock())\n        self.assertTrue(crd.watch_join_group.called)\n        self.assertTrue(crd.watch_leave_group.called)\n\n    def test_watch_group_change(self):\n        crd = self.get_coordinator.return_value\n        part = coordination.ConsistentHashing()\n        part.start()\n        part.watch_group_change()\n        self.assertTrue(crd.run_watchers.called)\n"
  },
  {
    "path": "delfin/tests/unit/test_manager.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2014 Mirantis Inc.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Test of Base Manager for Manila.\"\"\"\n\nimport ddt\nfrom oslo_utils import importutils\n\nfrom delfin import manager\nfrom delfin import test\n\n\n@ddt.ddt\nclass ManagerTestCase(test.TestCase):\n\n    def setUp(self):\n        super(ManagerTestCase, self).setUp()\n        self.host = 'host'\n        self.db_driver = 'fake_driver'\n        self.mock_object(importutils, 'import_module')\n\n    def test_verify_manager_instance(self):\n        fake_manager = manager.Manager(self.host, self.db_driver)\n        self.assertTrue(hasattr(fake_manager, '_periodic_tasks'))\n        self.assertTrue(hasattr(fake_manager, 'additional_endpoints'))\n        self.assertTrue(hasattr(fake_manager, 'host'))\n        self.assertTrue(hasattr(fake_manager, 'periodic_tasks'))\n        self.assertTrue(hasattr(fake_manager, 'init_host'))\n        self.assertTrue(hasattr(fake_manager, 'service_version'))\n        self.assertTrue(hasattr(fake_manager, 'service_config'))\n        self.assertEqual(self.host, fake_manager.host)\n        importutils.import_module.assert_called_once_with(self.db_driver)\n\n    @ddt.data(True, False)\n    def test_periodic_tasks(self, raise_on_error):\n        fake_manager = manager.Manager(self.host, self.db_driver)\n        fake_context = 'fake_context'\n        self.mock_object(fake_manager, 'run_periodic_tasks')\n\n        fake_manager.periodic_tasks(fake_context, raise_on_error)\n\n        fake_manager.run_periodic_tasks.assert_called_once_with(\n            fake_context, raise_on_error=raise_on_error)\n"
  },
  {
    "path": "delfin/tests/unit/test_rpc.py",
    "content": "#    Copyright 2017 Red Hat, Inc.\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport ddt\nfrom unittest import mock\n\nfrom delfin import rpc\nfrom delfin import test\n\n\n@ddt.ddt\nclass RPCTestCase(test.TestCase):\n\n    @ddt.data([], ['noop'], ['noop', 'noop'])\n    @mock.patch('oslo_messaging.JsonPayloadSerializer', wraps=True)\n    def test_init_no_notifications(self, driver, serializer_mock):\n        self.override_config('driver', driver,\n                             group='oslo_messaging_notifications')\n        rpc.init(test.CONF)\n        self.assertEqual(rpc.utils.DO_NOTHING, rpc.NOTIFIER)\n        serializer_mock.assert_not_called()\n\n    @mock.patch.object(rpc, 'messaging')\n    def test_init_notifications(self, messaging_mock):\n        rpc.init(test.CONF)\n        self.assertTrue(messaging_mock.JsonPayloadSerializer.called)\n        self.assertTrue(messaging_mock.Notifier.called)\n        self.assertEqual(rpc.NOTIFIER, messaging_mock.Notifier.return_value)\n"
  },
  {
    "path": "delfin/tests/unit/utils.py",
    "content": "# Copyright 2021 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom delfin.db.sqlalchemy import models\n\ncase = unittest.TestCase()\n\n\ndef check_isinstance(obj, cls):\n    if isinstance(obj, cls):\n        return True\n    else:\n        assert isinstance(models.StoragePool, object)\n\n\ndef get_db_schema_attributes_list(schema):\n    db_attrib_lst = []\n    for i in schema.__dict__.keys():\n        if not i.startswith('_'):\n            db_attrib_lst.append(i)\n    return sorted(db_attrib_lst)\n\n\ndef validate_db_schema_model(got, model):\n    try:\n        res = check_isinstance(got, model)\n        if res:\n            attributes = get_db_schema_attributes_list(model)\n            lst = sorted(list(got.keys()))\n            case.assertListEqual(attributes, lst)\n            case.assertCountEqual(attributes, lst)\n    except AssertionError:\n        raise\n"
  },
  {
    "path": "delfin/tests/unit/wsgi/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/tests/unit/wsgi/test_common.py",
    "content": "# Copyright 2017 Mirantis Inc.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nfrom unittest import mock\n\nfrom delfin import test\nfrom delfin.wsgi import common\n\n\nclass FakeApp(common.Application):\n    def __init__(self, **kwargs):\n        for k, v in kwargs.items():\n            setattr(self, k, v)\n\n\nclass WSGICommonTestCase(test.TestCase):\n\n    def test_application_factory(self):\n        fake_global_config = mock.Mock()\n        kwargs = {\"k1\": \"v1\", \"k2\": \"v2\"}\n\n        result = FakeApp.factory(fake_global_config, **kwargs)\n\n        fake_global_config.assert_not_called()\n        self.assertIsInstance(result, FakeApp)\n        for k, v in kwargs.items():\n            self.assertTrue(hasattr(result, k))\n            self.assertEqual(getattr(result, k), v)\n\n    def test_application___call__(self):\n        self.assertRaises(\n            NotImplementedError,\n            common.Application(), 'fake_environ', 'fake_start_response')\n"
  },
  {
    "path": "delfin/utils.py",
    "content": "# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# Copyright 2011 Justin Santa Barbara\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Utilities and helper functions.\"\"\"\n\nimport functools\nimport inspect\nimport os\nimport pyclbr\nimport random\nimport re\nimport sys\nimport threading\n\nfrom eventlet import pools\nimport logging\nfrom oslo_concurrency import lockutils\nfrom oslo_concurrency import processutils\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_utils import encodeutils\nfrom oslo_utils import importutils\nfrom oslo_utils import netutils\nfrom oslo_utils import strutils\nfrom oslo_utils import timeutils\nimport paramiko\nimport retrying\nimport six\n\nfrom delfin import exception\nfrom delfin.i18n import _\n\nCONF = cfg.CONF\nLOG = log.getLogger(__name__)\nlock = threading.Lock()\nif hasattr('CONF', 'debug') and CONF.debug:\n    logging.getLogger(\"paramiko\").setLevel(logging.DEBUG)\n\n_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'\n_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'\n\nsynchronized = lockutils.synchronized_with_prefix('delfin-')\n\n\ndef isotime(at=None, subsecond=False):\n    \"\"\"Stringify time in ISO 8601 format.\"\"\"\n\n    # Python provides a similar instance method for datetime.datetime objects\n    # called isoformat(). The format of the strings generated by isoformat()\n    # have a couple of problems:\n    # 1) The strings generated by isotime are used in tokens and other public\n    #    APIs that we can't change without a deprecation period. The strings\n    #    generated by isoformat are not the same format, so we can't just\n    #    change to it.\n    # 2) The strings generated by isoformat do not include the microseconds if\n    #    the value happens to be 0. 
def _get_root_helper():\n    return 'sudo delfin-rootwrap %s' % CONF.rootwrap_config\n\n\ndef execute(*cmd, **kwargs):\n    \"\"\"Convenience wrapper around oslo's execute() function.\"\"\"\n    if 'run_as_root' in kwargs and 'root_helper' not in kwargs:\n        kwargs['root_helper'] = _get_root_helper()\n    if hasattr(CONF, 'debug') and CONF.debug:\n        kwargs['loglevel'] = logging.DEBUG\n    return processutils.execute(*cmd, **kwargs)\n\n\nclass SSHPool(pools.Pool):\n    \"\"\"A simple eventlet pool to hold ssh connections.\"\"\"\n\n    def __init__(self, ip, port, conn_timeout, login, password=None,\n                 privatekey=None, *args, **kwargs):\n        self.ip = ip\n        self.port = port\n        self.login = login\n        self.password = password\n        self.conn_timeout = conn_timeout if conn_timeout else None\n        self.path_to_private_key = privatekey\n        super(SSHPool, self).__init__(*args, **kwargs)\n\n    def create(self):  # pylint: disable=method-hidden\n        ssh = paramiko.SSHClient()\n        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n        look_for_keys = True\n        if self.path_to_private_key:\n            self.path_to_private_key = os.path.expanduser(\n                self.path_to_private_key)\n            look_for_keys = False\n        elif self.password:\n            look_for_keys = False\n        try:\n            ssh.connect(self.ip,\n                        port=self.port,\n                        username=self.login,\n                        password=self.password,\n                        key_filename=self.path_to_private_key,\n                        look_for_keys=look_for_keys,\n                        timeout=self.conn_timeout,\n                        banner_timeout=self.conn_timeout)\n            if self.conn_timeout:\n                transport = ssh.get_transport()\n                transport.set_keepalive(self.conn_timeout)\n            return ssh\n        except Exception as e:\n            msg = _(\"Check whether private key or password is correctly \"\n                    \"set. Error connecting via ssh: %s\") % e\n            LOG.error(msg)\n            raise exception.SSHException(msg)\n\n    def get(self):\n        \"\"\"Return an item from the pool, when one is available.\n\n        This may cause the calling greenthread to block. Check if a\n        connection is active before returning it. 
For dead connections\n        create and return a new connection.\n        \"\"\"\n        if self.free_items:\n            conn = self.free_items.popleft()\n            if conn:\n                if conn.get_transport().is_active():\n                    return conn\n                else:\n                    conn.close()\n            return self.create()\n        if self.current_size < self.max_size:\n            created = self.create()\n            self.current_size += 1\n            return created\n        return self.channel.get()\n\n    def remove(self, ssh):\n        \"\"\"Close an ssh client and remove it from free_items.\"\"\"\n        ssh.close()\n        if ssh in self.free_items:\n            self.free_items.remove(ssh)\n            if self.current_size > 0:\n                self.current_size -= 1\n\n\n
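# Illustrative SSHPool usage (host and credentials are placeholders):\n#\n#     pool = SSHPool('192.0.2.10', 22, conn_timeout=30,\n#                    login='admin', password='secret', max_size=4)\n#     with pool.item() as ssh:\n#         stdin, stdout, stderr = ssh.exec_command('uname -a')\n\n\n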
def check_ssh_injection(cmd_list):\n    ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',\n                             '<']\n\n    # Check whether injection attacks exist\n    for arg in cmd_list:\n        arg = arg.strip()\n\n        # Check for matching quotes on the ends\n        is_quoted = re.match('^(?P<quote>[\\'\"])(?P<quoted>.*)(?P=quote)$', arg)\n        if is_quoted:\n            # Check for unescaped quotes within the quoted argument\n            quoted = is_quoted.group('quoted')\n            if quoted:\n                if (re.match('[\\'\"]', quoted) or\n                        re.search('[^\\\\\\\\][\\'\"]', quoted)):\n                    raise exception.SSHInjectionThreat(cmd_list)\n        else:\n            # We only allow spaces within quoted arguments, and that\n            # is the only special character allowed within quotes\n            if len(arg.split()) > 1:\n                raise exception.SSHInjectionThreat(cmd_list)\n\n        # Second, check for dangerous characters anywhere in the argument;\n        # an unescaped shell special character is treated as a threat.\n        for c in ssh_injection_pattern:\n            if c not in arg:\n                continue\n\n            result = arg.find(c)\n            if result != -1:\n                if result == 0 or arg[result - 1] != '\\\\':\n                    raise exception.SSHInjectionThreat(cmd_list)\n\n\ndef monkey_patch():\n    \"\"\"Patch decorator.\n\n    If CONF.monkey_patch is set to True,\n    this function patches a decorator\n    onto all functions in the specified modules.\n    You can set decorators for each module\n    using CONF.monkey_patch_modules.\n    The format is \"Module path:Decorator function\".\n    Example:\n    'delfin.api.ec2.cloud:delfin.common.common.notifier.api.notify_decorator'\n\n    Parameters of the decorator are as follows.\n    (See delfin.common.common.notifier.api.notify_decorator)\n\n    name - name of the function\n    function - object of the function\n    \"\"\"\n    # If CONF.monkey_patch is not True, this function does nothing.\n    if not CONF.monkey_patch:\n        return\n    # Get list of modules and decorators\n    for module_and_decorator in CONF.monkey_patch_modules:\n        module, decorator_name = module_and_decorator.split(':')\n        # Get decorator function\n        decorator = importutils.import_class(decorator_name)\n        __import__(module)\n        # Retrieve module information using pyclbr\n        module_data = pyclbr.readmodule_ex(module)\n        for key in module_data.keys():\n            # set the decorator for the class methods\n            if isinstance(module_data[key], pyclbr.Class):\n                clz = importutils.import_class(\"%s.%s\" % (module, key))\n                # NOTE(vponomaryov): we need to distinguish class methods types\n                # for py2 and py3, because the concept of 'unbound methods' has\n                # been removed from the python3.x\n                if six.PY3:\n                    member_type = inspect.isfunction\n                else:\n                    member_type = inspect.ismethod\n                for method, func in inspect.getmembers(clz, member_type):\n                    setattr(\n                        clz, method,\n                        decorator(\"%s.%s.%s\" % (module, key, method), func))\n            # set the decorator for the function\n            if isinstance(module_data[key], pyclbr.Function):\n                func = importutils.import_class(\"%s.%s\" % (module, key))\n                setattr(sys.modules[module], key,\n                        decorator(\"%s.%s\" % (module, key), func))\n\n\ndef file_open(*args, **kwargs):\n    \"\"\"Open file\n\n    see built-in open() documentation for more details\n\n    Note: The reason this is kept in a separate module is to easily\n          be able to provide a stub module that doesn't alter system\n          state at all (for unit tests)\n    \"\"\"\n    return open(*args, **kwargs)\n\n\ndef check_string_length(value, name, min_length=0, max_length=None,\n                        allow_all_spaces=True):\n    \"\"\"Check the length of specified string.\n\n    :param value: the value of the string\n    :param name: the name of the string\n    :param min_length: the min_length of the string\n    :param max_length: the max_length of the string\n    :param allow_all_spaces: whether a value of only spaces is allowed\n    \"\"\"\n    try:\n        strutils.check_string_length(value, name=name,\n                                     min_length=min_length,\n                                     max_length=max_length)\n    except 
(ValueError, TypeError) as exc:\n        raise exception.InvalidInput(exc)\n\n    if not allow_all_spaces and value.isspace():\n        msg = _('%(name)s cannot be all spaces.') % {'name': name}\n        raise exception.InvalidInput(msg)\n\n\ndef service_is_up(service):\n    \"\"\"Check whether a service is up based on last heartbeat.\"\"\"\n    last_heartbeat = service['updated_at'] or service['created_at']\n    # Timestamps in DB are UTC.\n    tdelta = timeutils.utcnow() - last_heartbeat\n    elapsed = tdelta.total_seconds()\n    return abs(elapsed) <= CONF.service_down_time\n\n\ndef walk_class_hierarchy(clazz, encountered=None):\n    \"\"\"Walk class hierarchy, yielding most derived classes first.\"\"\"\n    if not encountered:\n        encountered = []\n    for subclass in clazz.__subclasses__():\n        if subclass not in encountered:\n            encountered.append(subclass)\n            # drill down to leaves first\n            for subsubclass in walk_class_hierarchy(subclass, encountered):\n                yield subsubclass\n            yield subclass\n\n\ndef is_valid_ip_address(ip_address, ip_version):\n    ip_version = ([int(ip_version)] if not isinstance(ip_version, list)\n                  else ip_version)\n\n    if not set(ip_version).issubset(set([4, 6])):\n        raise exception.ImproperIPVersion(ip_version)\n\n    if 4 in ip_version:\n        if netutils.is_valid_ipv4(ip_address):\n            return True\n    if 6 in ip_version:\n        if netutils.is_valid_ipv6(ip_address):\n            return True\n\n    return False\n\n\ndef is_all_tenants(search_opts):\n    \"\"\"Checks to see if the all_tenants flag is in search_opts\n\n    :param dict search_opts: The search options for a request\n    :returns: boolean indicating if all_tenants are being requested or not\n    \"\"\"\n    all_tenants = search_opts.get('all_tenants')\n    if all_tenants:\n        try:\n            all_tenants = strutils.bool_from_string(all_tenants, True)\n        except ValueError as err:\n            raise exception.InvalidInput(six.text_type(err))\n    else:\n        # The empty string is considered enabling all_tenants\n        all_tenants = 'all_tenants' in search_opts\n    return all_tenants\n\n\nclass IsAMatcher(object):\n    def __init__(self, expected_value=None):\n        self.expected_value = expected_value\n\n    def __eq__(self, actual_value):\n        return isinstance(actual_value, self.expected_value)\n\n\nclass ComparableMixin(object):\n    def _compare(self, other, method):\n        try:\n            return method(self._cmpkey(), other._cmpkey())\n        except (AttributeError, TypeError):\n            # _cmpkey is not implemented, or returns a different type,\n            # so we cannot compare with \"other\".\n            return NotImplemented\n\n    def __lt__(self, other):\n        return self._compare(other, lambda s, o: s < o)\n\n    def __le__(self, other):\n        return self._compare(other, lambda s, o: s <= o)\n\n    def __eq__(self, other):\n        return self._compare(other, lambda s, o: s == o)\n\n    def __ge__(self, other):\n        return self._compare(other, lambda s, o: s >= o)\n\n    def __gt__(self, other):\n        return self._compare(other, lambda s, o: s > o)\n\n    def __ne__(self, other):\n        return self._compare(other, lambda s, o: s != o)\n\n\ndef retry(exception, interval=1, retries=10, backoff_rate=2,\n          wait_random=False, backoff_sleep_max=None):\n    \"\"\"A wrapper around the retrying library.\n\n    This decorator logs failed attempts and validates the 'retries' 
input param.\n    Time interval between retries is calculated in the following way:\n    interval * backoff_rate ^ previous_attempt_number\n\n    :param exception: expected exception type. When wrapped function\n                      raises an exception of this type, the function\n                      execution is retried.\n    :param interval: param 'interval' is used to calculate time interval\n                     between retries:\n                     interval * backoff_rate ^ previous_attempt_number\n    :param retries: number of retries. Use 0 for an infinite retry loop.\n    :param backoff_rate: param 'backoff_rate' is used to calculate time\n                         interval between retries:\n                         interval * backoff_rate ^ previous_attempt_number\n    :param wait_random: boolean value to enable retry with random wait timer.\n    :param backoff_sleep_max: Maximum number of seconds for the calculated\n                              backoff sleep. Use None if no maximum is needed.\n    \"\"\"\n\n    def _retry_on_exception(e):\n        return isinstance(e, exception)\n\n    def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):\n        exp = backoff_rate ** previous_attempt_number\n        wait_for = max(0, interval * exp)\n\n        if wait_random:\n            wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0)\n        else:\n            wait_val = wait_for * 1000.0\n\n        if backoff_sleep_max:\n            wait_val = min(backoff_sleep_max * 1000.0, wait_val)\n\n        LOG.debug(\"Sleeping for %s seconds.\", (wait_val / 1000.0))\n        return wait_val\n\n    def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):\n        delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0\n        LOG.debug(\"Failed attempt %s\", previous_attempt_number)\n        LOG.debug(\"Have been at this for %s seconds\",\n                  delay_since_first_attempt)\n        return retries > 0 and previous_attempt_number == retries\n\n    if retries < 0:\n        raise ValueError(_('Retries must be greater than or '\n                           'equal to 0 (received: %s).') % retries)\n\n    def _decorator(f):\n\n        @six.wraps(f)\n        def _wrapper(*args, **kwargs):\n            r = retrying.Retrying(retry_on_exception=_retry_on_exception,\n                                  wait_func=_backoff_sleep,\n                                  stop_func=_print_stop)\n            return r.call(f, *args, **kwargs)\n\n        return _wrapper\n\n    return _decorator\n\n\n
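# Example use of the retry decorator (illustrative; _reconnect is a\n# hypothetical helper, not part of this module):\n#\n#     @retry(exception.SSHException, interval=2, retries=3)\n#     def _reconnect(pool):\n#         return pool.create()\n\n\n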
'\n                'Expecting a boolean.') % {'param': param,\n                                           'param_string': key}\n        raise exception.InvalidInput(msg)\n    return param\n\n\ndef check_params_exist(keys, params):\n    \"\"\"Validates if keys exist in params.\n\n    :param keys: List of keys to check\n    :param params: Parameters received from REST API\n    \"\"\"\n    if set(keys) - set(params):\n        msg = _(\"Must specify all mandatory parameters: %s\") % keys\n        raise exception.InvalidInput(msg)\n\n\ndef check_params_are_boolean(keys, params, default=False):\n    \"\"\"Validates if keys in params are boolean.\n\n    :param keys: List of keys to check\n    :param params: Parameters received from REST API\n    :param default: default value when it does not exist\n    :return: a dictionary with keys and respective retrieved value\n    \"\"\"\n    result = {}\n    for key in keys:\n        value = get_bool_from_api_params(key, params, default, strict=True)\n        result[key] = value\n    return result\n\n\ndef convert_str(text):\n    \"\"\"Convert to native string.\n\n    Convert bytes and Unicode strings to native strings:\n\n    * convert to bytes on Python 2:\n      encode Unicode using encodeutils.safe_encode()\n    * convert to Unicode on Python 3: decode bytes from UTF-8\n    \"\"\"\n    if six.PY2:\n        return encodeutils.safe_encode(text)\n    else:\n        if isinstance(text, bytes):\n            return text.decode('utf-8')\n        else:\n            return text\n\n\nclass DoNothing(str):\n    \"\"\"Class that literally does nothing.\n\n    We inherit from str in case it's passed to json.dumps.\n    \"\"\"\n\n    def __call__(self, *args, **kwargs):\n        return self\n\n    def __getattr__(self, name):\n        return self\n\n\nDO_NOTHING = DoNothing()\n\n\ndef notifications_enabled(conf):\n    \"\"\"Check if oslo notifications are enabled.\"\"\"\n    notifications_driver = set(conf.oslo_messaging_notifications.driver)\n    return notifications_driver and notifications_driver != {'noop'}\n\n\ndef if_notifications_enabled(function):\n    \"\"\"Calls decorated method only if notifications are enabled.\"\"\"\n\n    @functools.wraps(function)\n    def wrapped(*args, **kwargs):\n        if notifications_enabled(CONF):\n            return function(*args, **kwargs)\n        return DO_NOTHING\n\n    return wrapped\n\n\ndef write_local_file(filename, contents, as_root=False):\n    tmp_filename = \"%s.tmp\" % filename\n    if as_root:\n        execute('tee', tmp_filename, run_as_root=True, process_input=contents)\n        execute('mv', '-f', tmp_filename, filename, run_as_root=True)\n    else:\n        with open(tmp_filename, 'w') as f:\n            f.write(contents)\n        os.rename(tmp_filename, filename)\n\n\ndef write_remote_file(ssh, filename, contents, as_root=False):\n    tmp_filename = \"%s.tmp\" % filename\n    if as_root:\n        cmd = 'sudo tee \"%s\" > /dev/null' % tmp_filename\n        cmd2 = 'sudo mv -f \"%s\" \"%s\"' % (tmp_filename, filename)\n    else:\n        cmd = 'cat > \"%s\"' % tmp_filename\n        cmd2 = 'mv -f \"%s\" \"%s\"' % (tmp_filename, filename)\n    stdin, __, __ = ssh.exec_command(cmd)\n    stdin.write(contents)\n    stdin.close()\n    stdin.channel.shutdown_write()\n    ssh.exec_command(cmd2)\n\n\nclass Singleton(type):\n    _instances = {}\n\n    def __call__(cls, *args, **kwargs):\n        if cls not in cls._instances:\n            with lock:\n                if cls not in cls._instances:\n                 
   cls._instances[cls] = super(Singleton,\n                                                cls).__call__(*args, **kwargs)\n        return cls._instances[cls]\n\n\ndef utcnow_ms():\n    return int(timeutils.utcnow(True).timestamp() * 1000)\n"
  },
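  {
    "path": "docs/examples/utils_usage_example.py",
    "content": "# Illustrative sketch added as documentation; not shipped delfin code.\n# It demonstrates the retry decorator and the Singleton metaclass from the\n# common utils module. The import path (delfin.utils) and all names defined\n# below are assumptions made for this example; adjust them to the actual\n# module layout.\nimport six\n\nfrom delfin import utils\n\n\nclass TransientError(Exception):\n    \"\"\"Stand-in error type used only by this example.\"\"\"\n\n\n@utils.retry(TransientError, interval=1, retries=3, backoff_rate=2)\ndef flaky_operation():\n    # Each failed attempt waits interval * backoff_rate ** attempt seconds\n    # before the next try (2s, then 4s, ...), until 'retries' is reached.\n    raise TransientError('try again')\n\n\n@six.add_metaclass(utils.Singleton)\nclass ConnectionPool(object):\n    \"\"\"Every ConnectionPool() call returns the same instance.\"\"\"\n\n    def __init__(self):\n        self.connections = {}\n\n\nif __name__ == '__main__':\n    assert ConnectionPool() is ConnectionPool()\n    try:\n        flaky_operation()\n    except TransientError:\n        print('gave up after the configured number of attempts')\n"
  },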
  {
    "path": "delfin/version.py",
    "content": "#    Copyright 2020 The SODA Authors.\n#    Copyright 2011 OpenStack LLC\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nfrom pbr import version as pbr_version\n\nDELFIN_VENDOR = \"SODA Foundation\"\nDELFIN_PRODUCT = \"SODA Delfin\"\nDELFIN_PACKAGE = None  # OS distro package version suffix\n\nloaded = False\nversion_info = pbr_version.VersionInfo('delfin')\nversion_string = version_info.version_string\n"
  },
  {
    "path": "delfin/wsgi/__init__.py",
    "content": ""
  },
  {
    "path": "delfin/wsgi/common.py",
    "content": "# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# Copyright 2010 OpenStack LLC.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\n\"\"\"Utility methods for working with WSGI servers.\"\"\"\n\nimport webob.dec\nimport webob.exc\n\nfrom delfin.i18n import _\n\n\nclass Request(webob.Request):\n    pass\n\n\nclass Application(object):\n    \"\"\"Base WSGI application wrapper. Subclasses need to implement __call__.\"\"\"\n\n    @classmethod\n    def factory(cls, global_config, **local_config):\n        \"\"\"Used for paste app factories in paste.deploy config files.\n\n        Any local configuration (that is, values under the [app:APPNAME]\n        section of the paste config) will be passed into the `__init__` method\n        as kwargs.\n\n        A hypothetical configuration would look like:\n\n            [app:wadl]\n            latest_version = 1.3\n            paste.app_factory = delfin.api.fancy_api:Wadl.factory\n\n        which would result in a call to the `Wadl` class as\n\n            import delfin.api.fancy_api\n            fancy_api.Wadl(latest_version='1.3')\n\n        You could of course re-implement the `factory` method in subclasses,\n        but using the kwarg passing it shouldn't be necessary.\n\n        \"\"\"\n        return cls(**local_config)\n\n    def __call__(self, environ, start_response):\n        r\"\"\"Subclasses will probably want to implement __call__ like this:\n\n        @webob.dec.wsgify(RequestClass=Request)\n        def __call__(self, req):\n          # Any of the following objects work as responses:\n\n          # Option 1: simple string\n          res = 'message\\n'\n\n          # Option 2: a nicely formatted HTTP exception page\n          res = exc.HTTPForbidden(detail='Nice try')\n\n          # Option 3: a webob Response object (in case you need to play with\n          # headers, or you want to be treated like an iterable, or or or)\n          res = Response();\n          res.app_iter = open('somefile')\n\n          # Option 4: any wsgi app to be run next\n          res = self.application\n\n          # Option 5: you can get a Response object for a wsgi app, too, to\n          # play with headers etc\n          res = req.get_response(self.application)\n\n          # You can then just return your response...\n          return res\n          # ... or set req.response and return None.\n          req.response = res\n\n        See the end of http://pythonpaste.org/webob/modules/dec.html\n        for more info.\n\n        \"\"\"\n        raise NotImplementedError(_('You must implement __call__'))\n\n\nclass Middleware(Application):\n    \"\"\"Base WSGI middleware.\n\n    These classes require an application to be\n    initialized that will be called next.  
By default the middleware will\n    simply call its wrapped app, or you can override __call__ to customize its\n    behavior.\n\n    \"\"\"\n\n    @classmethod\n    def factory(cls, global_config, **local_config):\n        \"\"\"Used for paste app factories in paste.deploy config files.\n\n        Any local configuration (that is, values under the [filter:APPNAME]\n        section of the paste config) will be passed into the `__init__` method\n        as kwargs.\n\n        A hypothetical configuration would look like:\n\n            [filter:analytics]\n            redis_host = 127.0.0.1\n            paste.filter_factory = delfin.api.analytics:Analytics.factory\n\n        which would result in a call to the `Analytics` class as\n\n            import delfin.api.analytics\n            analytics.Analytics(app_from_paste, redis_host='127.0.0.1')\n\n        You could of course re-implement the `factory` method in subclasses,\n        but with the kwarg passing shown above it shouldn't be necessary.\n\n        \"\"\"\n        def _factory(app):\n            return cls(app, **local_config)\n        return _factory\n\n    def __init__(self, application):\n        self.application = application\n\n    def process_request(self, req):\n        \"\"\"Called on each request.\n\n        If this returns None, the next application down the stack will be\n        executed. If it returns a response then that response will be returned\n        and execution will stop here.\n\n        \"\"\"\n        return None\n\n    def process_response(self, response):\n        \"\"\"Do whatever you'd like to the response.\"\"\"\n        return response\n\n    @webob.dec.wsgify(RequestClass=Request)\n    def __call__(self, req):\n        # pylint: disable=assignment-from-none\n        response = self.process_request(req)\n        if response:\n            return response\n        response = req.get_response(self.application)\n        return self.process_response(response)\n"
  },
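  {
    "path": "docs/examples/wsgi_middleware_example.py",
    "content": "# Illustrative sketch added as documentation; not shipped delfin code.\n# It shows the Middleware subclassing pattern described in the\n# delfin.wsgi.common docstrings: override process_request/process_response\n# and let the base __call__ drive the wrapped application. The header name\n# and hello_app below are made-up examples.\nimport webob.dec\n\nfrom delfin.wsgi import common\n\n\nclass HeaderTagger(common.Middleware):\n    \"\"\"Tags every response that passes through the pipeline.\"\"\"\n\n    def process_response(self, response):\n        # Runs after the wrapped application has produced a response.\n        response.headers['X-Example-Tag'] = 'seen'\n        return response\n\n\n@webob.dec.wsgify(RequestClass=common.Request)\ndef hello_app(req):\n    # Minimal wrapped application: a plain string becomes the body.\n    return 'hello\\n'\n\n\n# Manual wiring, equivalent to what Middleware.factory produces from a\n# [filter:...] section in api-paste.ini.\napp = HeaderTagger(hello_app)\n"
  },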
  {
    "path": "docker-compose.yml",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Installation Steps:\n# ------------------\n#\n# 1. Create the 'sodafoundation/delfin' docker image with Dockerfile in\n#    delfin project using command below. (Note: In future we will upload\n#    this image to docker-hub and it can be downloaded with out this step)\n#\n#    $ docker build -t sodafoundation/delfin .\n#\n# 2. Export (optional) environment vars for hostnames for redis, rabbitmq and credentials for rabbitmq. Eg.\n#    Here is example for exporting variable and its current values in setup.\n#\n#    $ export DELFIN_RABBITMQ_USER=delfinuser\n#    $ export DELFIN_RABBITMQ_PASS=delfinpass\n#    $ export DELFIN_RABBITMQ_HOSTNAME=rabbitmq\n#    $ export DELFIN_REDIS_HOSTNAME=redis\n#    $ export DELFIN_METRICS_DIR=/var/lib/delfin/metrics\n#\n# 3. Bring up delfin project using following command\n#\n#    $ docker-compose up -d\n#\n# 4. When finished using delfin project, bring down containers using following command\n#\n#    $ docker-compose down\n#\n# 5. To bring up delfin project with multiple service instances\n#\n#    $ docker-compose up -d --scale <<service-name>>=<<number of instances>>\n#\n#    example: Deploy delfin with 3 delfin-task and 2 delfin-alert instances\n#       $ docker-compose up -d --scale delfin-task=3 --scale delfin-alert=2\n#    Note: Multiple instances of delfin-api are not allowed\n\nversion: '3.3'\n\nservices:\n\n  redis:\n    image: redis\n    container_name: ${DELFIN_REDIS_HOSTNAME:-redis}\n    command: redis-server\n    ports:\n      - ${DELFIN_REDIS_PORT:-6379}:6379\n    restart: always\n\n  rabbitmq:\n      image: rabbitmq:3-management\n      container_name: ${DELFIN_RABBITMQ_HOSTNAME:-rabbitmq}\n      environment:\n          RABBITMQ_DEFAULT_USER: ${DELFIN_RABBITMQ_USER:-delfinuser}\n          RABBITMQ_DEFAULT_PASS: ${DELFIN_RABBITMQ_PASS:-delfinpass}\n          RABBITMQ_DEFAULT_VHOST: \"/\"\n      ports:\n          - 5672:5672\n          - 15672:15672\n      restart: always\n\n  delfin-api:\n    image: sodafoundation/delfin\n    command: \"api\"\n    volumes:\n      - ./etc/delfin:/etc/delfin\n      - db_data:/var/lib/delfin\n    ports:\n      - 8190:8190\n    restart: always\n    environment:\n      - OS_COORDINATION__BACKEND_SERVER=${DELFIN_REDIS_HOSTNAME:-redis}:6379\n      - OS_DEFAULT__TRANSPORT_URL=rabbit://${DELFIN_RABBITMQ_USER:-delfinuser}:${DELFIN_RABBITMQ_PASS:-delfinpass}@${DELFIN_RABBITMQ_HOSTNAME:-rabbitmq}:5672//\n    depends_on:\n      - redis\n      - rabbitmq\n\n  delfin-task:\n    image: sodafoundation/delfin\n    command: \"task\"\n    volumes:\n      - ./etc/delfin:/etc/delfin\n      - db_data:/var/lib/delfin\n      - metrics_dir:${DELFIN_METRICS_DIR:-/var/lib/delfin/metrics}\n    restart: always\n    environment:\n      - OS_COORDINATION__BACKEND_SERVER=${DELFIN_REDIS_HOSTNAME:-redis}:6379\n      - 
OS_DEFAULT__TRANSPORT_URL=rabbit://${DELFIN_RABBITMQ_USER:-delfinuser}:${DELFIN_RABBITMQ_PASS:-delfinpass}@${DELFIN_RABBITMQ_HOSTNAME:-rabbitmq}:5672//\n      - OS_PROMETHEUS_EXPORTER__METRICS_DIR=${DELFIN_METRICS_DIR:-/var/lib/delfin/metrics}\n    depends_on:\n      - redis\n      - rabbitmq\n\n  delfin-alert:\n    image: sodafoundation/delfin\n    command: \"alert\"\n    volumes:\n      - ./etc/delfin:/etc/delfin\n      - db_data:/var/lib/delfin\n    restart: always\n    environment:\n      - OS_COORDINATION__BACKEND_SERVER=${DELFIN_REDIS_HOSTNAME:-redis}:6379\n      - OS_DEFAULT__TRANSPORT_URL=rabbit://${DELFIN_RABBITMQ_USER:-delfinuser}:${DELFIN_RABBITMQ_PASS:-delfinpass}@${DELFIN_RABBITMQ_HOSTNAME:-rabbitmq}:5672//\n    depends_on:\n      - redis\n      - rabbitmq\n\n  delfin-exporter:\n    image: sodafoundation/delfin\n    command: \"exporter\"\n    volumes:\n      - ./etc/delfin:/etc/delfin\n      - metrics_dir:${DELFIN_METRICS_DIR:-/var/lib/delfin/metrics}\n    ports:\n      - 8195:8195\n    restart: always\n    environment:\n      - OS_DEFAULT__TRANSPORT_URL=rabbit://${DELFIN_RABBITMQ_USER:-delfinuser}:${DELFIN_RABBITMQ_PASS:-delfinpass}@${DELFIN_RABBITMQ_HOSTNAME:-rabbitmq}:5672//\n      - OS_PROMETHEUS_EXPORTER__METRICS_DIR=${DELFIN_METRICS_DIR:-/var/lib/delfin/metrics}\n    depends_on:\n      - rabbitmq\n\nvolumes:\n  db_data: {}\n  metrics_dir: {}\n"
  },
  {
    "path": "etc/delfin/api-paste.ini",
    "content": "#############\n#  Delfin  #\n#############\n\n[composite:delfin]\nuse = call:delfin.api:root_app_factory\n/v1: delfin_api_v1\n\n[filter:http_proxy_to_wsgi]\npaste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory\n\n[pipeline:delfin_api_v1]\npipeline = cors http_proxy_to_wsgi context_wrapper delfin_api_v1app\n\n[app:delfin_api_v1app]\npaste.app_factory = delfin.api.v1.router:APIRouter.factory\n\n[filter:context_wrapper]\npaste.filter_factory = delfin.api.middlewares:ContextWrapper.factory\n\n[filter:cors]\npaste.filter_factory = oslo_middleware.cors:filter_factory\noslo_config_project = delfin\n"
  },
  {
    "path": "etc/delfin/delfin.conf",
    "content": "[DEFAULT]\napi_paste_config = /etc/delfin/api-paste.ini\ndelfin_cryptor = delfin.cryptor._Base64\napi_max_limit = 1000\n# Uncomment or add exporters\n# performance_exporters = PerformanceExporterPrometheus, PerformanceExporterKafka\n# alert_exporters = AlertExporterPrometheus\n\n[database]\nconnection = sqlite:////var/lib/delfin/delfin.sqlite\ndb_backend = sqlalchemy\n\n[TELEMETRY]\nperformance_collection_interval = 900\n\n[KAFKA_EXPORTER]\nkafka_topic_name = \"delfin-kafka\"\nkafka_ip = 'localhost'\nkafka_port = '9092'\n\n[PROMETHEUS_EXPORTER]\nmetric_server_ip = 0.0.0.0\nmetric_server_port = 8195\nmetrics_cache_file = /var/lib/delfin/delfin_exporter.txt\n\n[PROMETHEUS_ALERT_MANAGER_EXPORTER]\nalert_manager_host = 'localhost'\nalert_manager_port = '9093'\n"
  },
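  {
    "path": "docs/examples/read_delfin_conf.py",
    "content": "# Illustrative sketch added as documentation; not shipped delfin code.\n# It shows how settings in etc/delfin/delfin.conf can be read with\n# oslo.config. The option registration below is a stand-in for this example,\n# not delfin's actual option definitions.\nfrom oslo_config import cfg\n\nCONF = cfg.CONF\nCONF.register_opts(\n    [cfg.StrOpt('metric_server_ip', default='0.0.0.0'),\n     cfg.PortOpt('metric_server_port', default=8195)],\n    group='PROMETHEUS_EXPORTER')\n\nif __name__ == '__main__':\n    # Point --config-file at the same file the delfin services use.\n    CONF(['--config-file', '/etc/delfin/delfin.conf'], project='delfin')\n    print('%s:%s' % (CONF.PROMETHEUS_EXPORTER.metric_server_ip,\n                     CONF.PROMETHEUS_EXPORTER.metric_server_port))\n"
  },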
  {
    "path": "installer/README.md",
    "content": "# Delfin Installation Guide\n\nThe SODA Delfin supports two types of installation\n* Installation using Ansible\n* Installation using Bash scripts\n\n## Installation using Ansible\n\n* Supported OS: **Ubuntu 20.04, Ubuntu 18.04**\n* Prerequisite: **Python 3.6 or above** should be installed\n\n### Install steps\n\nEnsure no ansible & docker installed, OR Lastest ansible and docker tools are installed with versions listed below or later. If ansible & docker is not installed in the OS, script `install_dependencies.sh` will install it.\n\n```bash\nsudo apt-get update && sudo apt-get install -y git\ngit clone https://github.com/sodafoundation/delfin.git\n# git checkout <delfin-release-version v1.6.1+>\ncd delfin/installer\nchmod +x install_dependencies.sh && source install_dependencies.sh\ncd ansible\nexport PATH=$PATH:/home/$USER/.local/bin\nsudo -E env \"PATH=$PATH\" ansible-playbook site.yml -i local.hosts -v\n```\n\n**NOTE:** *Tools version used for verification of Delfin under Ubuntu 20.04*\n* ansible version: 5.10.0\n* docker version: 20.10.21\n* docker compose version: 2.12.2\n\n### Uninstall\n```bash\nsudo -E env \"PATH=$PATH\" ansible-playbook clean.yml -i local.hosts -v\n```\n\n\n### Logs\nDelfin processes execution logs can be found in /tmp/ folder\n* /tmp/api.log\n* /tmp/alert.log\n* /tmp/task.log\n* /tmp/exporter.log\n* /tmp/create_db.log\n\n### How to use Delfin\nDelfin can be used either through dashboard or REST APIs.\n\nPlease refer [user guides](https://docs.sodafoundation.io/guides/user-guides/delfin/dashboard/)\n\n\n\n## Installation using Bash Scripts\nThis is a standalone/non-containerized installer for SODA Infrastructure Manager (delfin) project.\nIt contains a script and options to check the environment feasible for installing delfin. Installs required dependent software/binaries.\n\n* Supported OS: **Ubuntu 20.04, Ubuntu 18.04**\n* Prerequisite:\n  * **Python 3.6 or above** should be installed\n  * Ensure the logged-in user has **root privileges**.\n\n#### Installation steps\n```bash\nsudo -i\napt-get install python3 python3-pip\ngit clone https://github.com/sodafoundation/delfin.git && git checkout <delfin-release-version>\ncd delfin\nexport PYTHONPATH=$(pwd)\n./installer/install\n```\nRefer below for installer options\n\n#### Uninstall\n```bash\n./installer/uninstall\n```\n\n- #### [Optional] Setup Prometheus (for monitor performance metric through prometheus)\n\n  Follow the below steps to setup delfin with prometheus. Once your setup is ready, you can register the storage devices for performance monitoring. Later, the performance metrics can be viewed on prometheus server. This example also guides you to configure and update the targets and interval for scraping the metrics.\n\n  Alternatively, you can also watch this [video](https://drive.google.com/file/d/1WMmLXQeNlToZd0DP5hCFtDZ1IbNJpO6B/view?usp=drivesdk) for more detail.\n\n  [Download the latest binaries from here](https://prometheus.io/download/) and run the below steps.\n\n     1. tar xvfz prometheus-*.tar.gz\n\n     2. cd prometheus-*\n     3. Edit the prometheus.yml and set the appropriate target, interval and metrics_api path. \n        Below is sample example of prometheus.yml\n        ###### prometheus.yml\n        ```\n        global:\n          scrape_interval: 10s\n        scrape_configs:\n          - job_name: delfin-prometheus\n            metrics_path: /metrics\n            static_configs:\n              - targets:\n                  - 'localhost:8195'\n        ```\n     4. 
./prometheus\n        \n        Example:\n        ```sh\n         root@root:/prometheus/prometheus-2.20.0.linux-amd64$ ./prometheus\n        ```\n\n### Structure of the installer\nThis installer comes with options for pre-check, install and uninstall.\n- Pre-check: checks for the components required by delfin to function; if they are not present, pre-check will install them.\n- Install: installs and starts the delfin processes.\n- Uninstall: uninstalls delfin. It does not uninstall the required components; you may need to uninstall them explicitly using the native approach.\n\n### How to install\nTo get help, execute 'install -h'; it will show the help information.\n\nThe install script can be executed with three different switches to:\n- either do a pre-check [./install -p]\n- only run the installer without doing a pre-check (if the pre-check has been executed explicitly) [./install -s]\n- execute the pre-check as well as the install [./install]\n\n#### For the available install options, you can execute 'install -h'\n```sh\n installer/install -h\n\n # Example\n root@root1:~/delfin-demo/delfin$ installer/install -h\n\n Usage install [--help|--precheck|--skip_precheck]\n Usage:\n     install [-h|--help]\n     install [-p|--precheck]\n     install [-s|--skip_precheck]\n Flags:\n     -h, --help Print the usage of install\n     -p, --precheck Only perform system software requirements for installation\n     -s, --skip_precheck If precheck is not required and directly install\n```\n\n#### For pre-check, run the command below\n```sh\ninstaller/install -p\n\n# Example\n\nroot@root1:~/delfin-demo/delfin$ installer/install -p\n                            OR\nroot@root1:~/delfin-demo/delfin/installer$ ./install --precheck\n```\n\n#### Install without pre-check\n```sh\ninstaller/install -s\n\n# Example\n\nroot@root1:~/delfin-demo/delfin$ installer/install -s\n```\n\n#### Execute install with pre-check\n```sh\ninstaller/install\n\n# Example\nroot@root1:~/delfin-demo/delfin$ installer/install\n```\n\n#### Configure multiple instances of delfin components\nSet the respective environment variable for the desired number of instances\nof a delfin component before executing the install command.\n\n```sh\n$ export DELFIN_<<delfin component name>>_INSTANCES=<<number of instances>>\n$ installer/install\n\n# Example: Deploy delfin with 3 task and 2 alert instances\n  $ export DELFIN_TASK_INSTANCES=3\n  $ export DELFIN_ALERT_INSTANCES=2\n  $ installer/install\n```\n\nNote: Multiple instances of exporter and api are not currently allowed.\n\n### Logs\nAll the installer logs are stored in the /var/log/soda directory.\nThe logs can be uniquely identified based upon the timestamp.\n\n\n## Test the running delfin setup/process\n  1. 
Make sure all delfin processes are up and running\n     ```\n     ps -ef|grep delfin\n\n     # Example\n       root@root1:~/delfin-demo/delfin# ps -ef |grep delfin\n       root       25856    3570  0 00:21 pts/0    00:00:04 python3 /root/delfin-demo/delfin/installer/../delfin/cmd/api.py --config-file /etc/delfin/delfin.conf\n       root       25858    3570  0 00:21 pts/0    00:00:09 python3 /root/delfin-demo/delfin/installer/../delfin/cmd/task.py --config-file /etc/delfin/delfin.conf\n       root       25860    3570  0 00:21 pts/0    00:00:06 python3 /root/delfin-demo/delfin/installer/../delfin/cmd/alert.py --config-file /etc/delfin/delfin.conf\n       root       25862    3570  0 00:21 pts/0    00:00:00 python3 /root/delfin-demo/delfin/installer/../delfin/exporter/exporter_server.py --config-file /etc/delfin/delfin.conf\n\n     ```\n\n  2. Register storages\n\n     POST http://localhost:8190/v1/storages\n\n     body :\n     ```\n     {\n        \"vendor\":\"fake_storage\",\n        \"model\":\"fake_driver\",\n        \"rest\":{\n           \"host\":\"127.0.0.1\",\n           \"port\":8088,\n           \"username\":\"admin\",\n           \"password\":\"pass\"\n        },\n        \"extra_attributes\":{\n           \"array_id\":\"12345\"\n        }\n     }\n     ```\n  3. Run the GET API to get the registered storages.\n\n     GET http://localhost:8190/v1/storages\n\n     Use the storage_id to register a storage for performance collection or alert monitoring.\n\n  4. [Optional] If Prometheus is configured, monitor the performance metrics on the Prometheus server at the default location\n\n     http://localhost:9090/graph\n\n## Limitation\nLocal installation, unlike the Ansible installer, does not support SODA Dashboard integration.\n"
  },
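  {
    "path": "docs/examples/register_fake_storage.py",
    "content": "#!/usr/bin/env python3\n# Illustrative sketch added as documentation; not shipped delfin code.\n# It registers the fake storage from installer/README.md against a locally\n# running delfin API and prints the storage_id, using only the standard\n# library. The endpoint and credentials are the README's example values.\nimport json\nimport urllib.request\n\nBODY = {\n    'vendor': 'fake_storage',\n    'model': 'fake_driver',\n    'rest': {\n        'host': '127.0.0.1',\n        'port': 8088,\n        'username': 'admin',\n        'password': 'pass',\n    },\n    'extra_attributes': {'array_id': '12345'},\n}\n\nif __name__ == '__main__':\n    req = urllib.request.Request(\n        'http://localhost:8190/v1/storages',\n        data=json.dumps(BODY).encode('utf-8'),\n        headers={'Content-Type': 'application/json'},\n        method='POST')\n    with urllib.request.urlopen(req) as resp:\n        storage = json.loads(resp.read())\n    # The returned id is the storage_id used when registering the storage\n    # for performance collection or alert monitoring.\n    print(storage.get('id'))\n"
  },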
  {
    "path": "installer/ansible/clean.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n# Defines some clean processes when banishing the nodes.\n- name: Cleanup delfin installation\n  hosts: \n    - delfin-nodes\n  remote_user: root\n  vars_files:\n    - group_vars/delfin.yml\n  gather_facts: false\n  become: True\n  tasks:\n    - import_role:\n        name: cleaner"
  },
  {
    "path": "installer/ansible/group_vars/delfin.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n# Dummy variable to avoid error because ansible does not recognize the\n# file as a good configuration file when no variable in it.\ndummy:\n\n\n###########\n# GENERAL #\n###########\n\n# This field indicates local machine host ip\nhost_ip: 127.0.0.1\n\n# delfin installation types are: 'repository', 'release' and 'container''\ndelfin_installation_type: repository\n\n# These fields below will specify the tag based on install from type\ndelfin_branch: master\n\n# Delfin  projects release versions\ndelfin_release_version: v1.9.0\n\n# delete all source packages\nsource_purge: true\n\n# delete database\ndatabase_purge: true\n\n# URLs, Environment Variables, IP addresses and Ports list\nsoda_delfin_url: \"http://{{ host_ip }}:8190\"\n\n# These fields are NOT suggested to be modified\ndelfin_work_dir: /opt/delfin-linux-amd64\ndelfin_config_dir: /etc/delfin\nvenv: \"{{ delfin_work_dir }}/venv\"\n\ndelfin_redis_ip: 127.0.0.1\ndelfin_redis_port: 6379\n\ndelfin_rabbitmq_user: delfinuser\ndelfin_rabbitmq_pass: delfinpass\n\n# Configurable Perf collection interval in seconds\nperformance_collection_interval: 900\n\n# Enable dynamic subprocess optimization for Perf collection\nenable_dynamic_subprocess: false\n\n# Exporter configurations for Kafka, Prometheus & Alert Manager\n# Uncomment exporters to enable\nperformance_exporters: #PerformanceExporterPrometheus, PerformanceExporterKafka\nalert_exporters: #AlertExporterPrometheus\n\n# Exporter configurations for Kafka\ndelfin_exporter_kafka_ip: 'localhost'\ndelfin_exporter_kafka_port: 9092\ndelfin_exporter_kafka_topic: 'delfin-kafka'\n\n# Exporter configurations for Prometheus\ndelfin_exporter_prometheus_ip: 0.0.0.0\ndelfin_exporter_prometheus_port: 8195\ndelfin_exporter_prometheus_metrics_dir: '/var/lib/delfin/metrics'\n\n# Exporter configurations for Alert Manager\ndelfin_exporter_alertmanager_host: 'localhost'\ndelfin_exporter_alertmanager_port: 9093\n\n##############\n# REPOSITORY #\n##############\n\n# If user specifies intalling from repository, then he can choose the specific\n# repository branch\ndelfin_repo_branch: \"{{ delfin_branch }}\"\n\n# These fields are NOT suggested to be modified\ndelfin_remote_url: https://github.com/sodafoundation/delfin.git\n\n###########\n# RELEASE #\n###########\n\n# If user specifies intalling from release,then he can choose the specific version\ndelfin_release: \"{{ delfin_release_version }}\"\n\n# These fields are NOT suggested to be modified\ndelfin_download_url: https://github.com/sodafoundation/delfin/archive/{{ delfin_release }}.tar.gz\ndelfin_tarball_dir: /tmp/sodafoundation-delfin-{{ delfin_release }}-linux-amd64\n"
  },
  {
    "path": "installer/ansible/local.hosts",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[delfin-nodes]\nlocalhost ansible_connection=local\n"
  },
  {
    "path": "installer/ansible/roles/cleaner/scenarios/delfin.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Stop delfin containers, if started\n  shell: \"{{ item }}\"\n  with_items:\n    - docker compose down\n  become: yes\n  ignore_errors: yes\n  args:\n    chdir: \"{{ delfin_work_dir }}\"\n\n- name: Get running delfin processes\n  shell: \"ps -ef | grep -v grep | grep -i 'python3 /opt/delfin-linux-amd64/delfin/' | awk '{print $2}'\"\n  register: running_processes\n\n- name: Kill running delfin processes\n  shell: \"kill {{ item }}\"\n  with_items: \"{{ running_processes.stdout_lines }}\"\n  ignore_errors: yes\n\n- wait_for:\n    path: \"/proc/{{ item }}/status\"\n    state: absent\n  with_items: \"{{ running_processes.stdout_lines }}\"\n  ignore_errors: yes\n  register: killed_processes\n\n- name: Force kill stuck processes\n  shell: \"kill -9 {{ item }}\"\n  with_items: \"{{ killed_processes.results | select('failed') | map(attribute='item') | list }}\"\n\n- name: Stop service of delfin, if started\n  service:\n    name: \"{{ item }}\"\n    state: stopped\n  with_items:\n    - rabbitmq-server\n    - redis-server\n  become: yes\n  ignore_errors: yes\n\n- name: clean up all delfin directories\n  file:\n    path: \"{{ item }}\"\n    state: absent\n    force: yes\n  with_items:\n    - \"{{ delfin_work_dir }}\"\n    - \"{{ delfin_tarball_dir }}\"\n  ignore_errors: yes\n  tags:\n    - delfin\n    - clean\n  when:\n    - source_purge is undefined or source_purge != false\n"
  },
  {
    "path": "installer/ansible/roles/cleaner/scenarios/release.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: clean up all release files if installed from release\n  file:\n    path: \"{{ item }}\"\n    state: absent\n    force: yes\n  with_items:\n    - \"{{ delfin_tarball_dir }}\"\n  ignore_errors: yes\n  tags: clean\n"
  },
  {
    "path": "installer/ansible/roles/cleaner/tasks/main.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n- name: include scenarios/release.yml if installed from release\n  include_tasks: scenarios/release.yml\n  when: delfin_installation_type == \"release\"\n\n- name: include scenarios/delfin.yml for cleaning up delfin service\n  include_tasks: scenarios/delfin.yml\n  tags: delfin\n\n- name: clean all configuration and log files\n  file:\n    path: \"{{ item }}\"\n    state: absent\n    force: yes\n  with_items:\n    - \"{{ delfin_config_dir }}\"\n  ignore_errors: yes\n"
  },
  {
    "path": "installer/ansible/roles/delfin-installer/scenarios/container.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: build and install delfin using containerized deployment\n  shell: \"{{ item }}\"\n  with_items:\n    - docker build -t sodafoundation/delfin .\n    - DELFIN_REDIS_HOSTNAME=delfin_redis DELFIN_REDIS_PORT={{ delfin_redis_port }} DELFIN_METRICS_DIR={{ delfin_exporter_prometheus_metrics_dir }} DELFIN_HOST_IP={{ host_ip }} DELFIN_RABBITMQ_USER={{ delfin_rabbitmq_user }} DELFIN_RABBITMQ_PASS={{ delfin_rabbitmq_pass }} docker compose up -d\n  become: yes\n  args:\n    chdir: \"{{ delfin_work_dir }}\"\n"
  },
  {
    "path": "installer/ansible/roles/delfin-installer/scenarios/rabbitmq.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n- name: Check if RabbitMQ Service Exists\n  stat: path=/etc/init.d/rabbitmq-server\n  register: rabbitmqservice\n\n- name: Remove useless packages from the cache\n  apt:\n    autoclean: yes\n\n- name: Import RabbitMQ public key\n  apt_key:\n    url: https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc\n    state: present\n  become: yes\n  when:\n    - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false\n\n- name: Add Launchpad Erlang PPA key\n  apt_key:\n    keyserver: keyserver.ubuntu.com \n    id: F77F1EDA57EBB1CC\n  become: yes\n  when:\n    - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false\n\n- name: Add PackageCloud RabbitMQ repository\n  apt_key:\n    url: https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey\n    state: present\n  become: yes\n  when:\n    - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false\n\n- name: Add RabbitMQ Erlang official repo\n  apt_repository: \n    repo: deb http://ppa.launchpad.net/rabbitmq/rabbitmq-erlang/ubuntu {{ ansible_distribution_release }} main\n    state: present\n    filename: rabbitmq\n  become: yes\n  when:\n    - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false\n\n- name: Add RabbitMQ Server official repo\n  apt_repository: \n    repo: deb https://packagecloud.io/rabbitmq/rabbitmq-server/ubuntu/ {{ ansible_distribution_release }} main\n    state: present\n    filename: rabbitmq\n  become: yes\n  when:\n    - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false\n\n- name: Import Erlang public key\n  apt_key:\n    url: https://packages.erlang-solutions.com/debian/erlang_solutions.asc\n    state: present\n  become: yes\n  when:\n    - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false\n\n- name: Add Erlang official repo\n  apt_repository:\n    repo: deb https://binaries.erlang-solutions.com/debian {{ ansible_distribution_release }} contrib\n    state: present\n    filename: erlang\n  become: yes\n  when:\n    - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false\n\n- name: Install RabbitMQ package\n  apt: \n    name: rabbitmq-server\n    update_cache: yes\n    install_recommends: yes\n    allow_unauthenticated: yes\n    state: present\n  become: yes\n  when:\n    - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false\n\n- name: Start the RabbitMQ server\n  service:\n    name: rabbitmq-server\n    state: started\n  become: yes\n"
  },
  {
    "path": "installer/ansible/roles/delfin-installer/scenarios/redis.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n\n- name: Check if Redis Service Exists\n  stat: path=/etc/init.d/redis-server\n  register: redisservice\n\n- name: Ensure Redis is present\n  shell: \"{{ item }}\"\n  with_items:\n    - ulimit -n 65536\n    - apt-get install -y redis-server\n  when:\n    - redisservice.stat.exists is undefined or redisservice.stat.exists == false\n\n- name: Change the redis default port\n  replace:\n    path: /etc/redis/redis.conf\n    regexp: \"port 6379\"\n    replace: \"port {{ delfin_redis_port }}\"\n  become: yes\n\n- name: Ensure Redis is restarted\n  service: \n    name: redis-server\n    state: restarted\n  become: yes\n"
  },
  {
    "path": "installer/ansible/roles/delfin-installer/scenarios/source-code.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n# Install and start delfin\n- name: Check for delfin source code existed\n  stat:\n    path: \"{{ delfin_work_dir }}/setup.py\"\n  register: delfinexisted\n\n- name: Download delfin source code if not exists\n  git:\n    repo: \"{{ delfin_remote_url }}\"\n    dest: \"{{ delfin_work_dir }}\"\n    version: \"{{ delfin_repo_branch }}\"\n  when:\n    - delfin_installation_type != \"release\"\n    - delfinexisted.stat.exists is undefined or delfinexisted.stat.exists == false\n\n- name: ensure delfin directory exists\n  file:\n    path: \"{{ delfin_tarball_dir }}\"\n    state: directory\n  when:\n    - delfin_installation_type == \"release\"\n    - delfinexisted.stat.exists is undefined or delfinexisted.stat.exists == false\n\n- name: download and extract the delfin release tarball if not exists\n  unarchive:\n    src: \"{{ delfin_download_url }}\"\n    dest: \"{{ delfin_tarball_dir }}\"\n    remote_src: yes\n    extra_opts: [--strip-components=1]\n  when:\n    - delfin_installation_type == \"release\"\n    - delfinexisted.stat.exists is undefined or delfinexisted.stat.exists == false\n\n- name: copy delfin tarball into delfin work directory\n  copy:\n    src: \"{{ delfin_tarball_dir }}/\"\n    dest: \"{{ delfin_work_dir }}\"\n    mode: 0755\n  become: yes\n  when:\n    - delfin_installation_type == \"release\"\n    - delfinexisted.stat.exists is undefined or delfinexisted.stat.exists == false\n\n- name: Update redis ip & port configuration\n  ini_file:\n    create: no\n    path: \"{{ delfin_work_dir }}/etc/delfin/delfin.conf\"\n    section: coordination\n    option: backend_server\n    value: \"{{ delfin_redis_ip }}:{{ delfin_redis_port }}\"\n  become: yes\n  when: delfin_installation_type != \"container\"\n\n# Telemetry option for perf collection interval\n- name: Update Performance collection interval\n  ini_file:\n    create: no\n    path: \"{{ delfin_work_dir }}/etc/delfin/delfin.conf\"\n    section: TELEMETRY\n    option: performance_collection_interval\n    value: \"{{ performance_collection_interval }}\"\n  become: yes\n\n# Telemetry option for enabling dynamic processes\n- name: Enable dynamic sub-processes for performance collection\n  ini_file:\n    create: no\n    path: \"{{ delfin_work_dir }}/etc/delfin/delfin.conf\"\n    section: TELEMETRY\n    option: enable_dynamic_subprocess\n    value: \"{{ enable_dynamic_subprocess }}\"\n  become: yes\n\n# Performance Export Configurations\n- name: Check and remove performance exporters configs\n  ini_file:\n    create: no\n    state: absent\n    path: \"{{ delfin_work_dir }}/etc/delfin/delfin.conf\"\n    section: DEFAULT\n    option: performance_exporters\n    value: \"\"\n  become: yes\n  when:\n    - performance_exporters == None\n\n- name: Enable Performance Exporter configuration\n  ini_file:\n    create: no\n    path: \"{{ delfin_work_dir }}/etc/delfin/delfin.conf\"\n    section: DEFAULT\n    option: \"{{ item.option }}\"\n    
value: \"{{ item.value }}\"\n  with_items:\n    - { option: performance_exporters, value: \"{{ performance_exporters }}\" }\n  become: yes\n  when:\n    - performance_exporters != None\n\n# Performance exporter - Kafka configuration\n- name: Update Kafka Exporter configuration\n  ini_file:\n    create: no\n    path: \"{{ delfin_work_dir }}/etc/delfin/delfin.conf\"\n    section: KAFKA_EXPORTER\n    option: \"{{ item.option }}\"\n    value: \"{{ item.value }}\"\n  with_items:\n    - { option: kafka_ip, value: \"{{ delfin_exporter_kafka_ip }}\" }\n    - { option: kafka_port, value: \"{{ delfin_exporter_kafka_port }}\" }\n    - { option: kafka_topic_name, value: \"{{ delfin_exporter_kafka_topic }}\" }\n  become: yes\n  when:\n    - performance_exporters != None\n    - \"'PerformanceExporterKafka' in performance_exporters\"\n\n# Performance exporter - Prometheus configuration\n- name: Update Prometheus Exporter configuration\n  ini_file:\n    create: no\n    path: \"{{ delfin_work_dir }}/etc/delfin/delfin.conf\"\n    section: PROMETHEUS_EXPORTER\n    option: \"{{ item.option }}\"\n    value: \"{{ item.value }}\"\n  with_items:\n    - { option: metric_server_ip, value: \"{{ delfin_exporter_prometheus_ip }}\" }\n    - { option: metric_server_port, value: \"{{ delfin_exporter_prometheus_port }}\" }\n    - { option: metrics_dir, value: \"{{ delfin_exporter_prometheus_metrics_dir }}\" }\n  become: yes\n  when:\n    - performance_exporters != None\n    - \"'PerformanceExporterPrometheus' in performance_exporters\"\n\n# Alert Exporter Configurations\n- name: Check and remove alert exporters configs\n  ini_file:\n    create: no\n    state: absent\n    path: \"{{ delfin_work_dir }}/etc/delfin/delfin.conf\"\n    section: DEFAULT\n    option: alert_exporters\n    value: \"\"\n  become: yes\n  when:\n    - alert_exporters == None\n\n- name: Enable AlertManager Exporter configuration\n  ini_file:\n    create: no\n    path: \"{{ delfin_work_dir }}/etc/delfin/delfin.conf\"\n    section: DEFAULT\n    option: \"{{ item.option }}\"\n    value: \"{{ item.value }}\"\n  with_items:\n        - { option: alert_exporters, value: \"{{ alert_exporters }}\" }\n  become: yes\n  when:\n    - alert_exporters != None\n\n- name: Update AlertManager Exporter configuration\n  ini_file:\n    create: no\n    path: \"{{ delfin_work_dir }}/etc/delfin/delfin.conf\"\n    section: PROMETHEUS_ALERT_MANAGER_EXPORTER\n    option: \"{{ item.option }}\"\n    value: \"{{ item.value }}\"\n  with_items:\n        - { option: alert_manager_host, value: \"{{ delfin_exporter_alertmanager_host }}\" }\n        - { option: alert_manager_port, value: \"{{ delfin_exporter_alertmanager_port }}\" }\n  become: yes\n  when:\n    - alert_exporters != None\n    - \"'AlertExporterPrometheus' in alert_exporters\"\n\n- name: Create delfin config dir in host\n  file:\n    path: \"{{ delfin_config_dir }}\"\n    state: directory\n    mode: 0755\n  become: yes\n\n- name: copy delfin configs to host\n  copy:\n    src: \"{{ delfin_work_dir }}/etc/delfin/{{ item }}\"\n    dest: \"{{ delfin_config_dir }}/{{ item }}\"\n    mode: 0755\n  become: yes\n  with_items:\n    - delfin.conf\n    - api-paste.ini\n"
  },
  {
    "path": "installer/ansible/roles/delfin-installer/scenarios/start-delfin.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n# Start delfin\n- name: Install sqlite3 package\n  apt: \n    name: sqlite3\n    state: present\n  become: yes\n\n- name: Install python virtual environment\n  pip:\n    name: virtualenv\n    state: latest\n    executable: pip3\n  become: yes\n\n- name: Install python requirements\n  pip:\n    requirements: '{{ delfin_work_dir }}/requirements.txt'\n    virtualenv: '{{ delfin_work_dir }}/venv'\n  become: yes\n\n- name: Install python virtual environment activate script\n  template:\n    src: ./script/virtualenv3_exec.j2\n    dest: '{{ venv }}/exec'\n    mode: 755\n  become: yes\n\n- name: Copy delfin db register script\n  copy:\n    src: ./script/create_db.py\n    dest: '{{ delfin_work_dir }}/delfin/cmd/create_db.py'\n  become: yes\n\n- name: Get previously running delfin processes\n  shell: \"ps -ef | grep -v grep | grep {{ delfin_work_dir }}/delfin/cmd/ | awk '{print $2}'\"\n  register: running_processes\n\n- name: Kill running delfin processes\n  shell: \"kill {{ item }}\"\n  with_items: \"{{ running_processes.stdout_lines }}\"\n\n- wait_for:\n    path: \"/proc/{{ item }}/status\"\n    state: absent\n  with_items: \"{{ running_processes.stdout_lines }}\"\n  ignore_errors: yes\n  register: killed_processes\n\n- name: Force kill stuck processes\n  shell: \"kill -9 {{ item }}\"\n  with_items: \"{{ killed_processes.results | select('failed') | map(attribute='item') | list }}\"\n\n- name: Execute delfin manager\n  shell: \"{{ item }}\"\n  become: yes\n  with_items:\n    - '{{ venv }}/exec {{ delfin_work_dir }}/setup.py install'\n    - '{{ venv }}/exec {{ delfin_work_dir }}/delfin/cmd/create_db.py --config-file {{ delfin_config_dir }}/delfin.conf  >/tmp/create_db.log 2>&1 &'\n    - '{{ venv }}/exec {{ delfin_work_dir }}/delfin/cmd/api.py --config-file {{ delfin_config_dir }}/delfin.conf  >/tmp/api.log 2>&1 &'\n    - '{{ venv }}/exec {{ delfin_work_dir }}/delfin/cmd/task.py --config-file {{ delfin_config_dir }}/delfin.conf  >/tmp/task.log 2>&1 &'\n    - '{{ venv }}/exec {{ delfin_work_dir }}/delfin/cmd/alert.py --config-file {{ delfin_config_dir }}/delfin.conf  >/tmp/alert.log 2>&1 &'\n    - '{{ venv }}/exec {{ delfin_work_dir }}/delfin/exporter/prometheus/exporter_server.py --config-file {{ delfin_config_dir }}/delfin.conf  >/tmp/exporter.log 2>&1 &'\n  args:\n    chdir: \"{{ delfin_work_dir }}\"\n"
  },
  {
    "path": "installer/ansible/roles/delfin-installer/tasks/main.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n- name: Started installation of delfin\n  debug: \n    msg: \"Installing delfin {{ delfin_installation_type }} on {{ host_ip }}\"\n\n- name: Check and Install Redis\n  include_tasks: scenarios/redis.yml\n  when: delfin_installation_type != \"container\"\n\n- name: Check and Install RabbitMQ\n  include_tasks: scenarios/rabbitmq.yml\n  when: delfin_installation_type != \"container\"\n\n- name: Get delfin source code\n  include_tasks: scenarios/source-code.yml\n\n- name: Start delfin processes\n  include_tasks: scenarios/start-delfin.yml\n  when: delfin_installation_type != \"container\"\n\n- name: containerized delfin deployment\n  include_tasks: scenarios/container.yml\n  when: delfin_installation_type == \"container\"\n"
  },
  {
    "path": "installer/ansible/script/create_db.py",
    "content": "#!/usr/bin/env python\n\n# Copyright 2022 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\");\n#    you may not use this file except in compliance with the License.\n#    You may obtain a copy of the License at\n#\n#        http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS,\n#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#    See the License for the specific language governing permissions and\n#    limitations under the License.\n\n\"\"\"db create  script for delfin \"\"\"\n\nimport os\nimport sys\nfrom oslo_config import cfg\nfrom delfin import db\nfrom delfin import version\nfrom oslo_db import options as db_options\nCONF = cfg.CONF\ndb_options.set_defaults(cfg.CONF,\n                        connection='sqlite:////var/lib/delfin/delfin.sqlite')\n\n\ndef remove_prefix(text, prefix):\n    if text.startswith(prefix):\n        return text[len(prefix):]\n    return text\n\n\ndef main():\n    CONF(sys.argv[1:], project='delfin',\n         version=version.version_string())\n    connection = CONF.database.connection\n    head_tail = os.path.split(connection)\n    path = remove_prefix(head_tail[0], 'sqlite:///')\n    if not os.path.exists(path):\n        os.makedirs(path)\n    db.register_db()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "installer/ansible/script/virtualenv3_exec.j2",
    "content": "#!/usr/bin/env bash\n\n# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nsource {{ venv }}/bin/activate\npython3 $@\n"
  },
  {
    "path": "installer/ansible/site.yml",
    "content": "# Copyright 2022 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n---\n# Defines deployment design and assigns role to node groups\n- hosts:\n  - delfin-nodes\n  gather_facts: true\n  any_errors_fatal: true\n  become: True\n  \n- name: Install delfin\n  hosts: delfin-nodes\n  remote_user: root\n  vars_files:\n    - group_vars/delfin.yml\n  gather_facts: false\n  become: True\n  tasks:\n    - import_role:\n        name: delfin-installer\n  tags: delfin\n"
  },
  {
    "path": "installer/helper.py",
    "content": "#!/usr/bin/python3\n\n# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport shutil\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom datetime import datetime\n\nlog_filename = 'delfin_installer.log' + \\\n               datetime.now().strftime(\"%d_%m_%Y_%H_%M_%s\")\nLOGGING_FORMAT = \"[%(asctime)s] [%(levelname)s] [%(filename)s] \" \\\n                 \"[%(funcName)s():%(lineno)s] [PID:%(process)d\" \\\n                 \"TID:%(thread)d] %(message)s\"\nLOGGING_LEVEL = \"INFO\"\nlogger = None\nlogfile = ''\ndelfin_log_dir = '/var/log/soda/'\n\n\ndef init_logging():\n    global logfile\n    global logger\n\n    try:\n        os.mkdir(delfin_log_dir)\n    except OSError:\n        pass\n    logfile = delfin_log_dir + log_filename\n    server_log_file = RotatingFileHandler(logfile, maxBytes=10000,\n                                          backupCount=5)\n    logger = logging.getLogger()\n    logger.setLevel(logging.INFO)\n    formatter = logging.Formatter(LOGGING_FORMAT)\n    server_log_file.setFormatter(formatter)\n    logger.addHandler(server_log_file)\n\n\ndef create_dir(dirname=None):\n    try:\n        os.mkdir(dirname)\n    except OSError as ose:\n        logger.warning(\"Directory [%s] already exists: [%s]\" % (dirname, ose))\n        pass\n    except Exception as e:\n        logger.error(\"Error in creating Directory [%s] [%s]\" % (dirname, e))\n        return\n\n\ndef create_file(filename):\n    if not os.path.isfile(filename):\n        os.mknod(filename, 0o777)\n\n\ndef copy_files(src=None, dest=None):\n    logger.info(\"Copying [%s] to [%s]\" % (src, dest))\n    shutil.copy(src, dest)\n\n\ninit_logging()\n"
  },
  {
    "path": "installer/install",
    "content": "#!/bin/bash\n# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nPYTHON='python3'\nBASEDIR=$(dirname \"$0\")\n\n\ndelfin_install_usage(){\n    echo \"Usage $(basename $0) [--help|--precheck|--skip_precheck]\"\ncat << DELFIN_INSTALL_INFO\nUsage:\n    $(basename $0) [-h|--help]\n    $(basename $0) [-p|--precheck]\n    $(basename $0) [-s|--skip_precheck]\nFlags:\n    -h, --help Print the usage of install\n    -p, --precheck Only perform system software requirements for installation\n    -s, --skip_precheck If precheck is not required and directly install\nDELFIN_INSTALL_INFO\n}\n\nprecheck(){\n    delfin::log \"Precheck called..\"\n    echo $!\n    source ${BASEDIR}/precheck\n}\n\nprecheck_and_install(){\n    delfin::log \"precheck_and_install\"\n    precheck\n    install\n}\n\n\ninstall(){\n    delfin::log \"Install called..\"\n\n    ${PYTHON} ${BASEDIR}/install_delfin.py\n}\n\nmain(){\n    \n    first_arg=${1}\n    second_arg=${2}\n    source ${BASEDIR}/util.sh\n\n    echo \"${PROJECT_NAME} installation started... \"\n    case \"${first_arg} ${second_arg}\" in\n    \"1 -p\"|\"1 --precheck\")\n        precheck\n        ;;\n    \"0 \")\n        precheck_and_install\n        ;;\n    \"1 -s\"|\"1 --skip_precheck\")\n        install\n        ;;\n    *)\n        delfin_install_usage\n        exit 1\n        ;;\n    esac\n}\n\n# Entry point. START\nargs=\"$# $*\"\n\nmain ${args}\n"
  },
  {
    "path": "installer/install.conf",
    "content": "# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Add all the required configs here\npython_version=3.x\npip_version=3.x\n"
  },
  {
    "path": "installer/install_delfin.py",
    "content": "#!/usr/bin/python3\n\n# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport subprocess\nimport traceback as tb\nfrom subprocess import CalledProcessError\n\nfrom installer.helper import copy_files, create_dir, \\\n    logger, logfile, delfin_log_dir, create_file\n\ndelfin_source_path = ''\ndelfin_etc_dir = '/etc/delfin'\ndelfin_var_dir = '/var/lib/delfin'\nconf_file = os.path.join(delfin_etc_dir, 'delfin.conf')\nproj_name = 'delfin'\nDEVNULL = '/dev/null'\n\n\ndef _activate():\n    path_to_activate = os.path.join(delfin_source_path, 'installer',\n                                    proj_name, 'bin/activate')\n    command = '. ' + path_to_activate\n    os.system(command)\n\n\n# Initialize the settings first\ndef init():\n    pass\n\n\ndef create_delfin_db():\n    try:\n        db_path = os.path.join(delfin_source_path, 'script', 'create_db.py')\n        subprocess.check_call(['python3', db_path,\n                               '--config-file', conf_file])\n    except CalledProcessError as cpe:\n        logger.error(\"Got CPE error [%s]:[%s]\" % (cpe, tb.print_exc()))\n        return\n    logger.info('db created ')\n\n\ndef start_processes():\n    processes = ['api', 'task', 'alert']\n    # Start cmd processes\n    for process in processes:\n        env_var = 'DELFIN_' + process.upper() + '_INSTANCES'\n        try:\n            instances = os.environ.get(env_var)\n            # Ignore multiple instance of api\n            if not instances or process == 'api':\n                instances = '1'\n            start_process(process, int(instances))\n        except CalledProcessError as cpe:\n            logger.error(\"Got CPE error [%s]:[%s]\" % (cpe, tb.print_exc()))\n            return\n        except ValueError as e:\n            logger.error(\n                \"Got invalid [%s] environment variable:[%s]\" % (env_var, e))\n            return\n\n    # Start exporter server process\n    proc_path = os.path.join(delfin_source_path, 'delfin', 'exporter',\n                             'prometheus', 'exporter_server.py')\n    command = 'python3 ' + proc_path + ' --config-file ' + \\\n              conf_file + ' >' + DEVNULL + ' 2>&1 &'\n    logger.info(\"Executing command [%s]\", command)\n    os.system(command)\n    logger.info(\"Exporter process_started\")\n\n\ndef start_process(process, instances=1):\n    for instance in range(0, instances):\n        proc_path = os.path.join(delfin_source_path, 'delfin', 'cmd',\n                                 process + '.py')\n        command = 'python3 ' + proc_path + ' --config-file ' + \\\n                  conf_file + ' >' + DEVNULL + ' 2>&1 &'\n        logger.info(\"Executing command [%s]\", command)\n        os.system(command)\n        logger.info(\"[%s] process_started\", process)\n\n\ndef install_delfin():\n    python_setup_comm = ['build', 'install']\n    req_logs = os.path.join(delfin_log_dir, 'requirements.log')\n    command = 'pip3 install -r requirements.txt >' + req_logs + ' 
2>&1'\n    logger.info(\"Executing [%s]\", command)\n    os.system(command)\n\n    setup_file = os.path.join(delfin_source_path, 'setup.py')\n    for command in python_setup_comm:\n        try:\n            command = 'python3 ' + setup_file + ' ' + \\\n                      command + ' >>' + logfile\n            logger.info(\"Executing [%s]\", command)\n            os.system(command)\n        except CalledProcessError as cpe:\n            logger.error(\"Got CPE error [%s]:[%s]\" % (cpe, tb.print_exc()))\n            return\n\n\ndef main():\n    global delfin_source_path\n    cwd = os.getcwd()\n    logger.info(\"Current dir is %s\" % cwd)\n    this_file_dir = os.path.dirname(os.path.realpath(__file__))\n    delfin_source_path = os.path.join(this_file_dir, \"../\")\n\n    logger.info(\"delfins [%s]\" % delfin_source_path)\n    os.chdir(delfin_source_path)\n    logger.info(os.getcwd())\n\n    # create required directories\n    create_dir(delfin_etc_dir)\n    create_dir(delfin_var_dir)\n\n    # Create blank prometheus exporter file\n    filename = delfin_var_dir + '/' + 'delfin_exporter.txt'\n    create_file(filename)\n\n    # Copy required files\n    # Copy api-paste.ini\n    ini_file_src = os.path.join(delfin_source_path, 'etc',\n                                'delfin', 'api-paste.ini')\n    ini_file_dest = os.path.join(delfin_etc_dir, 'api-paste.ini')\n    copy_files(ini_file_src, ini_file_dest)\n\n    # Copy the conf file\n    conf_file_src = os.path.join(delfin_source_path, 'etc',\n                                 'delfin', 'delfin.conf')\n    copy_files(conf_file_src, conf_file)\n\n    # install\n    install_delfin()\n\n    # create db\n    create_delfin_db()\n\n    # start\n    start_processes()\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "installer/install_dependencies.sh",
    "content": "#!/bin/bash\n\n# Install dependencies\necho Installing dependencies\nsudo apt-get install -y make curl wget libltdl7 libseccomp2 libffi-dev gawk apt-transport-https ca-certificates curl gnupg gnupg-agent lsb-release software-properties-common sshpass pv\n\necho Enabling docker repository\nsudo mkdir -p /etc/apt/keyrings\ncurl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg --yes\n\necho \\\n  \"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \\\n  $(lsb_release -cs) stable\" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null\n  \n# Update local repositories\necho Updating local repositories\nsudo apt-get update\n\n# Install python dependencies\necho Installing Python dependencies\nsudo apt-get install -y python3-distutils python3-testresources python3-pip\npython3 -m pip install -U pip\n\n# Update setuptool version if it is higher than 65\nver=$(python3 -m pip show setuptools | awk '/^Version: / {sub(\"^Version: \", \"\"); print}' | cut -d. -f1)\nif [ \"$ver\" -gt 65 ]; then\n    echo Downgrade setuptools version to 65\n    python3 -m pip install setuptools==65.0.0\nfi\n\n# Install ansible if not present\nif [ \"`which ansible`\" != \"\"  ]; then\n    echo ansible already installed, skipping.\nelse\n    echo Installing ansible\n    python3 -m pip install --user ansible\nfi\n\n# Install docker if not present\nif [ \"`which docker`\" != \"\"  ]; then\n    echo Docker already installed, skipping.\nelse\n    echo Installing docker\n    sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin\nfi\n\n# Ensure /usr/local/bin is in path\nexport PATH=$PATH:/usr/local/bin\n"
  },
  {
    "path": "installer/precheck",
    "content": "#!/bin/bash\n# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Get the required packages from the conf\nBASEDIR=$(dirname \"$0\")\npython_version=$(awk -F \"=\" '/python_version/ {print $2}' ${BASEDIR}/install.conf)\npip_version=$(awk -F \"=\" '/pip_version/ {print $2}' ${BASEDIR}/install.conf)\napt_update_command=\"sudo apt-get update\"\napt_update_done=0\nBIN_DIR=/usr/bin/\nPROJECT_NAME='delfin'\nPROJECT_DIR=$(dirname \"$0\")\nlog=delfin::log\n\ncheck_python(){\n    py_ver=$(python3 -V)\n    if [ $? == 0 ]; then\n        # result will be something like, \"Python 3.x.x\"\n        # we need to get the 3.x.x out of it\n        IFS=' ' read -ra tokens <<< \"$py_ver\"\n        py_ver=${tokens[1]}\n        installed_python=${py_ver%%\\.*}\n        req_python=${python_version%\\.*}\n        if [[ $installed_python -eq $req_python ]]; then\n            delfin::log \"Required python is [${req_python}] and installed is [${installed_python}]\"\n            \n            test -n \"$(which python${req_python})\" && python_path=$(which python${req_python})\n            if [[ -z $python_path ]]; then\n                delfin::log \"Can not find required python version installed, please install it.\"\n                exit 2\n            fi\n        fi\n        #ln -sf $python_path /usr/bin/python\n    fi\n}\n\ncheck_pip(){\n    if [[ -x \"$(which pip)\" ]]; then\n        test -n \"$(which pip3)\" && pip_path=$(which pip3)\n        if [[ -z $pip_path ]]; then\n            delfin::log \"Can not find pip, please install it.\"\n            exit 2\n        fi\n        ln -sf $pip_path /usr/local/bin/pip\n    fi\n}\n\ncheck_install_sqlite(){\n    sqlite_ver=$(sqlite3 -version)\n    if [ $? -eq 0 ]; then\n        delfin::log \"Sqlite3 version[${sqlite_ver}] is already installed\"\n        return\n    else\n        # Install sqlite\n        if [ ${apt_update_done} -eq 0 ]; then\n            ${apt_update_command}\n            apt_update_done=1\n        fi\n        sudo apt-get -y install sqlite3\n    fi\n}\n\ncheck_install_rabbitmq(){\n    # check if MQ is installed\n    mq_status=$(systemctl is-active --quiet rabbitmq-server.service)\n    if [ $? -eq 0 ]; then\n        delfin::log \"RabbitMQ is already installed\"\n        return\n    else\n        #TODO check erlang\n        # Import rabbitMQ\n        ret=$(wget -O- https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc | sudo apt-key add -)\n        if [ $? -eq 0 ]; then\n            ret=$(wget -O- https://www.rabbitmq.com/rabbitmq-release-signing-key.asc | sudo apt-key add -)\n            if [ $? 
-ne 0 ]; then\n                delfin::log \"Error in importing RabbitMQ\"\n                exit 1\n            fi\n        else\n            delfin::log \"Error in importing rabbitMQ\"\n            exit 1\n        fi\n        if [ ${apt_update_done} -eq 0 ]; then\n            ${apt_update_command}\n            apt_update_done=1\n        fi\n        sudo apt-get -y install rabbitmq-server\n    fi\n    mq_status=$(systemctl is-active --quiet rabbitmq-server.service)\n    if [ $? -eq 0 ]; then\n        delfin::log \"RabbitMQ is successfully installed\"\n    fi\n}\n\ncheck_install_redis(){\n    # Check if redis is installed or not\n    redis_status=$(systemctl is-active --quiet redis.service)\n    if [ $? -eq 0 ]; then\n        delfin::log \"Redis is already installed\"\n        return\n    else\n        if [ ${apt_update_done} -eq 0 ]; then\n            ${apt_update_command}\n            apt_update_done=1\n        fi\n        sudo apt -y install redis-server\n    fi\n}\n\n\ncheck_sys_req(){\n    ubuntu_release=$(${BIN_DIR}lsb_release -cs)\n    if [[ ${ubuntu_release} == 'xenial' || ${ubuntu_release} == 'bionic' ]]; then\n        delfin::log \"System requirements satisfied\"\n    else\n        delfin::log \"Installation of ${PROJECT_NAME} is not supported on this platform\"\n    fi\n}\n\ncheck_install_p3_venv(){\n\n    venv_res=$(python3 -m pip install --user virtualenv)\n    if [ $? -eq 0 ]; then\n        delfin::log \"venv is installed\"\n    fi\n    \n    py_venv=$(apt-get -y install python3-venv)\n    if [ $? -eq 0 ]; then\n        delfin::log \"python3-venv is installed\"\n    fi\n\n    if [[ $(lsb_release -rs) == \"18.04\" ]]; then\n        virtualenv -p python3 ${BASEDIR}/${PROJECT_NAME}\n    else\n        venv_dir=$(python3 -m venv ${BASEDIR}/${PROJECT_NAME})\n    fi\n    source ${BASEDIR}/${PROJECT_NAME}/bin/activate\n}\n\nmain(){\n    source ${BASEDIR}/util.sh\n    check_sys_req\n    check_python\n    check_pip\n    check_install_sqlite\n    check_install_rabbitmq\n    check_install_redis\n    check_install_p3_venv\n}\n\n# Entry function\nmain\n\n\n"
  },
  {
    "path": "installer/uninstall",
    "content": "#!/bin/bash\n# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncurr_dir=$(dirname \"$0\")\nAPI_PROC_PATH=${curr_dir}/../delfin/cmd/api.py\nALERT_PROC_PATH=${curr_dir}/../delfin/cmd/alert.py\nTASK_PROC_PATH=${curr_dir}/../delfin/cmd/task.py\nEXPORTER_SERVER_PATH=${curr_dir}/../delfin/\n\nmapfile -t api_proc_id < <( ps -eaf |grep ${API_PROC_PATH} | grep -v grep |awk '{print $2}' )\nmapfile -t alert_proc_id < <( ps -eaf |grep ${ALERT_PROC_PATH} | grep -v grep |awk '{print $2}' )\nmapfile -t task_proc_id < <( ps -eaf |grep ${TASK_PROC_PATH} | grep -v grep |awk '{print $2}' )\nmapfile -t exporter_server_id < <( ps -eaf |grep ${EXPORTER_SERVER_PATH} | grep -v grep |awk '{print $2}' )\n\n\nfor i in \"${api_proc_id[@]}\"\ndo\n    if [ ! $i == \"\" ]; then\n        echo \"Killing delfin process ${i}\"\n        $(kill -9 $i)\n    fi\ndone\n\nfor i in \"${task_proc_id[@]}\"\ndo\n    if [ ! $i == \"\" ]; then\n        echo \"Killing delfin process ${i}\"\n        $(kill -9 $i)\n    fi\ndone\n\nfor i in \"${alert_proc_id[@]}\"\ndo\n    if [ ! $i == \"\" ]; then\n        echo \"Killing delfin process ${i}\"\n        $(kill -9 $i)\n    fi\ndone\n\nfor i in \"${exporter_server_id[@]}\"\ndo\n    if [ ! $i == \"\" ]; then\n        echo \"Killing delfin process ${i}\"\n        $(kill -9 $i)\n    fi\ndone\n\n$(rm -rf /etc/delfin)\n$(rm -rf /var/lib/delfin)\n"
  },
  {
    "path": "installer/util.sh",
    "content": "#!/bin/bash\n# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Script to hold the utilities required\n\nPROJECT_NAME='delfin'\nLOG_DIR=/var/log/soda\nLOGFILE=${LOGFILE:-/var/log/soda/delfin_pre_installer.log}\n\nif [ ! -d ${LOG_DIR} ]; then\n    mkdir -p $LOG_DIR\nfi\n\n# Log function\ndelfin::log(){\n    DATE=`date \"+%Y-%m-%d %H:%M:%S\"`\n    USER=$(whoami)\n    echo \"${DATE} ${USER} execute $0 [INFO] $@ 2>&1\" >> $LOGFILE\n}\n\n"
  },
  {
    "path": "openapi-spec/swagger.yaml",
    "content": "openapi: 3.0.0\ninfo:\n  version: \"v1\"\n  title: SODA Infrastructure Management API\n  description: SODA Infrastructure Management API for resource monitoring, alerting and management across multiple, heterogeneous storage backend. Currently supporting storage monitoring and alerting.\n  contact:\n    name: SODA Support\n    url: 'https://sodafoundation.io/slack'\n    email: support@sodafoundation.io\n  license:\n    name: Apache 2.0\n    url: 'http://www.apache.org/licenses/LICENSE-2.0.html'\ntags:\n  - name: Storages\npaths:\n  /v1/storages:\n    get:\n      tags:\n        - Storages\n      description: List all registered storage back ends\n      operationId: GetStorageBackends\n      parameters:\n        - name: limit\n          in: query\n          description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: Used in conjunction with limit to return a slice of items. offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description:  Comma separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: vendor\n          in: query\n          description: vendor(manufacturer) of the storage.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: model\n          in: query\n          description: model of the storage\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: name\n          in: query\n          description: The storage name.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: serial_number\n          in: query\n          description: The storage serial number.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: status\n          in: query\n          description: The storage status\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - normal\n              - offline\n              - abnormal\n      responses:\n        '200':\n          description: Storage backend list available.\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                      - storages\n                additionalProperties: true\n                properties:\n                  storages:\n                    type: array\n                    title: The storages schema\n                    items:\n                      $ref: 
'#/components/schemas/StorageBackendResponse'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n    post:\n      tags:\n        - Storages\n      description: Register a storage device for management.\n      operationId: addStorageBackends\n      requestBody:\n        description: Inventory item to add\n        content:\n          application/json:\n            schema:\n              $ref: '#/components/schemas/StorageBackendRegistry'\n      responses:\n        '200':\n          description: 'Accepted, items added to the infrastructure management'\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/StorageBackendResponse'\n        '400':\n          description: BadRequest\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '409':\n          description: An item already exists\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}':\n    get:\n      tags:\n        - Storages\n      description: Get details of a storage device\n      operationId: GetStorageBackendbyID\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend .\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: Storage backend list available\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/StorageBackendResponse'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n    delete:\n      tags:\n        - Storages\n      description: Unregister an already registered storage backend\n      parameters:\n        - name: storage_id\n          in: path\n  
        description: Database ID created for a storage backend .\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '202':\n          description: Accepted\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  /v1/storages/sync:\n    post:\n      tags:\n        - Storages\n      description: Collect all resources from all registered  backend and synchronize with DB.\n      operationId: syncStorageBackends\n      responses:\n        '202':\n          description: 'Accepted, '\n        '400':\n          description: BadRequest\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '409':\n          description: An item already exists\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}/sync':\n    post:\n      tags:\n        - Storages\n      description: Collect all resources from specified storage backend and synchronize with DB\n      operationId: syncStorage\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend .\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '202':\n          description: 'Accepted, '\n        '400':\n          description: BadRequest\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '409':\n          description: An item already exists\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}/access-info':\n    
get:\n      tags:\n        - Storages\n      description: Get access info of a registered storage backend\n      operationId: GettorageAccessInfobyID\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: Storage Access-info  available\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/StorageAccessInfoResponse'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n    put:\n      tags:\n        - Storages\n      description: Update a registered storage system access information in Infrastructure management DB.\n      operationId: updateStorageAccessInfobyID\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      requestBody:\n        content:\n          application/json:\n            schema:\n              $ref: '#/components/schemas/StorageBackendRegistryUpdate'\n      responses:\n        '200':\n          description: Storage backend  available with updated access information\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/StorageAccessInfoResponse'\n        '400':\n          description: BadRequest\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/access-infos':\n    get:\n      tags:\n        - Storages\n      description: Get access info of all registered storages\n      operationId: GetAllStorageAccessInfos\n      responses:\n        '200':\n          description: Storage Access-info  available\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/StorageAccessInfosResponse'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n 
               $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  /v1/storage-pools:\n    get:\n      tags:\n        - Storage Pools\n      description: List all storage pools.\n      parameters:\n        - name: limit\n          in: query\n          description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: Used in conjunction with limit to return a slice of items. offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The pool name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a storage pool.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_storage_pool_id\n          in: query\n          description: Actual ID of the storage pool in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: storage_id\n          in: query\n          description: Database ID created for a storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: status\n          in: query\n          description: The pool status\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - normal\n              - offline\n              - abnormal\n      responses:\n        '200':\n          description: List storage pools query was success\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - storage_pools\n                additionalProperties: true\n                properties:\n                  storage_pools:\n                    type: array\n                    title: the storage pools schema\n                    items:\n                      $ref: '#/components/schemas/StoragePoolSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          
description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storage-pools/{id}':\n    get:\n      tags:\n        - Storage Pools\n      description: Get storage pool detail by pool ID.\n      parameters:\n        - name: id\n          in: path\n          description: Database ID created for a storage pool.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: OK\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/StoragePoolSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  /v1/controllers:\n    get:\n      tags:\n        - Controllers\n      description: List all controllers.\n      parameters:\n        - name: limit\n          in: query\n          description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: Used in conjunction with limit to return a slice of items. 
offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The controller name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a controller.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_controller_id\n          in: query\n          description: Actual ID of the controller in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: storage_id\n          in: query\n          description: Database ID created for a storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: status\n          in: query\n          description: The controller status\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - normal\n              - offline\n              - unknown\n      responses:\n        '200':\n          description: List controllers query was success\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - controllers\n                additionalProperties: true\n                properties:\n                  controllers:\n                    type: array\n                    title: the controllers schema\n                    items:\n                      $ref: '#/components/schemas/ControllerSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/controllers/{id}':\n    get:\n      tags:\n        - Controllers\n      description: Get controller detail by controller ID.\n      parameters:\n        - name: id\n          in: path\n          description: Database ID created for a controller.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: OK\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ControllerSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            
application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  /v1/ports:\n    get:\n      tags:\n        - Ports\n      description: List all ports.\n      parameters:\n        - name: limit\n          in: query\n          description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: Used in conjunction with limit to return a slice of items. offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The port name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a port.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_port_id\n          in: query\n          description: Actual ID of the port in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: storage_id\n          in: query\n          description: Database ID created for a storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: connection_status\n          in: query\n          description: The port connection_status\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - connected\n              - disconnected\n              - unknown\n        - name: health_status\n          in: query\n          description: The port health_status\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - normal\n              - abnormal\n              - unknown\n      responses:\n        '200':\n          
description: List port query was success\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - ports\n                additionalProperties: true\n                properties:\n                  ports:\n                    type: array\n                    title: the port schema\n                    items:\n                      $ref: '#/components/schemas/PortSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/ports/{id}':\n    get:\n      tags:\n        - Ports\n      description: Get port detail by port ID.\n      parameters:\n        - name: id\n          in: path\n          description: Database ID created for a port.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: OK\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/PortSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/disks':\n    get:\n      tags:\n        - Disks\n      description: List all disks.\n      parameters:\n        - name: limit\n          in: query\n          description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: Used in conjunction with limit to return a slice of items. 
offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The disk name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a disk.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_disk_id\n          in: query\n          description: Actual ID of the port in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: storage_id\n          in: query\n          description: Database ID created for a storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_disk_group_id\n          in: query\n          description: Database ID created for a disk group.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: status\n          in: query\n          description: The disk status\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - normal\n              - offline\n              - abnormal\n      responses:\n        '200':\n          description: List disk query was success\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - disks\n                additionalProperties: true\n                properties:\n                  disks:\n                    type: array\n                    title: the disk schema\n                    items:\n                      $ref: '#/components/schemas/DiskSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/disks/{id}':\n      get:\n        tags:\n          - Disks\n        description: Get disk detail by disk ID.\n        parameters:\n          - name: id\n            in: path\n            description: Database ID created for a disk.\n            required: true\n            style: simple\n            explode: false\n            schema:\n              type: string\n        responses:\n          '200':\n            description: OK\n            content:\n              
application/json:\n                schema:\n                  $ref: '#/components/schemas/DiskSpec'\n          '401':\n            description: NotAuthorized\n            content:\n              application/json:\n                schema:\n                  $ref: '#/components/schemas/ErrorSpec'\n          '403':\n            description: Forbidden\n            content:\n              application/json:\n                schema:\n                  $ref: '#/components/schemas/ErrorSpec'\n          '404':\n            description: The resource does not exist\n            content:\n              application/json:\n                schema:\n                  $ref: '#/components/schemas/ErrorSpec'\n          '500':\n            description: An unexpected error occurred.\n            content:\n              application/json:\n                schema:\n                  $ref: '#/components/schemas/ErrorSpec'\n  /v1/volumes:\n    get:\n      tags:\n        - Volumes\n      description: List all storage volumes.\n      parameters:\n        - name: limit\n          in: query\n          description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: Used in conjunction with limit to return a slice of items. offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The volume name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_storage_pool_id\n          in: query\n          description: Actual ID of the storage pool in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_volume_id\n          in: query\n          description: Actual ID created for the volume in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: storage_id\n          in: query\n          description: Database ID created for a storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: status\n          in: query\n          description: The volume status\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - noraml\n              - offline\n              - abnormal\n      responses:\n        '200':\n          description: List volumes operation was successful\n     
     content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - volumes\n                additionalProperties: true\n                properties:\n                  volumes:\n                    type: array\n                    items:\n                      $ref: '#/components/schemas/VolumeRespSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/volumes/{id}':\n    get:\n      tags:\n        - Volumes\n      description: Get storage volume detail by volume ID.\n      parameters:\n        - name: id\n          in: path\n          description: Database ID created for a volume.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: OK\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/VolumeRespSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occured.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  /v1/filesystems:\n    get:\n      tags:\n        - Filesystems\n      description: List all filesystems.\n      parameters:\n        - name: limit\n          in: query\n          description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: Used in conjunction with limit to return a slice of items. 
offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The filesystem name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a filesystem.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_filesystem_id\n          in: query\n          description: Actual ID of the filesystem in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_pool_id\n          in: query\n          description: Pool ID of the filesystem in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: storage_id\n          in: query\n          description: Database ID created for a storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: security_mode\n          in: query\n          description: The filesystem security mode\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - mixed\n              - native\n              - ntfs\n              - unix\n        - name: status\n          in: query\n          description: The filesystem status\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - normal\n              - faulty\n      responses:\n        '200':\n          description: List filesystems operation was successful\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - filesystems\n                additionalProperties: true\n                properties:\n                  filesystems:\n                    type: array\n                    title: the filesystem schema\n                    items:\n                      $ref: '#/components/schemas/FilesystemSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/filesystems/{id}':\n    get:\n      tags:\n        - Filesystems\n      description: Get filesystem 
detail by filesystem ID.\n      parameters:\n        - name: id\n          in: path\n          description: Database ID created for a filesystem.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: OK\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/FilesystemSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  /v1/qtrees:\n    get:\n      tags:\n        - Qtrees\n      description: List all qtrees.\n      parameters:\n        - name: limit\n          in: query\n          description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: Used in conjunction with limit to return a slice of items. 
offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The qtree name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a qtree.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_qtree_id\n          in: query\n          description: Actual ID of the qtree in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_filesystem_id\n          in: query\n          description: Filesystem ID of the qtree in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: storage_id\n          in: query\n          description: Database ID created for a storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: security_mode\n          in: query\n          description: The qtree security mode\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - mixed\n              - native\n              - ntfs\n              - unix\n      responses:\n        '200':\n          description: List qtrees operation was successful\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - qtrees\n                additionalProperties: true\n                properties:\n                  qtrees:\n                    type: array\n                    title: the qtree schema\n                    items:\n                      $ref: '#/components/schemas/QtreeSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/qtrees/{id}':\n    get:\n      tags:\n        - Qtrees\n      description: Get qtree detail by qtree ID.\n      parameters:\n        - name: id\n          in: path\n          description: Database ID created for a qtree.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: OK\n          content:\n 
           application/json:\n              schema:\n                $ref: '#/components/schemas/QtreeSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  /v1/quotas:\n    get:\n      tags:\n        - Quotas\n      description: List all quotas.\n      parameters:\n        - name: limit\n          in: query\n          description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: Used in conjunction with limit to return a slice of items. offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The quota name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a quota.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_quota_id\n          in: query\n          description: Actual ID of the quota in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_filesystem_id\n          in: query\n          description: Filesystem ID of the quota in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_qtree_id\n          in: query\n          description: Qtree ID of the quota in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: storage_id\n          in: query\n          description: Database ID created for a storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: 
string\n        - name: type\n          in: query\n          description: The quota type\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - filesystem\n              - tree\n              - user\n              - group\n      responses:\n        '200':\n          description: List quotas operation was successful\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - quotas\n                additionalProperties: true\n                properties:\n                  quotas:\n                    type: array\n                    title: the quota schema\n                    items:\n                      $ref: '#/components/schemas/QuotaSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/quotas/{id}':\n    get:\n      tags:\n        - Quotas\n      description: Get quota detail by quota ID.\n      parameters:\n        - name: id\n          in: path\n          description: Database ID created for a quota.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: OK\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/QuotaSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  /v1/shares:\n    get:\n      tags:\n        - Shares\n      description: List all shares.\n      parameters:\n        - name: limit\n          in: query\n          description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: Used in conjunction with limit to return a slice of items. 
offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The share name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a share.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_share_id\n          in: query\n          description: Actual ID of the share in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_filesystem_id\n          in: query\n          description: Filesystem ID of the share in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_qtree_id\n          in: query\n          description: Qtree ID of the share in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: storage_id\n          in: query\n          description: Database ID created for a storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: protocol\n          in: query\n          description: The share protocol\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - cifs\n              - nfs\n              - ftp\n              - hdfs\n      responses:\n        '200':\n          description: List shares operation was successful\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - shares\n                additionalProperties: true\n                properties:\n                  shares:\n                    type: array\n                    title: the share schema\n                    items:\n                      $ref: '#/components/schemas/ShareSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/shares/{id}':\n    get:\n      tags:\n        - Shares\n      description: Get share detail by share ID.\n      parameters:\n        - name: id\n          in: path\n          description: Database 
ID created for a share.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: OK\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ShareSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}/snmp-config':\n    get:\n      tags:\n        - SnmpConfig\n      description: >-\n        Get details of the SNMP alert source information configured for\n        backend devices\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: OK\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/SnmpConfigRespSpec'\n        '400':\n          description: BadRequest\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n    put:\n      tags:\n        - SnmpConfig\n      description: >-\n        Modify the SNMP alert source information configured for backend\n        devices\n      operationId: putSnmpConfigInfo\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      requestBody:\n        $ref: '#/components/requestBodies/SnmpConfigUpdateSpec'\n      responses:\n        '200':\n          description: OK\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/SnmpConfigRespSpec'\n        '400':\n          description: BadRequest\n          content:\n            application/json:\n 
             schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n    delete:\n      tags:\n        - SnmpConfig\n      description: >-\n        Remove the SNMP alert source information configured for backend\n        devices\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: OK\n          content: {}\n        '400':\n          description: BadRequest\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}/alerts/{sequence_number}':\n    delete:\n      tags:\n        - Alerts\n      description: Clear the alert for the input alert sequence number\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n        - name: sequence_number\n          in: path\n          description: Sequence number which uniquely maps to the trap sent by a backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: OK\n          content: {}\n        '400':\n          description: BadRequest\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              
schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '404':\n          description: The resource does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}/alerts/sync':\n    post:\n      tags:\n        - Alerts\n      description: Sync alerts from the storage device\n      operationId: syncStorageAlerts\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      requestBody:\n        $ref: '#/components/requestBodies/StorageBackendAlertSync'\n      responses:\n        '200':\n          description: Accepted\n        '400':\n          description: BadRequest\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '409':\n          description: An item already exists\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{id}/capabilities':\n    get:\n      tags:\n        - Performance Monitoring\n      description: |\n        Provides the supported capabilities of a storage backend configured in the delfin deployment.\n      parameters:\n        - name: id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n      responses:\n        '200':\n          description: Returns the capabilities of the storage backend\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/StorageCapabilitiesResponse'\n        '404':\n          description: The storage does not exist\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: Invalid capabilities.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '505':\n          description: Capability feature not supported.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/snmp-configs':\n    get:\n      tags:\n        - Storages\n      description: >-\n        Get details of all SNMP alert source information configured for\n        backend devices\n      responses:\n        '200':\n          
description: OK\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/SnmpConfigsRespSpec'\n        '400':\n          description: BadRequest\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}/storage-host-initiators':\n    get:\n      tags:\n        - Masking views\n      description: List all storage host initiators.\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n        - name: limit\n          in: query\n          description: >-\n            Requests a page size of items. Returns a number of items up to a\n            limit value. Use the limit parameter to make an initial limited\n            request and use the ID of the last-seen item from the response as\n            the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: >-\n            Used in conjunction with limit to return a slice of items.\n            offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The storage host initiator name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a storage host initiator.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: description\n          in: query\n          description: The storage host initiator description\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: alias\n          in: query\n          description: The storage host initiator alias name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: wwn\n          in: query\n          
description: The storage host initiator world wide name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_storage_host_initiator_id\n          in: query\n          description: >-\n            Actual ID of the storage host initiator in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_storage_host_id\n          in: query\n          description: >-\n            Actual ID of the associated storage host in the storage backend\n            if any.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: status\n          in: query\n          description: The storage host initiator status\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - normal\n              - offline\n              - abnormal\n              - unknown\n      responses:\n        '200':\n          description: List storage host initiators operation was successful\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - storage_host_initiators\n                additionalProperties: true\n                properties:\n                  storage_host_initiators:\n                    type: array\n                    title: the storage host initiators schema\n                    items:\n                      $ref: '#/components/schemas/StorageHostInitiatorRespSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}/storage-hosts':\n    get:\n      tags:\n        - Masking views\n      description: List all storage hosts.\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n        - name: limit\n          in: query\n          description: >-\n            Requests a page size of items. Returns a number of items up to a\n            limit value. 
Use the limit parameter to make an initial limited\n            request and use the ID of the last-seen item from the response as\n            the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: >-\n            Used in conjunction with limit to return a slice of items.\n            offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The storage host name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a storage host.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: description\n          in: query\n          description: The storage host description\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_storage_host_id\n          in: query\n          description: Actual ID of the storage host in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: status\n          in: query\n          description: The storage host status\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - normal\n              - offline\n              - abnormal\n              - unknown\n        - name: ip_address\n          in: query\n          description: IP address of the storage host.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: os_type\n          in: query\n          description: Operating system of the storage host\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            enum:\n              - windows\n              - linux\n      responses:\n        '200':\n          description: List storage hosts operation was successful\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - storage_hosts\n                additionalProperties: true\n                properties:\n                  storage_hosts:\n                    type: array\n                    title: the storage hosts schema\n                    items:\n                      $ref: '#/components/schemas/StorageHostRespSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n               
 $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}/storage-host-groups':\n    get:\n      tags:\n        - Masking views\n      description: List all storage host groups.\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n        - name: limit\n          in: query\n          description: >-\n            Requests a page size of items. Returns a number of items up to a\n            limit value. Use the limit parameter to make an initial limited\n            request and use the ID of the last-seen item from the response as\n            the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: >-\n            Used in conjunction with limit to return a slice of items.\n            offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The storage host group name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a storage host group.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: description\n          in: query\n          description: The storage host group description\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_storage_host_group_id\n          in: query\n          description: Actual ID of the storage host group in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n      responses:\n        '200':\n          description: List storage host groups operation was successful\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - storage_host_groups\n                additionalProperties: true\n                properties:\n                  storage_host_groups:\n                    type: array\n                    title: the storage host group schema\n                    items:\n             
         $ref: '#/components/schemas/StorageHostGroupRespSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}/port-groups':\n    get:\n      tags:\n        - Masking views\n      description: List all port groups.\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n        - name: limit\n          in: query\n          description: >-\n            Requests a page size of items. Returns a number of items up to a\n            limit value. Use the limit parameter to make an initial limited\n            request and use the ID of the last-seen item from the response as\n            the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: >-\n            Used in conjunction with limit to return a slice of items.\n            offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The port group name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a port group.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: description\n          in: query\n          description: The port group description\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_port_group_id\n          in: query\n          description: Actual ID of the port group in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n      responses:\n        '200':\n          description: List port groups operation was successful\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - port_groups\n                additionalProperties: true\n                properties:\n                  port_groups:\n     
               type: array\n                    title: the port groups schema\n                    items:\n                      $ref: '#/components/schemas/PortGroupRespSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}/volume-groups':\n    get:\n      tags:\n        - Masking views\n      description: List all volume groups.\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n        - name: limit\n          in: query\n          description: >-\n            Requests a page size of items. Returns a number of items up to a\n            limit value. Use the limit parameter to make an initial limited\n            request and use the ID of the last-seen item from the response as\n            the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: >-\n            Used in conjunction with limit to return a slice of items.\n            offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The volume group name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a volume group.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: description\n          in: query\n          description: The volume group description\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_volume_group_id\n          in: query\n          description: Actual ID of the volume group in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n      responses:\n        '200':\n          description: List volume groups operation was successful\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n              
    - volume_groups\n                additionalProperties: true\n                properties:\n                  volume_groups:\n                    type: array\n                    title: the volume groups schema\n                    items:\n                      $ref: '#/components/schemas/VolumeGroupRespSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n  '/v1/storages/{storage_id}/masking-views':\n    get:\n      tags:\n        - Masking views\n      description: List all masking views.\n      parameters:\n        - name: storage_id\n          in: path\n          description: Database ID created for a storage backend.\n          required: true\n          style: simple\n          explode: false\n          schema:\n            type: string\n        - name: limit\n          in: query\n          description: >-\n            Requests a page size of items. Returns a number of items up to a\n            limit value. Use the limit parameter to make an initial limited\n            request and use the ID of the last-seen item from the response as\n            the marker parameter value in a subsequent limited request.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 1\n            type: integer\n            format: int32\n        - name: offset\n          in: query\n          description: >-\n            Used in conjunction with limit to return a slice of items.\n            offset is where to start in the list.\n          required: false\n          style: form\n          explode: true\n          schema:\n            minimum: 0\n            type: integer\n            format: int32\n        - name: sort\n          in: query\n          description: >-\n            Comma-separated list of sort keys and optional sort directions in\n            the form of key:val\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n            example: 'sort=name:desc,id:asc'\n        - name: name\n          in: query\n          description: The masking view name\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: id\n          in: query\n          description: Database ID created for a masking view.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: description\n          in: query\n          description: The masking view description\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_masking_view_id\n          in: query\n          description: Actual ID of the masking view in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_storage_host_group_id\n          in: query\n          description: >-\n         
   Actual ID of the storage host group in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_volume_group_id\n          in: query\n          description: Actual ID of the volume group in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_port_group_id\n          in: query\n          description: Actual ID of the port group in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_storage_host_id\n          in: query\n          description: Actual ID of the storage host in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_volume_id\n          in: query\n          description: Actual ID of the volume in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n        - name: native_port_id\n          in: query\n          description: Actual ID of the port in the storage backend.\n          required: false\n          style: form\n          explode: true\n          schema:\n            type: string\n      responses:\n        '200':\n          description: List masking views operation was successful\n          content:\n            application/json:\n              schema:\n                type: object\n                required:\n                  - masking_views\n                additionalProperties: true\n                properties:\n                  masking_views:\n                    type: array\n                    title: the masking views schema\n                    items:\n                      $ref: '#/components/schemas/MaskingViewRespSpec'\n        '401':\n          description: NotAuthorized\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '403':\n          description: Forbidden\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\n        '500':\n          description: An unexpected error occurred.\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ErrorSpec'\ncomponents:\n  schemas:\n    BaseModel:\n      type: object\n      properties:\n        id:\n          type: string\n          readOnly: true\n          example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n        created_at:\n          type: string\n          format: date-time\n          readOnly: true\n          example: '2017-07-10T14:36:58.014Z'\n        updated_at:\n          type: string\n          format: date-time\n          readOnly: true\n          example: '2017-07-10T14:36:58.014Z'\n    RestAccessInfoRegistry:\n      required:\n        - host\n        - port\n        - username\n        - password\n      type: object\n      properties:\n        host:\n          type: string\n          example: 10.0.0.1\n        port:\n          type: string\n          example: \"8008\"\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n    SSHAccessInfoRegistry:\n      required:\n        - host\n        - port\n        - 
username\n        - password\n        - pub_key\n        - pub_key_type\n      type: object\n      properties:\n        host:\n          type: string\n          example: '10.0.0.1'\n        port:\n          type: string\n          example: '22'\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n        pub_key:\n          type: string\n          example: '73:d8:34:18:70:2a:ae:d8:1c:a5:44:40:ef:50:d0:63'\n        pub_key_type:\n          type: string\n          enum: ['ed25519', 'ecdsa', 'rsa']\n    CLIAccessInfoRegistry:\n      required:\n        - host\n        - username\n        - password\n      type: object\n      properties:\n        host:\n          type: string\n          example: 10.0.0.1\n        port:\n          type: string\n          example: \"8888\"\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n    SMISAccessInfoRegistry:\n      required:\n        - host\n        - username\n        - password\n      type: object\n      properties:\n        host:\n          type: string\n          example: 10.0.0.1\n        port:\n          type: string\n          example: \"5989\"\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n        namespace:\n          type: string\n    RestAccessInfoUpdate:\n      required:\n        - username\n        - password\n      type: object\n      properties:\n        host:\n          type: string\n          example: 10.0.0.1\n        port:\n          type: string\n          example: \"8008\"\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n    SSHAccessInfoUpdate:\n      required:\n        - username\n        - password\n      type: object\n      properties:\n        host:\n          type: string\n          example: '10.0.0.1'\n        port:\n          type: string\n          example: '22'\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n        pub_key:\n          type: string\n          example: '73:d8:34:18:70:2a:ae:d8:1c:a5:44:40:ef:50:d0:63'\n        pub_key_type:\n          type: string\n          enum: ['ed25519', 'ecdsa', 'rsa']\n    CLIAccessInfoUpdate:\n      required:\n        - username\n        - password\n      type: object\n      properties:\n        host:\n          type: string\n          example: 10.0.0.1\n        port:\n          type: string\n          example: \"8888\"\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n    SMISAccessInfoUpdate:\n      required:\n        - username\n        - password\n      type: object\n      properties:\n        host:\n          type: string\n          example: 10.0.0.1\n        port:\n          type: string\n          example: \"5989\"\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n        namespace:\n          type: string\n    RestAccessInfoResponse:\n      required:\n        - host\n        - port\n        - username\n      type: object\n      properties:\n        host:\n          type: string\n          example: 10.0.0.1\n        port:\n          type: string\n          example: \"8008\"\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n    SSHAccessInfoResponse:\n      required:\n        - 
host\n        - port\n        - username\n      type: object\n      properties:\n        host:\n          type: string\n          example: '10.0.0.1'\n        port:\n          type: string\n          example: '22'\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n        pub_key:\n          type: string\n          example: '73:d8:34:18:70:2a:ae:d8:1c:a5:44:40:ef:50:d0:63'\n        pub_key_type:\n          type: string\n          enum: ['ed25519', 'ecdsa', 'rsa']\n    CLIAccessInfoResponse:\n      required:\n        - host\n        - port\n        - username\n      type: object\n      properties:\n        host:\n          type: string\n          example: 10.0.0.1\n        port:\n          type: string\n          example: \"8888\"\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n    SMISAccessInfoResponse:\n      required:\n        - host\n        - port\n        - username\n        - namespace\n      type: object\n      properties:\n        host:\n          type: string\n          example: 10.0.0.1\n        port:\n          type: string\n          example: \"5989\"\n        username:\n          type: string\n          example: admin\n        password:\n          type: string\n        namespace:\n          type: string\n    StorageBackendRegistry:\n      required:\n        - model\n        - vendor\n      anyOf:\n        - $ref: '#/components/schemas/RestAccessInfoRegistry'\n        - $ref: '#/components/schemas/SSHAccessInfoRegistry'\n        - $ref: '#/components/schemas/CLIAccessInfoRegistry'\n        - $ref: '#/components/schemas/SMISAccessInfoRegistry'\n      type: object\n      properties:\n        name:\n          type: string\n          example: EMC-VMAX-123456\n        description:\n          type: string\n          example: VMAX storage lab1\n        vendor:\n          type: string\n          example: dellemc\n        model:\n          type: string\n          example: vmax\n        rest:\n          $ref: '#/components/schemas/RestAccessInfoRegistry'\n        ssh:\n          $ref: '#/components/schemas/SSHAccessInfoRegistry'\n        cli:\n          $ref: '#/components/schemas/CLIAccessInfoRegistry'\n        smis:\n          $ref: '#/components/schemas/SMISAccessInfoRegistry'\n        extra_attributes:\n          type: object\n          additionalProperties:\n            type: string\n          example:\n            array_id: 00002554321\n    StorageBackendRegistryUpdate:\n      anyOf:\n        - $ref: '#/components/schemas/RestAccessInfoUpdate'\n        - $ref: '#/components/schemas/SSHAccessInfoUpdate'\n        - $ref: '#/components/schemas/CLIAccessInfoUpdate'\n        - $ref: '#/components/schemas/SMISAccessInfoUpdate'\n      type: object\n      properties:\n        rest:\n          $ref: '#/components/schemas/RestAccessInfoUpdate'\n        ssh:\n          $ref: '#/components/schemas/SSHAccessInfoUpdate'\n        cli:\n          $ref: '#/components/schemas/CLIAccessInfoUpdate'\n        smis:\n          $ref: '#/components/schemas/SMISAccessInfoUpdate'\n        extra_attributes:\n          type: object\n          additionalProperties:\n            type: string\n          example:\n            controller1: string\n            ip1: string\n            sshKeyPath: string\n    StorageBackendResponse:\n      type: object\n      properties:\n        id:\n          type: string\n        name:\n          type: string\n          example: EMC-VMAX-123456\n        
description:\n          type: string\n          example: VMAX storage lab1\n        vendor:\n          type: string\n          example: Dell EMC\n        model:\n          type: string\n          example: VMAX250F\n        status:\n          type: string\n          example: normal\n        firmware:\n          type: string\n          example: 5978.278\n        serial_number:\n          type: string\n          example: '0002004355'\n        location:\n          type: string\n        created_at:\n          type: string\n        updated_at:\n          type: string\n        sync_status:\n          type: string\n          enum:\n            - SYNCED\n            - SYNCING\n        total_capacity:\n          type: integer\n          format: int64\n        used_capacity:\n          type: integer\n          format: int64\n        free_capacity:\n          type: integer\n          format: int64\n    StorageAccessInfoResponse:\n      type: object\n      properties:\n        id:\n          type: string\n        rest:\n          $ref: '#/components/schemas/RestAccessInfoResponse'\n        ssh:\n          $ref: '#/components/schemas/SSHAccessInfoResponse'\n        cli:\n          $ref: '#/components/schemas/CLIAccessInfoResponse'\n        smis:\n          $ref: '#/components/schemas/SMISAccessInfoResponse'\n        vendor:\n          type: string\n          example: dellemc\n        model:\n          type: string\n          example: vmax\n        extra_attributes:\n          type: object\n          additionalProperties:\n            type: string\n          example:\n            array_id: string\n    StorageAccessInfosResponse:\n      description: Response for all access infos configuration.\n      type: object\n      properties:\n        access_infos:\n          type: array\n          description: the list of access info\n          items:\n            $ref: '#/components/schemas/StorageAccessInfoResponse'\n\n\n    StoragePoolSpec:\n      description: >-\n        A storage pool is discovered and updated by the task manager. Each pool can be\n        regarded as a physical storage pool or a virtual storage pool. 
It is a\n        logical and atomic pool and can be abstracted from any storage platform.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - required:\n            - id\n            - name\n          type: object\n          properties:\n            name:\n              type: string\n            id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_storage_pool_id:\n              type: string\n              readOnly: true\n              example: SRP_1\n            storage_type:\n              type: string\n              enum:\n                - block\n                - file\n                - unified\n            description:\n              type: string\n            status:\n              type: string\n              enum:\n                - normal\n                - offline\n                - abnormal\n            total_capacity:\n              type: integer\n              format: int64\n            used_capacity:\n              type: integer\n              format: int64\n            free_capacity:\n              type: integer\n              format: int64\n    ControllerSpec:\n      description: >-\n        A controller is discovered and updated by the task manager.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - required:\n            - id\n            - name\n          type: object\n          properties:\n            name:\n              type: string\n            id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_controller_id:\n              type: string\n              readOnly: true\n              example: Controller_A\n            soft_version:\n              type: string\n            location:\n              type: string\n            status:\n              type: string\n              enum:\n                - normal\n                - offline\n                - unknown\n            cpu_info:\n              type: string\n            memory_size:\n              type: integer\n              format: int64\n    PortSpec:\n      description: >-\n        A port is discovered and updated by the task manager.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - required:\n            - id\n            - name\n          type: object\n          properties:\n            name:\n              type: string\n            id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_port_id:\n              type: string\n              readOnly: true\n              example: Port_A\n            native_parent_id:\n              type: string\n              readOnly: true\n              example: Controller_A\n            speed:\n              type: integer\n            max_speed:\n              type: integer\n            location:\n              type: string\n            connection_status:\n              type: string\n              enum:\n                - 
connected\n                - disconnected\n                - unknown\n            health_status:\n              type: string\n              enum:\n                - normal\n                - abnormal\n                - unknown\n            type:\n              type: string\n              enum:\n                - fc\n                - iscsi\n                - ficon\n                - fcoe\n                - eth\n                - sas\n                - ib\n                - other\n            logical_type:\n              type: string\n              enum:\n                - frontend\n                - backend\n                - service\n                - management\n                - internal\n                - maintenance\n                - interconnect\n                - other\n            wwn:\n              type: string\n            mac_address:\n              type: string\n            ipv4:\n              type: string\n            ipv4_mask:\n              type: string\n            ipv6:\n              type: string\n            ipv6_mask:\n              type: string\n    DiskSpec:\n      description: >-\n        A disk is discovered and updated by the task manager.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - required:\n            - id\n            - name\n          type: object\n          properties:\n            name:\n              type: string\n            id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_disk_id:\n              type: string\n              readOnly: true\n              example: Disk_A\n            serial_number:\n              type: string\n              readOnly: true\n              example: SN00112233\n            manufacturer:\n              type: string\n            model:\n              type: string\n            firmware:\n              type: string\n            speed:\n              type: integer\n            capacity:\n              type: integer\n            location:\n              type: string\n            status:\n              type: string\n              enum:\n                - normal\n                - offline\n                - abnormal\n            physical_type:\n              type: string\n              enum:\n                - sata\n                - sas\n                - ssd\n                - nl-sas\n                - unknown\n            logical_type:\n              type: string\n              enum:\n                - free\n                - member\n                - hotspare\n                - cache\n            health_score:\n              type: integer\n            native_disk_group_id:\n              type: string\n    FilesystemSpec:\n      description: >-\n        A filesystem is discovered and updated by the task manager.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - required:\n            - id\n            - name\n          type: object\n          properties:\n            name:\n              type: string\n            id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_filesystem_id:\n              type: string\n           
   readOnly: true\n              example: Filesystem_A\n            native_pool_id:\n              type: string\n              readOnly: true\n              example: Pool_A\n            total_capacity:\n              type: integer\n            used_capacity:\n              type: integer\n            free_capacity:\n              type: integer\n            status:\n              type: string\n              enum:\n                - normal\n                - faulty\n            worm:\n              type: string\n              enum:\n                - non_worm\n                - audit_log\n                - compliance\n                - enterprise\n            type:\n              type: string\n              enum:\n                - thick\n                - thin\n            deduplicated:\n              type: boolean\n            compressed:\n              type: boolean\n            security_mode:\n              type: string\n              enum:\n                - mixed\n                - native\n                - ntfs\n                - unix\n    QtreeSpec:\n      description: >-\n        A qtree is discovered and updated by the task manager.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - required:\n            - id\n            - name\n          type: object\n          properties:\n            name:\n              type: string\n            id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_qtree_id:\n              type: string\n              readOnly: true\n              example: Qtree_A\n            native_filesystem_id:\n              type: string\n              readOnly: true\n              example: Filesystem_A\n            path:\n              type: string\n            security_mode:\n              type: string\n              enum:\n                - mixed\n                - native\n                - ntfs\n                - unix\n    QuotaSpec:\n      description: >-\n        A quota is discovered and updated by the task manager.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - required:\n            - id\n            - name\n            - type\n          type: object\n          properties:\n            id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_quota_id:\n              type: string\n              readOnly: true\n              example: Quota_A\n            name:\n              type: string\n            type:\n              type: string\n              enum:\n                - filesystem\n                - tree\n                - user\n                - group\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_filesystem_id:\n              type: string\n              readOnly: true\n              example: Filesystem_A\n            native_qtree_id:\n              type: string\n              readOnly: true\n              example: Qtree_A\n            capacity_hard_limit:\n              type: integer\n              format: int64\n              example: 100\n            capacity_soft_limit:\n              type: integer\n              format: int64\n              example: 80\n            file_hard_limit:\n    
          type: integer\n              format: int64\n              example: 10\n            file_soft_limit:\n              type: integer\n              format: int64\n              example: 8\n            used_capacity:\n              type: integer\n              format: int64\n              example: 100\n            file_count:\n              type: integer\n              format: int64\n              example: 10\n            user_group_name:\n              type: string\n    ShareSpec:\n      description: >-\n        A share is discovered and updated by the task manager.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - required:\n            - id\n            - name\n          type: object\n          properties:\n            name:\n              type: string\n            id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_share_id:\n              type: string\n              readOnly: true\n              example: Share_A\n            native_filesystem_id:\n              type: string\n              readOnly: true\n              example: Filesystem_A\n            native_qtree_id:\n              type: string\n            protocol:\n              type: string\n              enum:\n                - cifs\n                - nfs\n                - ftp\n                - hdfs\n            path:\n              type: string\n\n    VolumeRespSpec:\n      description:\n        A volume is a device created by the storage service. It can be attached to a\n        physical machine or a virtual machine instance.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - type: object\n          properties:\n            name:\n              type: string\n            description:\n              type: string\n            status:\n              type: string\n              enum:\n                - available\n                - error\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            total_capacity:\n              type: integer\n              format: int64\n              example: 2\n            used_capacity:\n              type: integer\n              format: int64\n              example: 2\n            free_capacity:\n              type: integer\n              format: int64\n              example: 2\n            compressed:\n              type: boolean\n              example: false\n            deduplicated:\n              type: boolean\n              example: false\n            type:\n              type: string\n              enum:\n                - thick\n                - thin\n            native_volume_id:\n              type: string\n            wwn:\n              type: string\n            native_storage_pool_id:\n              type: string\n    SnmpConfigUpdateSpec:\n      required:\n        - host\n        - version\n      type: object\n      properties:\n        version:\n          type: string\n          description: SNMP version. Must be set by the user\n          example: SNMPV2C\n          enum:\n            - SNMPV2C\n            - SNMPV3\n        community_string:\n          type: string\n          description: Community string. 
This should be filled if version is V2C\n        username:\n          type: string\n          description: SNMP V3 usm username. This should be filled if version is V3\n        engine_id:\n          type: string\n          description: >-\n            Engine ID of the device which will be sending the traps. This should\n            be filled if version is V3\n        security_level:\n          type: string\n          description: Security level for the user. This should be filled if version is V3\n          example: noAuthnoPriv\n          enum:\n            - noAuthnoPriv\n            - authNoPriv\n            - authPriv\n        auth_protocol:\n          type: string\n          description: >-\n            Authentication protocol to be selected. This should be filled if\n            authNoPriv or authPriv is set as security_level\n          example: MD5\n          enum:\n            - MD5\n            - SHA\n        auth_key:\n          type: string\n          description: >-\n            Authentication key. This should be filled if authNoPriv or authPriv\n            is set\n        privacy_protocol:\n          type: string\n          description: >-\n            Privacy or encryption protocol to be selected. This should be filled\n            if authPriv is set as security_level\n          example: DES\n          enum:\n            - 3DES\n            - DES\n            - AES\n        privacy_key:\n          type: string\n          description: >-\n            Privacy or encryption password. This should be filled if authPriv is\n            set as security_level\n        host:\n          type: string\n          example: 10.0.0.1\n        context_name:\n          type: string\n          description: Context name of the alert source\n          example: \"New Context\"\n        retry_num:\n          type: integer\n          description: >-\n            Maximum number of retries while connecting to alert source\n            By default, set to 1\n          example: 2\n        expiration:\n          type: integer\n          description: >-\n            Expiration time (in sec) for one alert source connect request\n            By default, set to 2\n          example: 60\n        port:\n          type: integer\n          description: >-\n            Port for connecting to alert source\n            By default, set to 161\n          example: 20162\n      description: SNMP alert source configuration attributes.\n    SnmpConfigRespSpec:\n      description: Response for snmp alert source configuration.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - type: object\n          properties:\n            version:\n              type: string\n              description: SNMP version. Must be set by the user\n              example: SNMPV2C\n              enum:\n                - SNMPV2C\n                - SNMPV3\n            community_string:\n              type: string\n              description: Community string. This should be filled if version is V2C\n            username:\n              type: string\n              description: SNMP V3 usm username. 
This should be filled if version is V3\n            engine_id:\n              type: string\n              description: Engine ID of the device which will be sending the traps\n            security_level:\n              type: string\n              description: Security level for the user\n              example: noAuthnoPriv\n              enum:\n                - noAuthnoPriv\n                - authNoPriv\n                - authPriv\n            auth_protocol:\n              type: string\n              description: >-\n                Authentication protocol to be selected. This should be filled if\n                authNoPriv or authPriv is set as security_level\n              example: MD5\n              enum:\n                - MD5\n                - SHA\n            auth_key:\n              type: string\n              description: >-\n                Authentication key. This should be filled if authNoPriv or\n                authPriv is set\n            privacy_protocol:\n              type: string\n              description: >-\n                Privacy or encryption protocol to be selected. This should be\n                filled if authPriv is set as security_level\n              example: DES\n              enum:\n                - 3DES\n                - DES\n                - AES\n            privacy_key:\n              type: string\n              description: >-\n                Privacy or encryption password. This should be filled if\n                authPriv is set as security_level\n            host:\n              type: string\n              description: All alert source IPs of the device\n              example: 10.0.0.1,127.0.0.1\n            context_name:\n              type: string\n              description: Context name of the alert source\n              example: \"New Context\"\n            retry_num:\n              type: integer\n              description: >-\n                Maximum number of retries while connecting to alert source\n                By default, set to 1\n              example: 2\n            expiration:\n              type: integer\n              description: >-\n                Expiration time (in sec) for one alert source connect request\n                By default, set to 2\n              example: 60\n            port:\n              type: integer\n              description: >-\n                Port for connecting to alert source\n                By default, set to 161\n              example: 20162\n    SnmpConfigsRespSpec:\n      description: Response for all snmp alert source configuration.\n      type: object\n      properties:\n        snmp_configs:\n          type: array\n          description: the list of snmp configs\n          items:\n            $ref: '#/components/schemas/SnmpConfigRespSpec'\n    StorageHostInitiatorRespSpec:\n      description: >-\n        Storage host initiator allows a host to gain access to the storage\n        array. It may or may not be attached to a storage host.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - type: object\n          properties:\n            name:\n              type: string\n            description:\n              type: string\n              readOnly: true\n              example: \"storage host initiator\"\n            alias:\n              type: string\n              readOnly: true\n              example: \"storage host initiator\"\n            wwn:\n              type: string\n              readOnly: true\n              example: \"storage host initiator1\"\n            
storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_storage_host_initiator_id:\n              type: string\n              readOnly: true\n              description: >-\n                Actual ID of the storage host initiator in the storage backend.\n              example: storage_host_initiator_0\n            native_storage_host_id:\n              type: string\n              readOnly: true\n              description: Actual ID of the storage host in the storage backend.\n              example: storage_host_0\n            status:\n              type: string\n              readOnly: true\n              enum:\n                - normal\n                - offline\n                - abnormal\n                - unknown\n    StorageHostRespSpec:\n      description: >-\n        A storage host is a consumer of volumes from storage.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - type: object\n          properties:\n            name:\n              type: string\n            description:\n              type: string\n              readOnly: true\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_storage_host_id:\n              type: string\n              readOnly: true\n              description: Actual ID of the storage host in the storage backend.\n              example: storage_host_0\n            status:\n              type: string\n              readOnly: true\n              enum:\n                - normal\n                - offline\n                - abnormal\n                - unknown\n            ip_address:\n              type: string\n              readOnly: true\n              description: IP address of the storage host.\n              example: \"192.168.1.4\"\n            os_type:\n              type: string\n              readOnly: true\n              description: Operating system of the storage host.\n              enum:\n                - windows\n                - linux\n            storage_host_initiators:\n              type: array\n              items:\n                type: string\n              readOnly: true\n              description: List of storage host initiator native ids.\n    StorageHostGroupRespSpec:\n      description: >-\n        A storage host group is a consumer of volumes from storage.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - type: object\n          properties:\n            name:\n              type: string\n            description:\n              type: string\n              readOnly: true\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_storage_host_group_id:\n              type: string\n              readOnly: true\n              description: Actual ID of the storage host group in the storage backend.\n              example: storage_host_group0\n            storage_hosts:\n              type: array\n              items:\n                type: string\n              readOnly: true\n              description: List of storage host native ids.\n    PortGroupRespSpec:\n      description: >-\n        A port group is a collection of ports from storage.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - type: object\n          properties:\n            name:\n              type: 
string\n            description:\n              type: string\n              readOnly: true\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_port_group_id:\n              type: string\n              readOnly: true\n              description: Actual ID of the port group in the storage backend.\n              example: port_group_0\n            ports:\n              type: array\n              items:\n                type: string\n              readOnly: true\n              description: List of port native ids.\n    VolumeGroupRespSpec:\n      description: >-\n        A volume group is a collection of volumes from storage.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - type: object\n          properties:\n            name:\n              type: string\n            description:\n              type: string\n              readOnly: true\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_volume_group_id:\n              type: string\n              readOnly: true\n              description: Actual ID of the volume group in the storage backend.\n              example: volume_group_0\n            volumes:\n              type: array\n              items:\n                type: string\n              readOnly: true\n              description: List of volume native ids.\n    MaskingViewRespSpec:\n      description: >-\n        A masking view is an object which shows the path from host to LUN.\n      allOf:\n        - $ref: '#/components/schemas/BaseModel'\n        - type: object\n          properties:\n            name:\n              type: string\n            description:\n              type: string\n              readOnly: true\n            storage_id:\n              type: string\n              readOnly: true\n              example: 084bf71e-a102-11e7-88a8-e31fe6d52248\n            native_masking_view_id:\n              type: string\n              readOnly: true\n              description: Actual ID of the masking view in the storage backend.\n              example: masking_view_0\n            native_storage_host_group_id:\n              type: string\n              readOnly: true\n              description: >-\n                Actual ID of the storage host group in the storage backend.\n              example: storage_host_group_0\n            native_volume_group_id:\n              type: string\n              readOnly: true\n              description: Actual ID of the volume group in the storage backend.\n              example: volume_group_0\n            native_port_group_id:\n              type: string\n              readOnly: true\n              description: Actual ID of the port group in the storage backend.\n              example: port_group_0\n            native_storage_host_id:\n              type: string\n              readOnly: true\n              description: Actual ID of the storage host in the storage backend.\n              example: storage_host_0\n            native_volume_id:\n              type: string\n              readOnly: true\n              description: Actual ID of the volume in the storage backend.\n              example: volume_0\n            native_port_id:\n              type: string\n              readOnly: true\n              description: Actual ID of the port in the storage backend.\n              example: port_0\n    
StorageBackendAlertSync:\n      type: object\n      properties:\n        begin_time:\n          type: integer\n          format: int64\n          description: >-\n            Start time (in milliseconds) for alert sync. It is optional.\n            If not provided, alerts are fetched without filtering start time\n          example: 13577777777777766\n        end_time:\n          type: integer\n          format: int64\n          description: >-\n            End time (in milliseconds) for alert sync. It is optional.\n            If not provided, alerts are fetched without filtering end time\n          example: 13577777777777777\n    ErrorSpec:\n      required:\n        - error_code\n        - error_msg\n        - error_args\n      type: object\n      properties:\n        error_code:\n          type: string\n        error_msg:\n          type: string\n        error_args:\n          type: array\n          items:\n            type: string\n      description: >-\n        Detailed HTTP error response, which consists of an HTTP status code and\n        a custom error message unique for each failure case.\n\n    StorageCapabilitiesResponse:\n      type: object\n      required:\n        - metadata\n        - spec\n      properties:\n        metadata:\n          type: object\n          properties:\n            model:\n              type: string\n              description: Name of the supported storage (driver)\n              example: VMAX250F\n            vendor:\n              type: string\n              description: Name of the vendor\n              example: Dell EMC\n        spec:\n          type: object\n          required:\n            - is_historic\n          properties:\n            is_historic:\n              type: boolean\n              example: true\n              description: Set true during storage driver registration if the driver supports fetching historic metrics. 
This enables the internal performance framework to either call the driver interface to pull real-time metrics or to fetch historic time-series metrics.\n            resource_metrics:\n              $ref: '#/components/schemas/ResourceMetrics'\n    ResourceMetrics:\n      type: object\n      description: Map of resources and supported metrics of respective resources for storage (driver)\n      additionalProperties:\n        type: array\n        items:\n          type: object\n          description: list of metrics with supported units and their descriptions\n          properties:\n            unit:\n              type: string\n              description: supported metric unit\n            description:\n              type: string\n              description: storage-specific description for the respective metric\n      example:\n        storagePool:\n          - throughput:\n              unit: MB/s\n              description: Represents how much data is successfully transferred in MB/s\n          - readThroughput:\n              unit: MB/s\n              description: Represents how much data read is successfully transferred in MB/s\n          - readRequests:\n              unit: IOPS\n              description: Read requests per second\n  responses:\n    HTTPStatus400:\n      description: BadRequest\n      content:\n        application/json:\n          schema:\n            $ref: '#/components/schemas/ErrorSpec'\n    HTTPStatus401:\n      description: NotAuthorized\n      content:\n        application/json:\n          schema:\n            $ref: '#/components/schemas/ErrorSpec'\n    HTTPStatus403:\n      description: Forbidden\n      content:\n        application/json:\n          schema:\n            $ref: '#/components/schemas/ErrorSpec'\n    HTTPStatus404:\n      description: The resource does not exist\n      content:\n        application/json:\n          schema:\n            $ref: '#/components/schemas/ErrorSpec'\n    HTTPStatus409:\n      description: An item already exists\n      content:\n        application/json:\n          schema:\n            $ref: '#/components/schemas/ErrorSpec'\n    HTTPStatus500:\n      description: An unexpected error occurred.\n      content:\n        application/json:\n          schema:\n            $ref: '#/components/schemas/ErrorSpec'\n  parameters:\n    storage_id:\n      name: storage_id\n      in: path\n      description: Database ID created for a storage backend.\n      required: true\n      style: simple\n      explode: false\n      schema:\n        type: string\n    native_storage_pool_id:\n      name: native_storage_pool_id\n      in: path\n      description: Actual ID of the storage pool in the storage backend.\n      required: true\n      style: simple\n      explode: false\n      schema:\n        type: string\n    native_volume_id:\n      name: native_volume_id\n      in: path\n      description: Actual ID of the volume in the storage backend.\n      required: true\n      style: simple\n      explode: false\n      schema:\n        type: string\n    limit:\n      name: limit\n      in: query\n      description: Requests a page size of items. Returns a number of items up to a limit value. 
Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request.\n      required: false\n      style: form\n      explode: true\n      schema:\n        minimum: 1\n        type: integer\n        format: int32\n    offset:\n      name: offset\n      in: query\n      description: Used in conjunction with limit to return a slice of items. offset is where to start in the list.\n      required: false\n      style: form\n      explode: true\n      schema:\n        minimum: 0\n        type: integer\n        format: int32\n    sequence_number:\n      name: sequence_number\n      in: path\n      description: Sequence number which uniquely maps to the trap sent by a backend.\n      required: true\n      style: simple\n      explode: false\n      schema:\n        type: string\n  requestBodies:\n    SnmpConfigUpdateSpec:\n      content:\n        application/json:\n          schema:\n            $ref: '#/components/schemas/SnmpConfigUpdateSpec'\n    StorageBackendAlertSync:\n      content:\n        application/json:\n          schema:\n            $ref: '#/components/schemas/StorageBackendAlertSync'\n"
  },
  {
    "path": "requirements.txt",
    "content": "# The order of packages is significant, because pip processes them in the order\n# of appearance. Changing the order has an impact on the overall integration\n# process, which may cause wedges in the gate later.\n\n# pbr should be first\npbr!=2.1.0,>=2.0.0 # Apache-2.0\n\nalembic>=0.8.10 # MIT\nBabel>=2.9.1 # BSD\neventlet>=0.31.0 # MIT\ngreenlet>=0.4.10 # MIT\njsonschema>=2.6.0 # MIT\noslo.config>=5.2.0 # Apache-2.0\noslo.context>=2.19.2 # Apache-2.0\noslo.db==11.3.0 # Apache-2.0\noslo.i18n>=3.15.3 # Apache-2.0\noslo.log>=3.36.0 # Apache-2.0\noslo.messaging>=5.29.0 # Apache-2.0\noslo.middleware>=3.31.0 # Apache-2.0\noslo.rootwrap>=5.8.0 # Apache-2.0\noslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0\noslo.service!=1.28.1,>=1.24.0 # Apache-2.0\noslo.utils>=3.33.0 # Apache-2.0\noslo.concurrency>=3.26.0 # Apache-2.0\nparamiko>=2.0.0 # LGPLv2.1+\nPaste>=2.0.2 # MIT\nPasteDeploy>=1.5.0 # MIT\nretrying!=1.3.0,>=1.2.3 # Apache-2.0\nRoutes>=2.3.1 # MIT\nsix>=1.10.0 # MIT\nSQLAlchemy==1.4.44 # MIT\nstevedore>=1.20.0 # Apache-2.0\ntooz==2.8.0 # Apache-2.0\nWebOb>=1.7.1 # MIT\npysnmp>=4.4.11 # BSD\nredis>=3.3.8 # MIT\ncryptography<3.4; # Apache-2.0\npyopenssl==19.1.0 # Apache-2.0\nAPScheduler~=3.6.3\nflask\nkafka-python\nimportlib-metadata==3.7.0; python_version < \"3.8\"\ntenacity==6.3.1\ntzlocal<3.0\nscp>=0.13.0\ndefusedxml==0.6.0\nxlrd>=2.0.1\n"
  },
  {
    "path": "script/create_db.py",
    "content": "#!/usr/bin/env python\n\n# Copyright 2020 The SODA Authors.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\");\n#    you may not use this file except in compliance with the License.\n#    You may obtain a copy of the License at\n#\n#        http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS,\n#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#    See the License for the specific language governing permissions and\n#    limitations under the License.\n\n\"\"\"db create  script for delfin \"\"\"\n\n\nimport sys\n\nfrom oslo_config import cfg\n\nfrom delfin import db\nfrom delfin import version\n\nCONF = cfg.CONF\n\n\ndef main():\n\n    CONF(sys.argv[1:], project='delfin',\n         version=version.version_string())\n    db.register_db()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "script/start.sh",
    "content": "#!/usr/bin/env bash\n\n# Copyright 2020 The SODA Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nset -e\n\ncase \"$1\" in\n  api)\n    # Register database schema\n    python3 script/create_db.py --config-file /etc/delfin/delfin.conf\n    # Run API Server\n    exec python3 delfin/cmd/api.py --config-file /etc/delfin/delfin.conf\n    ;;\n  task)\n    exec python3 delfin/cmd/task.py --config-file /etc/delfin/delfin.conf\n    ;;\n  alert)\n    exec python3 delfin/cmd/alert.py --config-file /etc/delfin/delfin.conf\n    ;;\n  exporter)\n    exec python3 delfin/exporter/prometheus/exporter_server.py --config-file /etc/delfin/delfin.conf\n    ;;\n  *)\n    echo \"Usage: $0 {api|task|alert|exporter}\" >&2\n    exit 1\n    ;;\nesac\n"
  },
  {
    "path": "setup.cfg",
    "content": ""
  },
  {
    "path": "setup.py",
    "content": "# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\n\nsetup(\n    name=\"delfin\",\n    version=\"1.0.0\",\n    author=\"SODA Authors\",\n    author_email=\"Opensds-tech-discuss@lists.opensds.io\",\n    license=\"Apache 2.0\",\n    packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n    python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*\",\n    entry_points={\n        'delfin.alert.exporters': [\n            'example = delfin.exporter.example:AlertExporterExample',\n            'prometheus = delfin.exporter.prometheus.exporter'\n            ':AlertExporterPrometheus',\n        ],\n        'delfin.performance.exporters': [\n            'example = delfin.exporter.example:PerformanceExporterExample',\n            'prometheus = delfin.exporter.prometheus.exporter'\n            ':PerformanceExporterPrometheus',\n            'kafka = delfin.exporter.kafka.exporter:PerformanceExporterKafka'\n        ],\n        'delfin.storage.drivers': [\n            'fake_storage fake_driver = delfin.drivers.fake_storage:FakeStorageDriver',\n            'fujitsu eternus = delfin.drivers.fujitsu.eternus.eternus_stor:EternusDriver',\n            'dellemc unity = delfin.drivers.dell_emc.unity.unity:UnityStorDriver',\n            'dellemc vmax = delfin.drivers.dell_emc.vmax.vmax:VMAXStorageDriver',\n            'dellemc pmax = delfin.drivers.dell_emc.vmax.vmax:VMAXStorageDriver',\n            'dellemc scaleio = delfin.drivers.dell_emc.scaleio.scaleio_stor:ScaleioStorageDriver',\n            'dellemc vnx_block = delfin.drivers.dell_emc.vnx.vnx_block.vnx_block:VnxBlockStorDriver',\n            'dellemc vplex = delfin.drivers.dell_emc.vplex.vplex_stor:VplexStorageDriver',\n            'dellemc powerstore = delfin.drivers.dell_emc.power_store.power_store:PowerStoreDriver',\n            'hitachi vsp = delfin.drivers.hitachi.vsp.vsp_stor:HitachiVspDriver',\n            'hpe 3par = delfin.drivers.hpe.hpe_3par.hpe_3parstor:Hpe3parStorDriver',\n            'hpe primera = delfin.drivers.hpe.hpe_3par.hpe_3parstor:Hpe3parStorDriver',\n            'hpe msa = delfin.drivers.hpe.hpe_msa.hpe_msastor:HpeMsaStorDriver',\n            'huawei oceanstor = delfin.drivers.huawei.oceanstor.oceanstor:OceanStorDriver',\n            'ibm storwize_svc = delfin.drivers.ibm.storwize_svc.storwize_svc:StorwizeSVCDriver',\n            'ibm ds8k = delfin.drivers.ibm.ds8k.ds8k:DS8KDriver',\n            'netapp cmode = delfin.drivers.netapp.dataontap.cluster_mode:NetAppCmodeDriver',\n            'hitachi hnas = delfin.drivers.hitachi.hnas.hds_nas:HitachiHNasDriver',\n            'pure flasharray = delfin.drivers.pure.flasharray.pure_flasharray:PureFlashArrayDriver',\n            'h3c unistor_cf = delfin.drivers.h3c.unistor_cf.unistor_cf:H3cUniStorCfDriver',\n            'macrosan macrosan = delfin.drivers.macro_san.ms.ms_stor:MacroSanDriver',\n            # AS5500/AS5300/AS2600/AS2200 use the same driver\n 
           'inspur as5500 = delfin.drivers.inspur.as5500.as5500:As5500Driver'\n        ]\n    },\n)\n"
  },
  {
    "path": "test-requirements.txt",
    "content": "coverage!=4.4,>=4.0 # Apache-2.0\nddt>=1.0.1 # MIT\nfixtures>=3.0.0 # Apache-2.0/BSD\niso8601>=0.1.11 # MIT\noslotest>=3.2.0 # Apache-2.0\ntesttools>=2.2.0 # MIT"
  },
  {
    "path": "tox.ini",
    "content": "[tox]\nskipsdist = True\nskip_missing_interpreters = True\nenvlist =\n    py3,\n    pep8\n\n[testenv]\nbasepython = python3\nusedevelop = True\nsetenv = VIRTUAL_ENV={envdir}\ndeps =\n    -r{toxinidir}/requirements.txt\n    -r{toxinidir}/test-requirements.txt\ncommands =\n    coverage erase\n    coverage run -m unittest discover {posargs:delfin/tests/unit}\n    coverage html -d htmlcov\n\n[testenv:pep8]\ndeps = flake8\ncommands =\n    flake8 {posargs:delfin}\n    flake8 {posargs:installer}\n\n[flake8]\nignore = E402,W503,W504\n"
  }
]